From ee77dcf650c1f56a0a0a65c693da1ddddcea1492 Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Tue, 16 Feb 2021 17:48:48 +0100 Subject: [PATCH 01/10] FEA allow any resampler in the BalancedBaggingClassifier --- imblearn/ensemble/_bagging.py | 29 ++-- imblearn/ensemble/tests/test_bagging.py | 199 ++++++++++++++++-------- 2 files changed, 155 insertions(+), 73 deletions(-) diff --git a/imblearn/ensemble/_bagging.py b/imblearn/ensemble/_bagging.py index 073147038..2f93a2864 100644 --- a/imblearn/ensemble/_bagging.py +++ b/imblearn/ensemble/_bagging.py @@ -31,7 +31,7 @@ class BalancedBaggingClassifier(BaggingClassifier): This implementation of Bagging is similar to the scikit-learn implementation. It includes an additional step to balance the training set - at fit time using a ``RandomUnderSampler``. + at fit time using a given sampler. Read more in the :ref:`User Guide `. @@ -74,7 +74,9 @@ class BalancedBaggingClassifier(BaggingClassifier): {sampling_strategy} replacement : bool, default=False - Whether or not to sample randomly with replacement or not. + Whether or not to randomly sample with replacement or not when + `sampler is None`, corresponding to a + :class:`~imblearn.under_sampling.RandomUnderSampler`. {n_jobs} @@ -196,6 +198,7 @@ def __init__( n_jobs=None, random_state=None, verbose=0, + sampler=None, ): super().__init__( @@ -213,6 +216,7 @@ def __init__( ) self.sampling_strategy = sampling_strategy self.replacement = replacement + self.sampler = sampler def _validate_y(self, y): y_encoded = super()._validate_y(y) @@ -222,7 +226,7 @@ def _validate_y(self, y): for key, value in check_sampling_strategy( self.sampling_strategy, y, - "under-sampling", + self.sampler_._sampling_type, ).items() } else: @@ -247,15 +251,11 @@ def _validate_estimator(self, default=DecisionTreeClassifier()): else: base_estimator = clone(default) + self.sampler_.set_params(sampling_strategy=self._sampling_strategy) + self.base_estimator_ = Pipeline( [ - ( - "sampler", - RandomUnderSampler( - sampling_strategy=self._sampling_strategy, - replacement=self.replacement, - ), - ), + ("sampler", self.sampler_), ("classifier", base_estimator), ] ) @@ -277,6 +277,15 @@ def fit(self, X, y): Returns self. """ check_target_type(y) + # the sampler needs to be validated before to call _fit because + # _validate_y is called before _validate_estimator and would require + # to know which type of sampler we are using. + if self.sampler is None: + self.sampler_ = RandomUnderSampler( + replacement=self.replacement, + ) + else: + self.sampler_ = clone(self.sampler) # RandomUnderSampler is not supporting sample_weight. We need to pass # None. return self._fit(X, y, self.max_samples, sample_weight=None) diff --git a/imblearn/ensemble/tests/test_bagging.py b/imblearn/ensemble/tests/test_bagging.py index b889c9dec..28b87c4bc 100644 --- a/imblearn/ensemble/tests/test_bagging.py +++ b/imblearn/ensemble/tests/test_bagging.py @@ -3,6 +3,8 @@ # Christos Aridas # License: MIT +from collections import Counter + import numpy as np import pytest @@ -24,45 +26,57 @@ from imblearn.datasets import make_imbalance from imblearn.ensemble import BalancedBaggingClassifier +from imblearn.over_sampling import RandomOverSampler, SMOTE from imblearn.pipeline import make_pipeline -from imblearn.under_sampling import RandomUnderSampler +from imblearn.under_sampling import ClusterCentroids, RandomUnderSampler iris = load_iris() -def test_balanced_bagging_classifier(): - # Check classification for various parameter settings. 
- X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, - ) - X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) - grid = ParameterGrid( +@pytest.mark.parametrize( + "base_estimator", + [ + None, + DummyClassifier(strategy="prior"), + Perceptron(max_iter=1000, tol=1e-3), + DecisionTreeClassifier(), + KNeighborsClassifier(), + SVC(gamma="scale"), + ], +) +@pytest.mark.parametrize( + "params", + ParameterGrid( { "max_samples": [0.5, 1.0], "max_features": [1, 2, 4], "bootstrap": [True, False], "bootstrap_features": [True, False], } + ), +) +def test_balanced_bagging_classifier(base_estimator, params): + # Check classification for various parameter settings. + X, y = make_imbalance( + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) - for base_estimator in [ - None, - DummyClassifier(strategy="prior"), - Perceptron(max_iter=1000, tol=1e-3), - DecisionTreeClassifier(), - KNeighborsClassifier(), - SVC(gamma="scale"), - ]: - for params in grid: - BalancedBaggingClassifier( - base_estimator=base_estimator, random_state=0, **params - ).fit(X_train, y_train).predict(X_test) + BalancedBaggingClassifier( + base_estimator=base_estimator, random_state=0, **params + ).fit(X_train, y_train).predict(X_test) def test_bootstrap_samples(): # Test that bootstrapping samples generate non-perfect base estimators. X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) @@ -95,7 +109,10 @@ def test_bootstrap_samples(): def test_bootstrap_features(): # Test that bootstrapping features may generate duplicate features. X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) @@ -125,7 +142,10 @@ def test_bootstrap_features(): def test_probability(): # Predict probabilities. 
X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) @@ -136,11 +156,13 @@ def test_probability(): ).fit(X_train, y_train) assert_array_almost_equal( - np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test)), + np.sum(ensemble.predict_proba(X_test), axis=1), + np.ones(len(X_test)), ) assert_array_almost_equal( - ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test)), + ensemble.predict_proba(X_test), + np.exp(ensemble.predict_log_proba(X_test)), ) # Degenerate case, where some classes are missing @@ -152,11 +174,13 @@ def test_probability(): ensemble.fit(X_train, y_train) assert_array_almost_equal( - np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test)), + np.sum(ensemble.predict_proba(X_test), axis=1), + np.ones(len(X_test)), ) assert_array_almost_equal( - ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test)), + ensemble.predict_proba(X_test), + np.exp(ensemble.predict_log_proba(X_test)), ) @@ -164,7 +188,10 @@ def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) @@ -195,7 +222,10 @@ def test_oob_score_classification(): def test_single_estimator(): # Check singleton ensembles. X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) @@ -215,42 +245,32 @@ def test_single_estimator(): assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) -def test_error(): +@pytest.mark.parametrize( + "params", + [ + {"n_estimators": 1.5}, + {"n_estimators": -1}, + {"max_samples": -1}, + {"max_samples": 0.0}, + {"max_samples": 2.0}, + {"max_samples": 1000}, + {"max_samples": "foobar"}, + {"max_features": -1}, + {"max_features": 0.0}, + {"max_features": 2.0}, + {"max_features": 5}, + {"max_features": "foobar"}, + ], +) +def test_balanced_bagging_classifier_error(params): # Test that it gives proper exception on deficient input. 
X, y = make_imbalance( iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50} ) base = DecisionTreeClassifier() - - # Test n_estimators - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, n_estimators=1.5).fit(X, y) + clf = BalancedBaggingClassifier(base_estimator=base, **params) with pytest.raises(ValueError): - BalancedBaggingClassifier(base, n_estimators=-1).fit(X, y) - - # Test max_samples - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_samples=-1).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_samples=0.0).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_samples=2.0).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_samples=1000).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_samples="foobar").fit(X, y) - - # Test max_features - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_features=-1).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_features=0.0).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_features=2.0).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_features=5).fit(X, y) - with pytest.raises(ValueError): - BalancedBaggingClassifier(base, max_features="foobar").fit(X, y) + clf.fit(X, y) # Test support of decision_function assert not (hasattr(BalancedBaggingClassifier(base).fit(X, y), "decision_function")) @@ -276,7 +296,10 @@ def test_gridsearch(): def test_base_estimator(): # Check base_estimator and its default values. X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) @@ -301,10 +324,14 @@ def test_base_estimator(): def test_bagging_with_pipeline(): X, y = make_imbalance( - iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}, random_state=0, + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, ) estimator = BalancedBaggingClassifier( - make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2, + make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), + max_features=2, ) estimator.fit(X, y).predict(X) @@ -318,7 +345,9 @@ def test_warm_start(random_state=42): for n_estimators in [5, 10]: if clf_ws is None: clf_ws = BalancedBaggingClassifier( - n_estimators=n_estimators, random_state=random_state, warm_start=True, + n_estimators=n_estimators, + random_state=random_state, + warm_start=True, ) else: clf_ws.set_params(n_estimators=n_estimators) @@ -477,3 +506,47 @@ def test_max_samples_consistency(): ) bagging.fit(X, y) assert bagging._max_samples == max_samples + + +class CountDecisionTreeClassifier(DecisionTreeClassifier): + """DecisionTreeClassifier that will memorize the number of samples seen + at fit.""" + + def fit(self, X, y, sample_weight=None): + self.class_counts_ = Counter(y) + return super().fit(X, y, sample_weight=sample_weight) + + +@pytest.mark.parametrize( + "sampler, n_samples_bootstrap", + [ + (None, 15), + (RandomUnderSampler(), 15), # under-sampling with sample_indices_ + (ClusterCentroids(), 15), # under-sampling without sample_indices_ + (RandomOverSampler(), 40), # over-sampling with sample_indices_ + (SMOTE(), 40), # over-sampling without sample_indices_ + ], +) +def 
test_balanced_bagging_classifier_samplers(sampler, n_samples_bootstrap): + # check that we can pass any kind of sampler to a bagging classifier + X, y = make_imbalance( + iris.data, + iris.target, + sampling_strategy={0: 20, 1: 25, 2: 50}, + random_state=0, + ) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + clf = BalancedBaggingClassifier( + base_estimator=CountDecisionTreeClassifier(), + n_estimators=2, + sampler=sampler, + random_state=0, + ) + clf.fit(X_train, y_train) + clf.predict(X_test) + + # check that we have balanced class with the right counts of class + # sample depending on the sampling strategy + assert_array_equal( + list(clf.estimators_[0][-1].class_counts_.values()), n_samples_bootstrap + ) From f62c867d3623a69820aff4e7cddcce7dcedfc3bd Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Tue, 16 Feb 2021 18:54:31 +0100 Subject: [PATCH 02/10] DOC add more details regarding bootstrap --- imblearn/ensemble/_bagging.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/imblearn/ensemble/_bagging.py b/imblearn/ensemble/_bagging.py index 2f93a2864..38c6bcdec 100644 --- a/imblearn/ensemble/_bagging.py +++ b/imblearn/ensemble/_bagging.py @@ -59,6 +59,10 @@ class BalancedBaggingClassifier(BaggingClassifier): bootstrap : bool, default=True Whether samples are drawn with replacement. + .. note:: + It is important to note that the bootstrap will be generated from + the resampled dataset. + bootstrap_features : bool, default=False Whether features are drawn with replacement. @@ -85,6 +89,13 @@ class BalancedBaggingClassifier(BaggingClassifier): verbose : int, default=0 Controls the verbosity of the building process. + sampler : sampler object, default=None + The sampler used to balanced the dataset before to bootstrap + (if `bootstrap=True`) and `fit` a base estimator. By default, a + :class:`~sklearn.under_sampling.RandomUnderSampler` is used. + + .. versionadded:: 0.8 + Attributes ---------- base_estimator_ : estimator From 0a6a5095b061b86dd16ce69a2ea150099f66d5cc Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Tue, 16 Feb 2021 19:49:30 +0100 Subject: [PATCH 03/10] DOC improve user guide --- doc/ensemble.rst | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/doc/ensemble.rst b/doc/ensemble.rst index dc4ca94c7..7b2322124 100644 --- a/doc/ensemble.rst +++ b/doc/ensemble.rst @@ -18,9 +18,9 @@ Bagging classifier In ensemble classifiers, bagging methods build several estimators on different randomly selected subset of data. In scikit-learn, this classifier is named -``BaggingClassifier``. However, this classifier does not allow to balance each -subset of data. Therefore, when training on imbalanced data set, this -classifier will favor the majority classes:: +:class:`~sklearn.ensemble.BaggingClassifier`. However, this classifier does not +allow to balance each subset of data. Therefore, when training on imbalanced +data set, this classifier will favor the majority classes:: >>> from sklearn.datasets import make_classification >>> X, y = make_classification(n_samples=10000, n_features=2, n_informative=2, @@ -41,14 +41,13 @@ classifier will favor the majority classes:: >>> balanced_accuracy_score(y_test, y_pred) # doctest: +ELLIPSIS 0.77... -:class:`BalancedBaggingClassifier` allows to resample each subset of data -before to train each estimator of the ensemble. In short, it combines the -output of an :class:`EasyEnsemble` sampler with an ensemble of classifiers -(i.e. 
``BaggingClassifier``). Therefore, :class:`BalancedBaggingClassifier` -takes the same parameters than the scikit-learn -``BaggingClassifier``. Additionally, there is two additional parameters, -``sampling_strategy`` and ``replacement`` to control the behaviour of the -random under-sampler:: +In :class:`BalancedBaggingClassifier`, each bootstrap sample will be further +resampled to achieve the `sampling_strategy` desired. Therefore, +:class:`BalancedBaggingClassifier` takes the same parameters than the +scikit-learn :class:`~sklearn.ensemble.BaggingClassifier`. In addition, the +sampling is controlled by the parameter `sampler` or the two parameters +`sampling_strategy` and `replacement`, if one wants to use the +:class:`~imblearn.under_sampling.RandomUnderSampler`:: >>> from imblearn.ensemble import BalancedBaggingClassifier >>> bbc = BalancedBaggingClassifier(base_estimator=DecisionTreeClassifier(), @@ -69,8 +68,7 @@ Forest of randomized trees :class:`BalancedRandomForestClassifier` is another ensemble method in which each tree of the forest will be provided a balanced bootstrap sample :cite:`chen2004using`. This class provides all functionality of the -:class:`~sklearn.ensemble.RandomForestClassifier` and notably the -`feature_importances_` attributes:: +:class:`~sklearn.ensemble.RandomForestClassifier`:: >>> from imblearn.ensemble import BalancedRandomForestClassifier >>> brf = BalancedRandomForestClassifier(n_estimators=100, random_state=0) @@ -99,11 +97,11 @@ a boosting iteration :cite:`seiffert2009rusboost`:: >>> balanced_accuracy_score(y_test, y_pred) # doctest: +ELLIPSIS 0... -A specific method which uses ``AdaBoost`` as learners in the bagging classifier -is called EasyEnsemble. The :class:`EasyEnsembleClassifier` allows to bag -AdaBoost learners which are trained on balanced bootstrap samples -:cite:`liu2008exploratory`. Similarly to the :class:`BalancedBaggingClassifier` -API, one can construct the ensemble as:: +A specific method which uses :class:`~sklearn.ensemble.AdaBoostClassifier` as +learners in the bagging classifier is called "EasyEnsemble". The +:class:`EasyEnsembleClassifier` allows to bag AdaBoost learners which are +trained on balanced bootstrap samples :cite:`liu2008exploratory`. Similarly to +the :class:`BalancedBaggingClassifier` API, one can construct the ensemble as:: >>> from imblearn.ensemble import EasyEnsembleClassifier >>> eec = EasyEnsembleClassifier(random_state=0) From ee2679c76a202fd3082ab33254f1a4b95559e13a Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Wed, 17 Feb 2021 23:27:35 +0100 Subject: [PATCH 04/10] DOC add references to known algorithms --- imblearn/ensemble/_bagging.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/imblearn/ensemble/_bagging.py b/imblearn/ensemble/_bagging.py index 38c6bcdec..ea570150b 100644 --- a/imblearn/ensemble/_bagging.py +++ b/imblearn/ensemble/_bagging.py @@ -33,6 +33,10 @@ class BalancedBaggingClassifier(BaggingClassifier): implementation. It includes an additional step to balance the training set at fit time using a given sampler. + This classifier can serves as a basis to implement various methods such as + Exactly Balanced Bagging [6]_, Roughly Balanced Bagging [7]_, + Over-Bagging [6]_, or SMOTE-Bagging [8]_. + Read more in the :ref:`User Guide `. Parameters @@ -60,8 +64,8 @@ class BalancedBaggingClassifier(BaggingClassifier): Whether samples are drawn with replacement. .. 
note:: - It is important to note that the bootstrap will be generated from - the resampled dataset. + Note that this bootstrap will be generated from the resampled + dataset. bootstrap_features : bool, default=False Whether features are drawn with replacement. @@ -92,7 +96,7 @@ class BalancedBaggingClassifier(BaggingClassifier): sampler : sampler object, default=None The sampler used to balanced the dataset before to bootstrap (if `bootstrap=True`) and `fit` a base estimator. By default, a - :class:`~sklearn.under_sampling.RandomUnderSampler` is used. + :class:`~imblearn.under_sampling.RandomUnderSampler` is used. .. versionadded:: 0.8 @@ -164,10 +168,21 @@ class BalancedBaggingClassifier(BaggingClassifier): .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine Learning and Knowledge Discovery in Databases, 346-361, 2012. - .. [5] Chen, Chao, Andy Liaw, and Leo Breiman. "Using random forest to + .. [5] C. Chen Chao, A. Liaw, and L. Breiman. "Using random forest to learn imbalanced data." University of California, Berkeley 110, 2004. + .. [6] R. Maclin, and D. Opitz. "An empirical evaluation of bagging and + boosting." AAAI/IAAI 1997 (1997): 546-551. + + .. [7] S. Hido, H. Kashima, and Y. Takahashi. "Roughly balanced bagging + for imbalanced data." Statistical Analysis and Data Mining: The ASA + Data Science Journal 2.5‐6 (2009): 412-426. + + .. [8] S. Wang, and X. Yao. "Diversity analysis on imbalanced data sets by + using ensemble models." 2009 IEEE symposium on computational + intelligence and data mining. IEEE, 2009. + Examples -------- >>> from collections import Counter From afa54bde9fbfa76d4ac82e18ee21811fba2b4f6b Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 18 Feb 2021 00:12:59 +0100 Subject: [PATCH 05/10] iter --- examples/ensemble/plot_bagging_classifier.py | 113 ++++++++++++++++++ .../plot_comparison_ensemble_classifier.py | 3 +- 2 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 examples/ensemble/plot_bagging_classifier.py diff --git a/examples/ensemble/plot_bagging_classifier.py b/examples/ensemble/plot_bagging_classifier.py new file mode 100644 index 000000000..9bfc26135 --- /dev/null +++ b/examples/ensemble/plot_bagging_classifier.py @@ -0,0 +1,113 @@ +""" +================================= +Bagging classifiers using sampler +================================= + +In this example, we show how +:class:`~imblearn.ensemble.BalancedBaggingClassifier` can be used to create a +large variety of classifiers by giving different samplers. + +We will give several examples that have been published in the passed year. +""" + +# Authors: Guillaume Lemaitre +# License: MIT + +# %% +print(__doc__) + +# %% [markdown] +# Generate an imbalanced dataset +# ------------------------------ +# +# For this example, we will create a synthetic dataset using the function +# :func:`~sklearn.datasets.make_classification`. The problem will be a toy +# classification problem with a ratio of 1:9 between the two classes. + +# %% +from sklearn.datasets import make_classification + +X, y = make_classification( + n_samples=10_000, + n_features=10, + weights=[0.1, 0.9], + class_sep=0.5, + random_state=0, +) + +# %% +import pandas as pd + +pd.Series(y).value_counts(normalize=True) + +# %% [markdown] +# In the following sections, we will show a couple of algorithms that have +# been proposed over the years. We intend to illustrate how one can reuse the +# :class:`~imblearn.ensemble.BalancedBaggingClassifier` by passing different +# sampler. 
+ +# %% +from sklearn.model_selection import cross_validate +from sklearn.ensemble import BaggingClassifier + +ebb = BaggingClassifier() +cv_results = cross_validate(ebb, X, y, scoring="balanced_accuracy") + +print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}") + +# %% [markdown] +# Exactly Balanced Bagging and Over-Bagging +# ----------------------------------------- +# +# The :class:`~imblearn.ensemble.BalancedBaggingClassifier` can use in +# conjunction with a :class:`~imblearn.under_sampling.RandomUnderSampler` or +# :class:`~imblearn.over_sampling.RandomOverSampler`. These methods are +# referred as Exactly Balanced Bagging and Over-Bagging, respectively and have +# been proposed first in [1]_. + +# %% +from imblearn.ensemble import BalancedBaggingClassifier +from imblearn.under_sampling import RandomUnderSampler + +# Exactly Balanced Bagging +ebb = BalancedBaggingClassifier(sampler=RandomUnderSampler()) +cv_results = cross_validate(ebb, X, y, scoring="balanced_accuracy") + +print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}") + +# %% +from imblearn.over_sampling import RandomOverSampler + +# Over-bagging +over_bagging = BalancedBaggingClassifier(sampler=RandomOverSampler()) +cv_results = cross_validate(over_bagging, X, y, scoring="balanced_accuracy") + +print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}") + +# %% [markdown] +# SMOTE-Bagging +# ------------- +# +# Instead of using a :class:`~imblearn.over_sampling.RandomOverSampler` that +# make a bootstrap, an alternative is to use +# :class:`~imblearn.over_sampling.SMOTE` as an over-sampler. This is known as +# SMOTE-Bagging [2]_. + +# %% +from imblearn.over_sampling import SMOTE + +# SMOTE-Bagging +smote_bagging = BalancedBaggingClassifier(sampler=SMOTE()) +cv_results = cross_validate(smote_bagging, X, y, scoring="balanced_accuracy") + +print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}") + +# %% [markdown] +# .. topic:: References: +# +# .. [1] R. Maclin, and D. Opitz. "An empirical evaluation of bagging and +# boosting." AAAI/IAAI 1997 (1997): 546-551. +# +# .. [2] S. Wang, and X. Yao. "Diversity analysis on imbalanced data sets by +# using ensemble models." 2009 IEEE symposium on computational +# intelligence and data mining. IEEE, 2009. diff --git a/examples/ensemble/plot_comparison_ensemble_classifier.py b/examples/ensemble/plot_comparison_ensemble_classifier.py index 294283f4e..65bcf9965 100644 --- a/examples/ensemble/plot_comparison_ensemble_classifier.py +++ b/examples/ensemble/plot_comparison_ensemble_classifier.py @@ -3,7 +3,7 @@ Compare ensemble classifiers using resampling ============================================= -Ensembling classifiers have shown to improve classification performance compare +Ensemble classifiers have shown to improve classification performance compare to single learner. However, they will be affected by class imbalance. This example shows the benefit of balancing the training set before to learn learners. We are making the comparison with non-balanced ensemble methods. @@ -11,7 +11,6 @@ We make a comparison using the balanced accuracy and geometric mean which are metrics widely used in the literature to evaluate models learned on imbalanced set. 
- """ # Authors: Guillaume Lemaitre From 77f9c750a16d00ae4a8c79000a283733bfc79ad7 Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 18 Feb 2021 01:08:10 +0100 Subject: [PATCH 06/10] iter --- examples/ensemble/plot_bagging_classifier.py | 46 ++++++++++++++++++++ imblearn/ensemble/_bagging.py | 8 +++- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/examples/ensemble/plot_bagging_classifier.py b/examples/ensemble/plot_bagging_classifier.py index 9bfc26135..285dd6d30 100644 --- a/examples/ensemble/plot_bagging_classifier.py +++ b/examples/ensemble/plot_bagging_classifier.py @@ -102,6 +102,48 @@ print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}") +# %% [markdown] +# Roughly Balanced Bagging +# ------------------------ +# FIXME: narration based on [3]_. + +# %% +from collections import Counter +import numpy as np +from imblearn import FunctionSampler + + +def binomial_resampling(X, y): + class_counts = Counter(y) + majority_class = max(class_counts, key=class_counts.get) + minority_class = min(class_counts, key=class_counts.get) + + n_minority_class = class_counts[minority_class] + n_majority_resampled = np.random.negative_binomial(n_minority_class, 0.5) + + majority_indices = np.random.choice( + np.flatnonzero(y == majority_class), + size=n_majority_resampled, + replace=True, + ) + minority_indices = np.random.choice( + np.flatnonzero(y == minority_class), + size=n_minority_class, + replace=True, + ) + indices = np.hstack([majority_indices, minority_indices]) + + X_res, y_res = X[indices], y[indices] + return X_res, y_res + + +# Roughly Balanced Bagging +rbb = BalancedBaggingClassifier(sampler=FunctionSampler(func=binomial_resampling)) +cv_results = cross_validate(rbb, X, y, scoring="balanced_accuracy") + +print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}") + + # %% [markdown] # .. topic:: References: # @@ -111,3 +153,7 @@ # .. [2] S. Wang, and X. Yao. "Diversity analysis on imbalanced data sets by # using ensemble models." 2009 IEEE symposium on computational # intelligence and data mining. IEEE, 2009. +# +# .. [3] S. Hido, H. Kashima, and Y. Takahashi. "Roughly balanced bagging +# for imbalanced data." Statistical Analysis and Data Mining: The ASA +# Data Science Journal 2.5‐6 (2009): 412-426. 
diff --git a/imblearn/ensemble/_bagging.py b/imblearn/ensemble/_bagging.py index ea570150b..d773fd127 100644 --- a/imblearn/ensemble/_bagging.py +++ b/imblearn/ensemble/_bagging.py @@ -246,7 +246,10 @@ def __init__( def _validate_y(self, y): y_encoded = super()._validate_y(y) - if isinstance(self.sampling_strategy, dict): + if ( + isinstance(self.sampling_strategy, dict) + and self.sampler_._sampling_type != "bypass" + ): self._sampling_strategy = { np.where(self.classes_ == key)[0][0]: value for key, value in check_sampling_strategy( @@ -277,7 +280,8 @@ def _validate_estimator(self, default=DecisionTreeClassifier()): else: base_estimator = clone(default) - self.sampler_.set_params(sampling_strategy=self._sampling_strategy) + if self.sampler_._sampling_type != "bypass": + self.sampler_.set_params(sampling_strategy=self._sampling_strategy) self.base_estimator_ = Pipeline( [ From d1d95464816ac22af181d9b60ea00d54084d612f Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 18 Feb 2021 10:52:45 +0100 Subject: [PATCH 07/10] iter --- examples/ensemble/plot_bagging_classifier.py | 32 ++++++++--- imblearn/ensemble/tests/test_bagging.py | 56 +++++++++++++++++++- 2 files changed, 79 insertions(+), 9 deletions(-) diff --git a/examples/ensemble/plot_bagging_classifier.py b/examples/ensemble/plot_bagging_classifier.py index 285dd6d30..9e392c506 100644 --- a/examples/ensemble/plot_bagging_classifier.py +++ b/examples/ensemble/plot_bagging_classifier.py @@ -105,7 +105,17 @@ # %% [markdown] # Roughly Balanced Bagging # ------------------------ -# FIXME: narration based on [3]_. +# While using a :class:`~imblearn.under_sampling.RandomUnderSampler` or +# :class:`~imblearn.over_sampling.RandomOverSampler` will create exactly the +# desired number of samples, it does not follow the statistical spirit wanted +# in the bagging framework. The authors in [3]_ proposes to use a negative +# binomial distribution to compute the number of samples of the majority +# class to be selected and then perform a random under-sampling. +# +# Here, we illustrate this method by implementing a function in charge of +# resampling and use the :class:`~imblearn.FunctionSampler` to integrate it +# within a :class:`~imblearn.pipeline.Pipeline` and +# :class:`~sklearn.model_selection.cross_validate`. 
# %% from collections import Counter @@ -113,32 +123,38 @@ from imblearn import FunctionSampler -def binomial_resampling(X, y): +def roughly_balanced_bagging(X, y, replace=False): + """Implementation of Roughly Balanced Bagging for binary problem.""" + # find the minority and majority classes class_counts = Counter(y) majority_class = max(class_counts, key=class_counts.get) minority_class = min(class_counts, key=class_counts.get) + # compute the number of sample to draw from the majority class using + # a negative binomial distribution n_minority_class = class_counts[minority_class] - n_majority_resampled = np.random.negative_binomial(n_minority_class, 0.5) + n_majority_resampled = np.random.negative_binomial(n=n_minority_class, p=0.5) + # draw randomly with or without replacement majority_indices = np.random.choice( np.flatnonzero(y == majority_class), size=n_majority_resampled, - replace=True, + replace=replace, ) minority_indices = np.random.choice( np.flatnonzero(y == minority_class), size=n_minority_class, - replace=True, + replace=replace, ) indices = np.hstack([majority_indices, minority_indices]) - X_res, y_res = X[indices], y[indices] - return X_res, y_res + return X[indices], y[indices] # Roughly Balanced Bagging -rbb = BalancedBaggingClassifier(sampler=FunctionSampler(func=binomial_resampling)) +rbb = BalancedBaggingClassifier( + sampler=FunctionSampler(func=roughly_balanced_bagging, kw_args={"replace": True}) +) cv_results = cross_validate(rbb, X, y, scoring="balanced_accuracy") print(f"{cv_results['test_score'].mean():.3f} +/- {cv_results['test_score'].std():.3f}") diff --git a/imblearn/ensemble/tests/test_bagging.py b/imblearn/ensemble/tests/test_bagging.py index 28b87c4bc..c999bb372 100644 --- a/imblearn/ensemble/tests/test_bagging.py +++ b/imblearn/ensemble/tests/test_bagging.py @@ -8,7 +8,7 @@ import numpy as np import pytest -from sklearn.datasets import load_iris, make_hastie_10_2 +from sklearn.datasets import load_iris, make_hastie_10_2, make_classification from sklearn.model_selection import ( GridSearchCV, ParameterGrid, @@ -24,6 +24,7 @@ from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose +from imblearn import FunctionSampler from imblearn.datasets import make_imbalance from imblearn.ensemble import BalancedBaggingClassifier from imblearn.over_sampling import RandomOverSampler, SMOTE @@ -550,3 +551,56 @@ def test_balanced_bagging_classifier_samplers(sampler, n_samples_bootstrap): assert_array_equal( list(clf.estimators_[0][-1].class_counts_.values()), n_samples_bootstrap ) + + +@pytest.mark.parametrize("replace", [True, False]) +def test_balanced_bagging_classifier_with_function_sampler(replace): + # check that we can provide a FunctionSampler in BalancedBaggingClassifier + X, y = make_classification( + n_samples=1_000, + n_features=10, + n_classes=2, + weights=[0.3, 0.7], + random_state=0, + ) + + def roughly_balanced_bagging(X, y, replace=False): + """Implementation of Roughly Balanced Bagging for binary problem.""" + # find the minority and majority classes + class_counts = Counter(y) + majority_class = max(class_counts, key=class_counts.get) + minority_class = min(class_counts, key=class_counts.get) + + # compute the number of sample to draw from the majority class using + # a negative binomial distribution + n_minority_class = class_counts[minority_class] + n_majority_resampled = np.random.negative_binomial(n=n_minority_class, p=0.5) + + # draw randomly with or without replacement + majority_indices = 
np.random.choice( + np.flatnonzero(y == majority_class), + size=n_majority_resampled, + replace=replace, + ) + minority_indices = np.random.choice( + np.flatnonzero(y == minority_class), + size=n_minority_class, + replace=replace, + ) + indices = np.hstack([majority_indices, minority_indices]) + + return X[indices], y[indices] + + # Roughly Balanced Bagging + rbb = BalancedBaggingClassifier( + base_estimator=CountDecisionTreeClassifier(), + n_estimators=2, + sampler=FunctionSampler( + func=roughly_balanced_bagging, kw_args={"replace": replace} + ), + ) + rbb.fit(X, y) + + for estimator in rbb.estimators_: + class_counts = estimator[-1].class_counts_ + assert (class_counts[0] / class_counts[1]) > 0.9 From 41a5720dac8bc0cf1f7f6aa28a0288b758c24389 Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 18 Feb 2021 11:24:08 +0100 Subject: [PATCH 08/10] iter --- imblearn/ensemble/tests/test_bagging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/imblearn/ensemble/tests/test_bagging.py b/imblearn/ensemble/tests/test_bagging.py index c999bb372..f3eff340a 100644 --- a/imblearn/ensemble/tests/test_bagging.py +++ b/imblearn/ensemble/tests/test_bagging.py @@ -603,4 +603,4 @@ def roughly_balanced_bagging(X, y, replace=False): for estimator in rbb.estimators_: class_counts = estimator[-1].class_counts_ - assert (class_counts[0] / class_counts[1]) > 0.9 + assert (class_counts[0] / class_counts[1]) > 0.8 From 18297c0f73b0768054a21d109ef95447eb438d3d Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 18 Feb 2021 11:34:01 +0100 Subject: [PATCH 09/10] iter --- doc/bibtex/refs.bib | 29 +++++++++++++++++++++++++++++ doc/ensemble.rst | 6 ++++++ references.bib | 29 +++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) diff --git a/doc/bibtex/refs.bib b/doc/bibtex/refs.bib index 469d4abe8..fc9d9a475 100644 --- a/doc/bibtex/refs.bib +++ b/doc/bibtex/refs.bib @@ -244,3 +244,32 @@ @article{wilson1997improved pages={1--34}, year={1997} } + +@inproceedings{wang2009diversity, + title={Diversity analysis on imbalanced data sets by using ensemble models}, + author={Wang, Shuo and Yao, Xin}, + booktitle={2009 IEEE symposium on computational intelligence and data mining}, + pages={324--331}, + year={2009}, + organization={IEEE} +} + +@article{hido2009roughly, + title={Roughly balanced bagging for imbalanced data}, + author={Hido, Shohei and Kashima, Hisashi and Takahashi, Yutaka}, + journal={Statistical Analysis and Data Mining: The ASA Data Science Journal}, + volume={2}, + number={5-6}, + pages={412--426}, + year={2009}, + publisher={Wiley Online Library} +} + +@article{maclin1997empirical, + title={An empirical evaluation of bagging and boosting}, + author={Maclin, Richard and Opitz, David}, + journal={AAAI/IAAI}, + volume={1997}, + pages={546--551}, + year={1997} +} diff --git a/doc/ensemble.rst b/doc/ensemble.rst index 7b2322124..886a2b02e 100644 --- a/doc/ensemble.rst +++ b/doc/ensemble.rst @@ -60,6 +60,12 @@ sampling is controlled by the parameter `sampler` or the two parameters >>> balanced_accuracy_score(y_test, y_pred) # doctest: +ELLIPSIS 0.8... +Changing the `sampler` will give rise to different known implementation +:cite:`maclin1997empirical`, :cite:`hido2009roughly`, +:cite:`wang2009diversity`. You can refer to the following example shows in +practice these different methods: +:ref:`sphx_glr_auto_examples_ensemble_plot_bagging_classifier.py` + .. 
_forest: Forest of randomized trees diff --git a/references.bib b/references.bib index 398f9e4c3..c803f0ae9 100644 --- a/references.bib +++ b/references.bib @@ -219,3 +219,32 @@ @article{wilson1997improved pages={1--34}, year={1997} } + +@inproceedings{wang2009diversity, + title={Diversity analysis on imbalanced data sets by using ensemble models}, + author={Wang, Shuo and Yao, Xin}, + booktitle={2009 IEEE symposium on computational intelligence and data mining}, + pages={324--331}, + year={2009}, + organization={IEEE} +} + +@article{hido2009roughly, + title={Roughly balanced bagging for imbalanced data}, + author={Hido, Shohei and Kashima, Hisashi and Takahashi, Yutaka}, + journal={Statistical Analysis and Data Mining: The ASA Data Science Journal}, + volume={2}, + number={5-6}, + pages={412--426}, + year={2009}, + publisher={Wiley Online Library} +} + +@article{maclin1997empirical, + title={An empirical evaluation of bagging and boosting}, + author={Maclin, Richard and Opitz, David}, + journal={AAAI/IAAI}, + volume={1997}, + pages={546--551}, + year={1997} +} From d085512ee7659e492c6d31c2fe0519f7f2238f87 Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 18 Feb 2021 12:24:00 +0100 Subject: [PATCH 10/10] iter --- doc/whats_new/v0.8.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/whats_new/v0.8.rst b/doc/whats_new/v0.8.rst index 126af8862..c97f1b3c9 100644 --- a/doc/whats_new/v0.8.rst +++ b/doc/whats_new/v0.8.rst @@ -24,6 +24,11 @@ New features only containing categorical features. :pr:`802` by :user:`Guillaume Lemaitre `. +- Add the possibility to pass any type of samplers in + :class:`imblearn.ensemble.BalancedBaggingClassifier` unlocking the + implementation of methods based on resampled bagging. + :pr:`808` by :user:`Guillaume Lemaitre `. + Enhancements ............
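
A minimal usage sketch of the new `sampler` parameter described in the changelog entry above, assuming imbalanced-learn 0.8+ with this patch series applied. It mirrors the calls added in `examples/ensemble/plot_bagging_classifier.py` and the new tests; the synthetic dataset, the scoring choice, and the variable names below are illustrative assumptions, not part of the patch.

from sklearn.datasets import make_classification
from sklearn.model_selection import cross_validate

from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler

# Illustrative imbalanced toy problem (1:9 class ratio), as in the example
# script added by this patch.
X, y = make_classification(
    n_samples=10_000,
    n_features=10,
    weights=[0.1, 0.9],
    class_sep=0.5,
    random_state=0,
)

# Exactly Balanced Bagging: each bootstrap sample is balanced by random
# under-sampling. This matches the default behaviour when sampler=None,
# where `sampling_strategy` and `replacement` control the under-sampler.
ebb = BalancedBaggingClassifier(sampler=RandomUnderSampler(), random_state=0)

# SMOTE-Bagging: the same ensemble, but each bootstrap sample is balanced
# by SMOTE over-sampling instead of under-sampling.
smote_bagging = BalancedBaggingClassifier(sampler=SMOTE(), random_state=0)

for name, clf in [("Exactly Balanced Bagging", ebb), ("SMOTE-Bagging", smote_bagging)]:
    cv_results = cross_validate(clf, X, y, scoring="balanced_accuracy")
    print(f"{name}: {cv_results['test_score'].mean():.3f}")

Because the resampler is applied inside the per-estimator pipeline, any sampler exposing the imbalanced-learn fit_resample API (including a FunctionSampler, as in the Roughly Balanced Bagging example above) can be plugged in without changing the rest of the ensemble configuration.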