From dc42bde3e9588ca7ed0232ff70dc8048a1411869 Mon Sep 17 00:00:00 2001
From: Raghav R V <rvraghav93@gmail.com>
Date: Thu, 19 May 2016 08:50:46 +0200
Subject: [PATCH] FIX Don't warn when scoring param is passed + PEP8 (#6798)
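
The ``scoring`` parameter has taken precedence over ``estimator.score``
since 0.16, so warning about that change on every call to ``score`` no
longer serves a purpose. Remove the ChangedBehaviorWarning from
BaseSearchCV.score together with the matching docstring note, update
the tests accordingly, and fix a few PEP8 issues on the way.

A minimal sketch of the now warning-free call (the data and parameter
grid below are hypothetical, purely for illustration):

    from sklearn.datasets import make_classification
    from sklearn.model_selection import GridSearchCV
    from sklearn.svm import LinearSVC

    X, y = make_classification(random_state=0)
    search = GridSearchCV(LinearSVC(), {'C': [0.1, 1.0]},
                          scoring='accuracy')
    search.fit(X, y)
    # scores with the 'accuracy' scorer, no ChangedBehaviorWarning
    search.score(X, y)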

---
 sklearn/model_selection/_search.py            | 22 +++++-----------------
 sklearn/model_selection/tests/test_search.py  | 22 ++++++++--------------
 .../model_selection/tests/test_validation.py  |  5 +++--
 3 files changed, 16 insertions(+), 33 deletions(-)

diff --git a/sklearn/model_selection/_search.py b/sklearn/model_selection/_search.py
index e079a21c56..72d17cd354 100644
--- a/sklearn/model_selection/_search.py
+++ b/sklearn/model_selection/_search.py
@@ -15,12 +15,11 @@ from collections import Mapping, namedtuple, Sized
 from functools import partial, reduce
 from itertools import product
 import operator
-import warnings
 
 import numpy as np
 
 from ..base import BaseEstimator, is_classifier, clone
-from ..base import MetaEstimatorMixin, ChangedBehaviorWarning
+from ..base import MetaEstimatorMixin
 from ._split import check_cv
 from ._validation import _fit_and_score
 from ..externals.joblib import Parallel, delayed
@@ -234,8 +233,8 @@ class ParameterSampler(object):
             if grid_size < self.n_iter:
                 raise ValueError(
                     "The total space of parameters %d is smaller "
-                    "than n_iter=%d." % (grid_size, self.n_iter)
-                    + " For exhaustive searches, use GridSearchCV.")
+                    "than n_iter=%d. For exhaustive searches, use "
+                    "GridSearchCV." % (grid_size, self.n_iter))
             for i in sample_without_replacement(grid_size, self.n_iter,
                                                 random_state=rnd):
                 yield param_grid[i]
@@ -404,24 +403,13 @@ class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
         Returns
         -------
         score : float
-
-        Notes
-        -----
-         * The long-standing behavior of this method changed in version 0.16.
-         * It no longer uses the metric provided by ``estimator.score`` if the
-           ``scoring`` parameter was set when fitting.
-
         """
         if self.scorer_ is None:
             raise ValueError("No score function explicitly defined, "
                              "and the estimator doesn't provide one %s"
                              % self.best_estimator_)
-        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
-            warnings.warn("The long-standing behavior to use the estimator's "
-                          "score function in {0}.score has changed. The "
-                          "scoring parameter is now used."
-                          "".format(self.__class__.__name__),
-                          ChangedBehaviorWarning)
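+        # scorer_ was built from the scoring parameter during fit; when
+        # scoring is None it falls back to the estimator's score method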
         return self.scorer_(self.best_estimator_, X, y)
 
     @if_delegate_has_method(delegate='estimator')
diff --git a/sklearn/model_selection/tests/test_search.py b/sklearn/model_selection/tests/test_search.py
index 1cf0abbbd5..21de129d08 100644
--- a/sklearn/model_selection/tests/test_search.py
+++ b/sklearn/model_selection/tests/test_search.py
@@ -20,7 +20,6 @@ from sklearn.utils.testing import assert_false, assert_true
 from sklearn.utils.testing import assert_array_equal
 from sklearn.utils.testing import assert_almost_equal
 from sklearn.utils.testing import assert_array_almost_equal
-from sklearn.utils.testing import assert_no_warnings
 from sklearn.utils.testing import ignore_warnings
 from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
 
@@ -44,8 +43,6 @@ from sklearn.model_selection import RandomizedSearchCV
 from sklearn.model_selection import ParameterGrid
 from sklearn.model_selection import ParameterSampler
 
-# TODO Import from sklearn.exceptions once merged.
-from sklearn.base import ChangedBehaviorWarning
 from sklearn.model_selection._validation import FitFailedWarning
 
 from sklearn.svm import LinearSVC, SVC
@@ -155,8 +152,7 @@ def test_grid_search():
     assert_equal(grid_search.best_estimator_.foo_param, 2)
 
     for i, foo_i in enumerate([1, 2, 3]):
-        assert_true(grid_search.grid_scores_[i][0]
-                    == {'foo_param': foo_i})
+        assert_true(grid_search.grid_scores_[i][0] == {'foo_param': foo_i})
     # Smoke test the score etc:
     grid_search.score(X, y)
     grid_search.predict_proba(X)
@@ -208,13 +204,11 @@ def test_grid_search_score_method():
 
-    # Check warning only occurs in situation where behavior changed:
-    # estimator requires score method to compete with scoring parameter
-    score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
-    score_accuracy = assert_warns(ChangedBehaviorWarning,
-                                  search_accuracy.score, X, y)
-    score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
-                                            X, y)
-    score_auc = assert_warns(ChangedBehaviorWarning,
-                             search_auc.score, X, y)
+    # Check that the scorer built from an explicit scoring parameter is
+    # used in preference to the estimator's score method
+    score_no_scoring = search_no_scoring.score(X, y)
+    score_accuracy = search_accuracy.score(X, y)
+    score_no_score_auc = search_no_score_method_auc.score(X, y)
+    score_auc = search_auc.score(X, y)
+
     # ensure the test is sane
     assert_true(score_auc < 1.0)
     assert_true(score_accuracy < 1.0)
diff --git a/sklearn/model_selection/tests/test_validation.py b/sklearn/model_selection/tests/test_validation.py
index f6d853df32..700c0fa510 100644
--- a/sklearn/model_selection/tests/test_validation.py
+++ b/sklearn/model_selection/tests/test_validation.py
@@ -379,8 +379,9 @@ def test_permutation_score():
 
     # test with custom scoring object
     def custom_score(y_true, y_pred):
-        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
-                / y_true.shape[0])
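+        # fraction correct minus fraction incorrect, in [-1, 1]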
+        return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
+                y_true.shape[0])
 
     scorer = make_scorer(custom_score)
     score, _, pvalue = permutation_test_score(
-- 
GitLab