diff --git a/sklearn/tests/test_common.py b/sklearn/tests/test_common.py
index 0875dd5334ebcd3ae33a988d4b5206e2eabd84ba..946bdfbac58cfa63713c2465105dbb527a03133a 100644
--- a/sklearn/tests/test_common.py
+++ b/sklearn/tests/test_common.py
@@ -147,7 +147,8 @@ def test_classifiers():
         yield check_classifiers_train, name, Classifier
         if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
             # TODO some complication with -1 label
-                and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
+                and name not in ["DecisionTreeClassifier",
+                                 "ExtraTreeClassifier"]):
                 # We don't raise a warning in these classifiers, as
                 # the column y interface is used by the forests.
 
@@ -362,9 +363,8 @@ def test_non_transformer_estimators_n_iter():
                     continue
 
                 # Tested in test_transformer_n_iter below
-                elif name in CROSS_DECOMPOSITION or (
-                    name in ['LinearSVC', 'LogisticRegression']
-                    ):
+                elif (name in CROSS_DECOMPOSITION or
+                      name in ['LinearSVC', 'LogisticRegression']):
                     continue
 
                 else:
diff --git a/sklearn/tests/test_cross_validation.py b/sklearn/tests/test_cross_validation.py
index 085720f11fc3a2913d30ff607e2be6e86e98bb26..709f5ba40e5ae85b60c496c4a3a5aca93aedfc79 100644
--- a/sklearn/tests/test_cross_validation.py
+++ b/sklearn/tests/test_cross_validation.py
@@ -54,7 +54,7 @@ class MockClassifier(BaseEstimator):
     def fit(self, X, Y=None, sample_weight=None, class_prior=None,
             sparse_sample_weight=None, sparse_param=None, dummy_int=None,
             dummy_str=None, dummy_obj=None, callback=None):
-        """The dummy arguments are to test that this fit function can 
+        """The dummy arguments are to test that this fit function can
         accept non-array arguments through cross-validation, such as:
             - int
             - str (this is actually array-like)
@@ -82,18 +82,17 @@ class MockClassifier(BaseEstimator):
                         ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                         len(np.unique(y))))
         if sparse_sample_weight is not None:
+            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
+                   '.shape[0] is {0}, should be {1}')
             assert_true(sparse_sample_weight.shape[0] == X.shape[0],
-                        'MockClassifier extra fit_param sparse_sample_weight'
-                        '.shape[0] is {0}, should be {1}'
-                            .format(sparse_sample_weight.shape[0], X.shape[0]))
+                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
         if sparse_param is not None:
+            fmt = ('MockClassifier extra fit_param sparse_param.shape '
+                   'is ({0}, {1}), should be ({2}, {3})')
             assert_true(sparse_param.shape == P_sparse.shape,
-                        'MockClassifier extra fit_param sparse_param.shape '
-                        'is ({0}, {1}), should be ({2}, {3})'
-                            .format(sparse_param.shape[0],
-                                    sparse_param.shape[1],
-                                    P_sparse.shape[0],
-                                    P_sparse.shape[1]))
+                        fmt.format(sparse_param.shape[0],
+                                   sparse_param.shape[1],
+                                   P_sparse.shape[0], P_sparse.shape[1]))
         return self
 
     def predict(self, T):
@@ -108,7 +107,7 @@ class MockClassifier(BaseEstimator):
 X = np.ones((10, 2))
 X_sparse = coo_matrix(X)
 W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
-                      shape=(10,1))
+                      shape=(10, 1))
 P_sparse = coo_matrix(np.eye(5))
 y = np.arange(10) // 2
 
@@ -592,6 +591,7 @@ def test_cross_val_score_fit_params():
     DUMMY_INT = 42
     DUMMY_STR = '42'
     DUMMY_OBJ = object()
+
     def assert_fit_params(clf):
         """Function to test that the values are passsed correctly to the
         classifier arguments for non-array type
@@ -691,9 +691,9 @@ def train_test_split_mock_pandas():
     X_train, X_test = cval.train_test_split(X_df)
     assert_true(isinstance(X_train, MockDataFrame))
     assert_true(isinstance(X_test, MockDataFrame))
-    X_train_array, X_test_array = cval.train_test_split(X_df, force_arrays=True)
-    assert_true(isinstance(X_train_array, np.ndarray))
-    assert_true(isinstance(X_test_array, np.ndarray))
+    X_train_arr, X_test_arr = cval.train_test_split(X_df, force_arrays=True)
+    assert_true(isinstance(X_train_arr, np.ndarray))
+    assert_true(isinstance(X_test_arr, np.ndarray))
 
 
 def test_cross_val_score_with_score_func_classification():
diff --git a/sklearn/tests/test_grid_search.py b/sklearn/tests/test_grid_search.py
index b8f20189036137090d3e0bd46acf53f10fd7538e..1caf1854672e466ef227255cf0566b62e2b76b80 100644
--- a/sklearn/tests/test_grid_search.py
+++ b/sklearn/tests/test_grid_search.py
@@ -9,7 +9,6 @@ from sklearn.externals.six.moves import xrange
 from itertools import chain, product
 import pickle
 import sys
-import warnings
 
 import numpy as np
 import scipy.sparse as sp
diff --git a/sklearn/tests/test_learning_curve.py b/sklearn/tests/test_learning_curve.py
index 62a05dd19799e458be6561607b82386be024c64c..c062abfab718903f5a11db24e78b0cfcb5d57258 100644
--- a/sklearn/tests/test_learning_curve.py
+++ b/sklearn/tests/test_learning_curve.py
@@ -47,8 +47,8 @@ class MockImprovingEstimator(BaseEstimator):
 class MockIncrementalImprovingEstimator(MockImprovingEstimator):
     """Dummy classifier that provides partial_fit"""
     def __init__(self, n_max_train_sizes):
-        super(MockIncrementalImprovingEstimator, self).__init__(
-              n_max_train_sizes)
+        super(MockIncrementalImprovingEstimator,
+              self).__init__(n_max_train_sizes)
         self.x = None
 
     def _is_training_data(self, X):
@@ -89,7 +89,7 @@ def test_learning_curve():
         train_sizes, train_scores, test_scores = learning_curve(
             estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
     if len(w) > 0:
-      raise RuntimeError("Unexpected warning: %r" % w[0].message)
+        raise RuntimeError("Unexpected warning: %r" % w[0].message)
     assert_equal(train_scores.shape, (10, 3))
     assert_equal(test_scores.shape, (10, 3))
     assert_array_equal(train_sizes, np.linspace(2, 20, 10))
@@ -245,10 +245,11 @@ def test_validation_curve():
     param_range = np.linspace(0, 1, 10)
     with warnings.catch_warnings(record=True) as w:
         train_scores, test_scores = validation_curve(
-          MockEstimatorWithParameter(), X, y, param_name="param",
-          param_range=param_range, cv=2)
+            MockEstimatorWithParameter(), X, y, param_name="param",
+            param_range=param_range, cv=2
+        )
     if len(w) > 0:
-      raise RuntimeError("Unexpected warning: %r" % w[0].message)
+        raise RuntimeError("Unexpected warning: %r" % w[0].message)
 
     assert_array_almost_equal(train_scores.mean(axis=1), param_range)
     assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
diff --git a/sklearn/tests/test_multiclass.py b/sklearn/tests/test_multiclass.py
index c2897274cc69187b596691c2ae11ab5b5429501c..274f3d0df6efe40021f73cdac4d5de59f4ee7d32 100644
--- a/sklearn/tests/test_multiclass.py
+++ b/sklearn/tests/test_multiclass.py
@@ -94,7 +94,7 @@ def test_ovr_fit_predict_sparse():
                                                        random_state=0)
 
         X_train, Y_train = X[:80], Y[:80]
-        X_test, Y_test = X[80:], Y[80:]
+        X_test = X[80:]
 
         clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
         Y_pred = clf.predict(X_test)
@@ -291,7 +291,7 @@ def test_ovr_multilabel_predict_proba():
                                                        return_indicator=True,
                                                        random_state=0)
         X_train, Y_train = X[:80], Y[:80]
-        X_test, Y_test = X[80:], Y[80:]
+        X_test = X[80:]
         clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
 
         # decision function only estimator. Fails in current implementation.
@@ -316,7 +316,7 @@ def test_ovr_single_label_predict_proba():
     base_clf = MultinomialNB(alpha=1)
     X, Y = iris.data, iris.target
     X_train, Y_train = X[:80], Y[:80]
-    X_test, Y_test = X[80:], Y[80:]
+    X_test = X[80:]
     clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
 
     # decision function only estimator. Fails in current implementation.
@@ -343,7 +343,7 @@ def test_ovr_multilabel_decision_function():
                                                    return_indicator=True,
                                                    random_state=0)
     X_train, Y_train = X[:80], Y[:80]
-    X_test, Y_test = X[80:], Y[80:]
+    X_test = X[80:]
     clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
     assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
                        clf.predict(X_test))
@@ -354,7 +354,7 @@ def test_ovr_single_label_decision_function():
                                         n_features=20,
                                         random_state=0)
     X_train, Y_train = X[:80], Y[:80]
-    X_test, Y_test = X[80:], Y[80:]
+    X_test = X[80:]
     clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
     assert_array_equal(clf.decision_function(X_test).ravel() > 0,
                        clf.predict(X_test))
@@ -521,12 +521,13 @@ def test_ecoc_gridsearch():
     best_C = cv.best_estimator_.estimators_[0].C
     assert_true(best_C in Cs)
 
+
 @ignore_warnings
 def test_deprecated():
     base_estimator = DecisionTreeClassifier(random_state=0)
     X, Y = iris.data, iris.target
     X_train, Y_train = X[:80], Y[:80]
-    X_test, Y_test = X[80:], Y[80:]
+    X_test = X[80:]
 
     all_metas = [
         (OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
@@ -545,10 +546,10 @@ def test_deprecated():
             meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
             fitted_return = fit_func(base_estimator, X_train, Y_train)
 
-
         if len(fitted_return) == 2:
             estimators_, classes_or_lb = fitted_return
-            assert_almost_equal(predict_func(estimators_, classes_or_lb, X_test),
+            assert_almost_equal(predict_func(estimators_, classes_or_lb,
+                                             X_test),
                                 meta_est.predict(X_test))
 
             if proba_func is not None:
diff --git a/sklearn/tests/test_naive_bayes.py b/sklearn/tests/test_naive_bayes.py
index d80353c5ee8c8d485c8dfdae9a6be8c4a628b86c..603e49e6bc03085bec9d909d2333da1bc4cb2421 100644
--- a/sklearn/tests/test_naive_bayes.py
+++ b/sklearn/tests/test_naive_bayes.py
@@ -52,6 +52,7 @@ def test_gnb():
     # FIXME Remove this test once the more general partial_fit tests are merged
     assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
 
+
 def test_gnb_prior():
     """Test whether class priors are properly set. """
     clf = GaussianNB().fit(X, y)