From a7279003e35462ad709d4336faf216b2e79c8e9a Mon Sep 17 00:00:00 2001
From: Mathieu Blondel <mathieu@mblondel.org>
Date: Mon, 29 Aug 2011 19:27:09 +0900
Subject: [PATCH] Address @agramfort and @ogrisel's comments.

---
 doc/modules/multiclass.rst  | 19 ++++++++++++-------
 scikits/learn/multiclass.py | 38 ++++++++++++++-----------------------
 2 files changed, 26 insertions(+), 31 deletions(-)

diff --git a/doc/modules/multiclass.rst b/doc/modules/multiclass.rst
index b84c0bf46f..411769042e 100644
--- a/doc/modules/multiclass.rst
+++ b/doc/modules/multiclass.rst
@@ -123,10 +123,15 @@ Example::
 .. topic:: References:
 
  * [1] "Solving multiclass learning problems via error-correcting ouput codes",
-    Dietterich T., Bakiri G., Journal of Artificial Intelligence Research 2.
-
- * [2] "The error coding method and PICTs", James G., Hastie T., Journal of
-    Computational and Graphical statistics 7.
-
- * [3] "The Elements of Statistical Learning", Hastie T., Tibshirani R.,
-    Friedman J., page 606 (second-edition).
+    Dietterich T., Bakiri G.,
+    Journal of Artificial Intelligence Research 2,
+    1995.
+
+ * [2] "The error coding method and PICTs",
+   James G., Hastie T.,
+   Journal of Computational and Graphical statistics 7,
+   1998.
+
+ * [3] "The Elements of Statistical Learning",
+   Hastie T., Tibshirani R., Friedman J., page 606 (second-edition),
+   2008.
diff --git a/scikits/learn/multiclass.py b/scikits/learn/multiclass.py
index 953cc7e346..6e7722e59e 100644
--- a/scikits/learn/multiclass.py
+++ b/scikits/learn/multiclass.py
@@ -25,16 +25,14 @@ from scikits.learn.metrics.pairwise import euclidean_distances
 
 
 def fit_binary(estimator, X, y):
-    """Fit a single binary estimator.
-    """
+    """Fit a single binary estimator."""
     estimator = clone(estimator)
     estimator.fit(X, y)
     return estimator
 
 
 def predict_binary(estimator, X):
-    """Make predictions using a single binary estimator.
-    """
+    """Make predictions using a single binary estimator."""
     if hasattr(estimator, "decision_function"):
         return np.ravel(estimator.decision_function(X))
     else:
@@ -43,8 +41,7 @@ def predict_binary(estimator, X):
 
 
 def check_estimator(estimator):
-    """Make sure that an estimator implements the necessary methods.
-    """
+    """Make sure that an estimator implements the necessary methods."""
     if not hasattr(estimator, "decision_function") and \
        not hasattr(estimator, "predict_proba"):
         raise ValueError("The base estimator should implement "
@@ -52,8 +49,7 @@ def check_estimator(estimator):
 
 
 def fit_ovr(estimator, X, y):
-    """Fit a one-vs-the-rest strategy.
-    """
+    """Fit a one-vs-the-rest strategy."""
     check_estimator(estimator)
 
     lb = LabelBinarizer()
@@ -63,8 +59,7 @@ def fit_ovr(estimator, X, y):
 
 
 def predict_ovr(estimators, label_binarizer, X):
-    """Make predictions using the one-vs-the-rest strategy.
-    """
+    """Make predictions using the one-vs-the-rest strategy."""
     Y = np.array([predict_binary(e, X) for e in estimators]).T
     return label_binarizer.inverse_transform(Y)
 
@@ -139,10 +134,9 @@ class OneVsRestClassifier(BaseEstimator, ClassifierMixin):
 
 
 def fit_ovo_binary(estimator, X, y, i, j):
-    """Fit a single binary estimator (one-vs-one).
-    """
+    """Fit a single binary estimator (one-vs-one)."""
     cond = np.logical_or(y == i, y == j)
-    y = y[cond].copy()
+    y = y[cond]
     y[y == i] = 0
     y[y == j] = 1
     ind = np.arange(X.shape[0])
@@ -150,8 +144,7 @@ def fit_ovo_binary(estimator, X, y, i, j):
 
 
 def fit_ovo(estimator, X, y):
-    """Fit a one-vs-one strategy.
-    """
+    """Fit a one-vs-one strategy."""
     classes = np.unique(y)
     n_classes = classes.shape[0]
     estimators = [fit_ovo_binary(estimator, X, y, classes[i], classes[j])
@@ -161,8 +154,7 @@ def fit_ovo(estimator, X, y):
 
 
 def predict_ovo(estimators, classes, X):
-    """Make predictions using the one-vs-one strategy.
-    """
+    """Make predictions using the one-vs-one strategy."""
     n_samples = X.shape[0]
     n_classes = classes.shape[0]
     votes = np.zeros((n_samples, n_classes))
@@ -247,8 +239,7 @@ class OneVsOneClassifier(BaseEstimator, ClassifierMixin):
 
 
 def fit_ecoc(estimator, X, y, code_size):
-    """Fit an error-correcting output-code strategy.
-    """
+    """Fit an error-correcting output-code strategy."""
     check_estimator(estimator)
 
     classes = np.unique(y)
@@ -276,8 +267,7 @@ def fit_ecoc(estimator, X, y, code_size):
 
 
 def predict_ecoc(estimators, classes, code_book, X):
-    """Make predictions using the error-correcting output-code strategy.
-    """
+    """Make predictions using the error-correcting output-code strategy."""
     Y = np.array([predict_binary(e, X) for e in estimators]).T
     pred = euclidean_distances(Y, code_book).argmin(axis=1)
     return classes[pred]
@@ -322,13 +312,13 @@ class OutputCodeClassifier(BaseEstimator, ClassifierMixin):
     ----------
     [1] "Solving multiclass learning problems via error-correcting ouput
         codes", Dietterich T., Bakiri G., Journal of Artificial Intelligence
-        Research 2.
+        Research 2, 1995.
 
     [2] "The error coding method and PICTs", James G., Hastie T., Journal of
-    Computational and Graphical statistics 7.
+    Computational and Graphical statistics 7, 1998.
 
     [3] "The Elements of Statistical Learning", Hastie T., Tibshirani R.,
-    Friedman J., page 606 (second-edition).
+    Friedman J., page 606 (second-edition), 2008.
     """
 
     def __init__(self, estimator, code_size=1.5):
-- 
GitLab