From e55d0bcba5ed5a270d140fefdfa4b1ee2ed1a1eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tom=20Dupr=C3=A9=20la=20Tour?= <tom.dupre-la-tour@m4x.org>
Date: Wed, 12 Jul 2017 16:37:49 +0200
Subject: [PATCH] FIX broken link in gallery and bad title rendering

---
 doc/modules/model_evaluation.rst                         | 4 ++--
 examples/model_selection/plot_multi_metric_evaluation.py | 5 ++++-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index dee5865bdd..813a39339e 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -212,8 +212,8 @@ the following two rules:
 
 .. _multimetric_scoring:
 
-Using mutiple metric evaluation
--------------------------------
+Using multiple metric evaluation
+--------------------------------
 
 Scikit-learn also permits evaluation of multiple metrics in ``GridSearchCV``,
 ``RandomizedSearchCV`` and ``cross_validate``.
diff --git a/examples/model_selection/plot_multi_metric_evaluation.py b/examples/model_selection/plot_multi_metric_evaluation.py
index 5f4491e51f..ea7d60dc20 100644
--- a/examples/model_selection/plot_multi_metric_evaluation.py
+++ b/examples/model_selection/plot_multi_metric_evaluation.py
@@ -1,4 +1,7 @@
-"""Demonstration of multi-metric evaluation on cross_val_score and GridSearchCV
+"""
+============================================================================
+Demonstration of multi-metric evaluation on cross_val_score and GridSearchCV
+============================================================================
 
 Multiple metric parameter search can be done by setting the ``scoring``
 parameter to a list of metric scorer names or a dict mapping the scorer names
-- 
GitLab
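
The example docstring touched by this patch describes passing ``scoring`` as a list of
scorer names or a dict mapping scorer names to callables. The sketch below is not part
of the patch; it is a minimal illustration of that usage, assuming scikit-learn 0.19+
where multi-metric scoring is available. The dataset, estimator, parameter grid, and
the "AUC"/"Accuracy" scorer names are illustrative choices, not taken from the patched files.

    # Minimal sketch (not from the patch): multi-metric grid search.
    from sklearn.datasets import make_classification
    from sklearn.model_selection import GridSearchCV
    from sklearn.tree import DecisionTreeClassifier

    X, y = make_classification(n_samples=200, random_state=0)

    # ``scoring`` maps chosen scorer names to built-in scorer strings;
    # ``refit`` names the metric used to select the best estimator when
    # several metrics are evaluated.
    search = GridSearchCV(
        DecisionTreeClassifier(random_state=0),
        param_grid={"min_samples_split": [2, 10, 50]},
        scoring={"AUC": "roc_auc", "Accuracy": "accuracy"},
        refit="AUC",
        cv=5,
    )
    search.fit(X, y)

    # Per-metric results appear in cv_results_ with the scorer name as suffix.
    print(search.cv_results_["mean_test_AUC"])
    print(search.cv_results_["mean_test_Accuracy"])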