From 2fc4c96a9014ab37031f9aac45e2025aa32eb65d Mon Sep 17 00:00:00 2001
From: Fabian Pedregosa <fabian.pedregosa@inria.fr>
Date: Sat, 30 Jul 2011 07:49:56 +0200
Subject: [PATCH] Revert "Remove hardcoded n_jobs from examples."

This reverts commit 68871955639b19b1e66427c814b0a41bb9f3e18e.
---
 .../plot_feature_agglomeration_vs_univariate_selection.py | 2 +-
 examples/gaussian_process/gp_diabetes_dataset.py          | 2 +-
 examples/grid_search_text_feature_extraction.py           | 2 +-
 examples/plot_permutation_test_for_classification.py      | 2 +-
 examples/svm/plot_svm_anova.py                            | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py b/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
index fd39f8a81d..951a4f2c6c 100644
--- a/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
+++ b/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
@@ -67,7 +67,7 @@ ward = WardAgglomeration(n_clusters=10, connectivity=A, memory=mem,
                          n_components=1)
 clf = Pipeline([('ward', ward), ('ridge', ridge)])
 # Select the optimal number of parcels with grid search
-clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]})
+clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1)
 clf.fit(X, y, cv=cv)  # set the best parameters
 coef_ = clf.best_estimator.steps[-1][1].coef_
 coef_ = clf.best_estimator.steps[0][1].inverse_transform(coef_)
diff --git a/examples/gaussian_process/gp_diabetes_dataset.py b/examples/gaussian_process/gp_diabetes_dataset.py
index 61e4b79d97..d814fd1a1d 100644
--- a/examples/gaussian_process/gp_diabetes_dataset.py
+++ b/examples/gaussian_process/gp_diabetes_dataset.py
@@ -46,6 +46,6 @@ gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE
 # Perform a cross-validation estimate of the coefficient of determination using
 # the cross_val module using all CPUs available on the machine
 K = 20  # folds
-R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K)).mean()
+R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=-1).mean()
 print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
       % (K, R2))
diff --git a/examples/grid_search_text_feature_extraction.py b/examples/grid_search_text_feature_extraction.py
index 054f3022b3..164cd934bd 100644
--- a/examples/grid_search_text_feature_extraction.py
+++ b/examples/grid_search_text_feature_extraction.py
@@ -105,7 +105,7 @@ parameters = {
 
 # find the best parameters for both the feature extraction and the
 # classifier
-grid_search = GridSearchCV(pipeline, parameters)
+grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
 
 # cross-validation doesn't work if the length of the data is not known,
 # hence use lists instead of iterators
diff --git a/examples/plot_permutation_test_for_classification.py b/examples/plot_permutation_test_for_classification.py
index 854ec23b7f..d41eb0c3c7 100644
--- a/examples/plot_permutation_test_for_classification.py
+++ b/examples/plot_permutation_test_for_classification.py
@@ -44,7 +44,7 @@ cv = StratifiedKFold(y, 2)
 
 score, permutation_scores, pvalue = permutation_test_score(svm, X, y,
                                     zero_one_score, cv=cv,
-                                    n_permutations=100)
+                                    n_permutations=100, n_jobs=1)
 
 print "Classification score %s (pvalue : %s)" % (score, pvalue)
diff --git a/examples/svm/plot_svm_anova.py b/examples/svm/plot_svm_anova.py
index f018613fc3..6187afe801 100644
--- a/examples/svm/plot_svm_anova.py
+++ b/examples/svm/plot_svm_anova.py
@@ -42,7 +42,7 @@ percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
 
 for percentile in percentiles:
     clf._set_params(anova__percentile=percentile)
     # Compute cross-validation score using all CPUs
-    this_scores = cross_val.cross_val_score(clf, X, y)
+    this_scores = cross_val.cross_val_score(clf, X, y, n_jobs=1)
     score_means.append(this_scores.mean())
     score_stds.append(this_scores.std())
-- 
GitLab