diff --git a/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py b/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
index fd39f8a81d46ba04e216ad59e30c648d64259dd6..951a4f2c6c71f912c8d817379661400c81d42c6f 100644
--- a/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
+++ b/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
@@ -67,7 +67,7 @@ ward = WardAgglomeration(n_clusters=10, connectivity=A, memory=mem,
                          n_components=1)
 clf = Pipeline([('ward', ward), ('ridge', ridge)])
 # Select the optimal number of parcels with grid search
-clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]})
+clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1)
 clf.fit(X, y, cv=cv)  # set the best parameters
 coef_ = clf.best_estimator.steps[-1][1].coef_
 coef_ = clf.best_estimator.steps[0][1].inverse_transform(coef_)
diff --git a/examples/gaussian_process/gp_diabetes_dataset.py b/examples/gaussian_process/gp_diabetes_dataset.py
index 61e4b79d978a858b880ededc8d12f050e106eb36..d814fd1a1d1bde2c6d46b36d3d44acaa012549cb 100644
--- a/examples/gaussian_process/gp_diabetes_dataset.py
+++ b/examples/gaussian_process/gp_diabetes_dataset.py
@@ -46,6 +46,6 @@ gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE
 # Perform a cross-validation estimate of the coefficient of determination using
 # the cross_val module using all CPUs available on the machine
 K = 20  # folds
-R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K)).mean()
+R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=-1).mean()
 print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
       % (K, R2))
diff --git a/examples/grid_search_text_feature_extraction.py b/examples/grid_search_text_feature_extraction.py
index 054f3022b30df68e0f79104993e47c363f0621d4..164cd934bd7b817093f5042947c0d41815f98b5f 100644
--- a/examples/grid_search_text_feature_extraction.py
+++ b/examples/grid_search_text_feature_extraction.py
@@ -105,7 +105,7 @@ parameters = {
 
 # find the best parameters for both the feature extraction and the
 # classifier
-grid_search = GridSearchCV(pipeline, parameters)
+grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
 
 # cross-validation doesn't work if the length of the data is not known,
 # hence use lists instead of iterators
diff --git a/examples/plot_permutation_test_for_classification.py b/examples/plot_permutation_test_for_classification.py
index 854ec23b7fdc0ea49250af82afba720968d76964..d41eb0c3c78d24704a14ae59fc7bdfe95f2258eb 100644
--- a/examples/plot_permutation_test_for_classification.py
+++ b/examples/plot_permutation_test_for_classification.py
@@ -44,7 +44,7 @@ cv = StratifiedKFold(y, 2)
 
 score, permutation_scores, pvalue = permutation_test_score(svm, X, y,
                                         zero_one_score, cv=cv,
-                                        n_permutations=100)
+                                        n_permutations=100, n_jobs=1)
 
 print "Classification score %s (pvalue : %s)" % (score, pvalue)
 
diff --git a/examples/svm/plot_svm_anova.py b/examples/svm/plot_svm_anova.py
index f018613fc3e86e552df1ff72d3fc1b98b1cf07d0..6187afe801218d78b39c388146e0e357cf133f22 100644
--- a/examples/svm/plot_svm_anova.py
+++ b/examples/svm/plot_svm_anova.py
@@ -42,7 +42,7 @@ percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
 
 for percentile in percentiles:
     clf._set_params(anova__percentile=percentile)
     # Compute cross-validation score using all CPUs
-    this_scores = cross_val.cross_val_score(clf, X, y)
+    this_scores = cross_val.cross_val_score(clf, X, y, n_jobs=1)
     score_means.append(this_scores.mean())
     score_stds.append(this_scores.std())
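
Note: every hunk above threads an explicit n_jobs argument through scikit-learn's
cross-validation helpers. In scikit-learn (via joblib), n_jobs=-1 fans the work out
across all available CPU cores, while n_jobs=1 forces serial execution. Below is a
minimal standalone sketch of the same pattern, assuming a modern scikit-learn
install; the sklearn.model_selection module paths are assumptions and differ from
the historical layout patched in this diff.

# Minimal sketch of the n_jobs pattern introduced by the patch above.
# Assumes a current scikit-learn; module paths differ from the old layout.
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

# n_jobs=-1: run the CV folds in parallel on all available CPU cores.
scores = cross_val_score(SVC(kernel='linear'), X, y, cv=5, n_jobs=-1)
print("CV accuracy: %0.3f +/- %0.3f" % (scores.mean(), scores.std()))

# n_jobs=1: evaluate the parameter grid serially (useful when the
# estimator is itself parallel, or when debugging).
search = GridSearchCV(SVC(), {'C': [0.1, 1, 10]}, cv=5, n_jobs=1)
search.fit(X, y)
print("best C: %s" % search.best_params_['C'])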