Skip to content
Snippets Groups Projects
Commit 2fc4c96a authored by Fabian Pedregosa's avatar Fabian Pedregosa
Browse files

Revert "Remove hardcoded n_jobs from examples."

This reverts commit 68871955.
parent 18936410
No related branches found
No related tags found
No related merge requests found
...@@ -67,7 +67,7 @@ ward = WardAgglomeration(n_clusters=10, connectivity=A, memory=mem, ...@@ -67,7 +67,7 @@ ward = WardAgglomeration(n_clusters=10, connectivity=A, memory=mem,
n_components=1) n_components=1)
clf = Pipeline([('ward', ward), ('ridge', ridge)]) clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search # Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}) clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1)
clf.fit(X, y, cv=cv) # set the best parameters clf.fit(X, y, cv=cv) # set the best parameters
coef_ = clf.best_estimator.steps[-1][1].coef_ coef_ = clf.best_estimator.steps[-1][1].coef_
coef_ = clf.best_estimator.steps[0][1].inverse_transform(coef_) coef_ = clf.best_estimator.steps[0][1].inverse_transform(coef_)
......
...@@ -46,6 +46,6 @@ gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE ...@@ -46,6 +46,6 @@ gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using # Perform a cross-validation estimate of the coefficient of determination using
# the cross_val module using all CPUs available on the machine # the cross_val module using all CPUs available on the machine
K = 20 # folds K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K)).mean() R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=-1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s" print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2)) % (K, R2))
...@@ -105,7 +105,7 @@ parameters = { ...@@ -105,7 +105,7 @@ parameters = {
# find the best parameters for both the feature extraction and the # find the best parameters for both the feature extraction and the
# classifier # classifier
grid_search = GridSearchCV(pipeline, parameters) grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
# cross-validation doesn't work if the length of the data is not known, # cross-validation doesn't work if the length of the data is not known,
# hence use lists instead of iterators # hence use lists instead of iterators
......
...@@ -44,7 +44,7 @@ cv = StratifiedKFold(y, 2) ...@@ -44,7 +44,7 @@ cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(svm, X, y, score, permutation_scores, pvalue = permutation_test_score(svm, X, y,
zero_one_score, cv=cv, zero_one_score, cv=cv,
n_permutations=100) n_permutations=100, n_jobs=1)
print "Classification score %s (pvalue : %s)" % (score, pvalue) print "Classification score %s (pvalue : %s)" % (score, pvalue)
......
...@@ -42,7 +42,7 @@ percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100) ...@@ -42,7 +42,7 @@ percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles: for percentile in percentiles:
clf._set_params(anova__percentile=percentile) clf._set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs # Compute cross-validation score using all CPUs
this_scores = cross_val.cross_val_score(clf, X, y) this_scores = cross_val.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean()) score_means.append(this_scores.mean())
score_stds.append(this_scores.std()) score_stds.append(this_scores.std())
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment