Indentation-only cleanup of the scikit-learn example gallery: every hunk in this patch re-indents the wrapped continuation lines of a plotting call, leaving the arguments and behavior of each call unchanged. The one hunk that also re-wraps an argument list is in examples/plot_feature_selection.py, where width=.2 moves from the opening line of the second plt.bar call onto its continuation line. The 63 example files and the calls whose wrapped lines are re-indented:

examples/applications/plot_species_distribution_modeling.py: plt.contour (coastlines), plt.scatter (train and test points)
examples/cluster/plot_adjusted_for_chance_measures.py: two multi-line plt.title calls
examples/cluster/plot_affinity_propagation.py: plt.plot (cluster centers)
examples/cluster/plot_cluster_comparison.py: plt.subplots_adjust, plt.text (timing annotation)
examples/cluster/plot_dbscan.py: plt.plot (core and non-core samples)
examples/cluster/plot_dict_face_patches.py: plt.imshow, plt.suptitle
examples/cluster/plot_digits_agglomeration.py: plt.imshow (restored digits and labels)
examples/cluster/plot_kmeans_digits.py: plt.imshow, plt.scatter (centroids), plt.title
examples/cluster/plot_kmeans_stability_low_dim_dense.py: plt.plot (cluster centers), plt.title
examples/cluster/plot_lena_segmentation.py: plt.contour
examples/cluster/plot_lena_ward_segmentation.py: plt.contour
examples/cluster/plot_mean_shift.py: plt.plot (cluster centers)
examples/cluster/plot_mini_batch_kmeans.py: plt.text (train time and inertia)
examples/covariance/plot_covariance_estimation.py: plt.plot (real covariance likelihood), plt.vlines (Ledoit-Wolf, OAS, cross-validation estimates)
examples/covariance/plot_lw_vs_oas.py: plt.errorbar (MSE and shrinkage curves)
examples/covariance/plot_robust_vs_empirical_covariance.py: plt.errorbar and plt.plot (location and covariance error curves)
examples/covariance/plot_sparse_cov.py: plt.imshow (covariance and precision matrices)
examples/cross_decomposition/plot_compare_cross_decomposition.py: four plt.title calls (test correlations)
examples/datasets/plot_random_dataset.py: plt.title
examples/decomposition/plot_faces_decomposition.py: plt.imshow in plot_gallery
examples/decomposition/plot_image_denoising.py: plt.imshow (dictionary atoms), plt.suptitle, plt.imshow (difference image)
examples/decomposition/plot_pca_vs_fa_model_selection.py: plt.axvline (PCA CV, FactorAnalysis CV, PCA MLE), plt.axhline (Shrunk Covariance, LedoitWolf)
examples/exercises/plot_cv_diabetes.py: plt.semilogx (error bands)
examples/exercises/plot_iris_exercise.py: plt.contour
examples/linear_model/plot_ard.py: plt.plot (relevant features)
examples/linear_model/plot_bayesian_ridge.py: plt.plot (relevant features)
examples/linear_model/plot_lasso_and_elasticnet.py: plt.title
examples/linear_model/plot_lasso_coordinate_descent_path.py: plt.legend
examples/linear_model/plot_lasso_model_selection.py: plt.plot, plt.axvline and plt.title in the AIC/BIC, LassoCV and LassoLarsCV figures
examples/linear_model/plot_multi_task_lasso_support.py: plt.plot (MultiTaskLasso coefficients)
examples/linear_model/plot_ols.py: plt.plot (regression line)
examples/linear_model/plot_omp.py: plt.suptitle
examples/linear_model/plot_sgd_iris.py: plt.scatter, plt.plot (hyperplanes)
examples/linear_model/plot_sgd_loss_functions.py: plt.plot (six loss curves)
examples/linear_model/plot_sgd_weighted_samples.py: plt.scatter, plt.legend
examples/manifold/plot_compare_methods.py: plt.suptitle
examples/manifold/plot_lle_digits.py: plt.text (digit labels)
examples/manifold/plot_manifold_sphere.py: plt.suptitle
examples/mixture/plot_gmm_classifier.py: plt.subplots_adjust, plt.scatter, plt.text (train and test accuracy)
examples/mixture/plot_gmm_selection.py: plt.bar (BIC scores)
examples/neighbors/plot_classification.py: plt.title
examples/neighbors/plot_nearest_centroid.py: plt.title
examples/neighbors/plot_regression.py: plt.title
examples/plot_classification_probability.py: plt.imshow
examples/plot_digits_pipe.py: plt.axvline (n_components chosen)
examples/plot_feature_selection.py: plt.bar (univariate scores and SVM weights after selection)
examples/plot_johnson_lindenstrauss_bound.py: two plt.title calls
examples/plot_lda_qda.py: plt.pcolormesh, plt.plot (class means)
examples/plot_permutation_test_for_classification.py: plt.plot (classification score line)
examples/plot_precision_recall.py: plt.plot (micro-average and per-class curves)
examples/plot_roc.py: plt.plot (micro-average and per-class curves)
examples/plot_roc_crossval.py: plt.plot (mean ROC)
examples/plot_train_error_vs_test_error.py: plt.vlines (optimum on test)
examples/semi_supervised/plot_label_propagation_structure.py: plt.plot (outer and inner labeled points), plt.legend in both subplots
examples/svm/plot_custom_kernel.py: plt.title
examples/svm/plot_oneclass.py: plt.legend
examples/svm/plot_rbf_parameters.py: plt.title
examples/svm/plot_separating_hyperplane.py: plt.scatter (support vectors)
examples/svm/plot_svm_kernels.py: plt.scatter (support vectors), plt.contour
examples/svm/plot_svm_margin.py: plt.scatter (support vectors)
examples/svm/plot_svm_nonlinear.py: plt.imshow, plt.contour
examples/svm/plot_svm_scale_c.py: plt.semilogx, plt.title
examples/tree/plot_iris.py: plt.scatter
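Every one of these calls follows the same pattern: a matplotlib function whose keyword arguments wrap onto continuation lines aligned under the opening parenthesis, the visual-indent style that pycodestyle's E127/E128 checks expect. A minimal, self-contained sketch of that style follows; the data, contour levels and title text are invented for illustration and are not taken from any of the examples above.

import numpy as np
import matplotlib.pyplot as plt

# Invented stand-in data; each example in the patch builds its own.
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
zz = np.hypot(xx, yy)

# Wrapped arguments line up under the first character after the opening
# parenthesis, so re-indenting them never changes what the call does.
plt.contour(xx, yy, zz,
            levels=[1.0, 2.0],
            colors='k',
            linestyles='solid')
plt.title("Visual-indent continuation style\n"
          "used throughout the example gallery")
plt.xticks(())
plt.yticks(())
plt.show()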