diff --git a/doc/datasets/index.rst b/doc/datasets/index.rst
index 09abd44bcebcacdc5c4f2694a9586f16d590226b..cbc0cedbc02a5b1c0909cc406269c11922d680b1 100644
--- a/doc/datasets/index.rst
+++ b/doc/datasets/index.rst
@@ -29,7 +29,7 @@ below in the :ref:`sample_images` section.
 The dataset generation functions and the svmlight loader share a simplistic
 interface, returning a tuple ``(X, y)`` consisting of a ``n_samples`` *
 ``n_features`` numpy array ``X`` and an array of length ``n_samples``
- containing the targets ``y``.
+containing the targets ``y``.
 
 The toy datasets as well as the 'real world' datasets and the datasets
 fetched from mldata.org have more sophisticated structure.
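
For orientation (not part of the patch), a minimal sketch of the ``(X, y)``
interface described in the docs hunk above, assuming scikit-learn is
installed; ``make_classification`` is one of the dataset generation
functions:

    from sklearn.datasets import make_classification

    # X has shape (n_samples, n_features); y has length n_samples.
    X, y = make_classification(n_samples=100, n_features=20)
    print(X.shape)  # (100, 20)
    print(y.shape)  # (100,)
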
diff --git a/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py b/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
index a56a45e6d20dc0e25ab48bddceae32f5e2cc54cf..488db4d64855db5452dbf1992211292b0734a329 100644
--- a/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
+++ b/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
@@ -68,7 +68,7 @@ mem = Memory(cachedir=cachedir, verbose=1)
 # Ward agglomeration followed by BayesianRidge
 connectivity = grid_to_graph(n_x=size, n_y=size)
 ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
-                            memory=mem, n_components=1)
+                            memory=mem)
 clf = Pipeline([('ward', ward), ('ridge', ridge)])
 # Select the optimal number of parcels with grid search
 clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
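
Outside the patch, a minimal self-contained sketch of the same pattern with
the ``n_components`` argument dropped; the random data, the small ``size``,
and importing ``GridSearchCV`` from ``sklearn.model_selection`` are
assumptions here, and the ``Memory`` caching used in the example is omitted
for brevity:

    import numpy as np
    from sklearn.cluster import FeatureAgglomeration
    from sklearn.feature_extraction.image import grid_to_graph
    from sklearn.linear_model import BayesianRidge
    from sklearn.model_selection import GridSearchCV
    from sklearn.pipeline import Pipeline

    size = 8  # assumed 8x8 feature grid, i.e. 64 features
    connectivity = grid_to_graph(n_x=size, n_y=size)
    ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity)
    clf = Pipeline([('ward', ward), ('ridge', BayesianRidge())])
    # Select the number of parcels with grid search, as in the example.
    clf = GridSearchCV(clf, {'ward__n_clusters': [2, 5, 10]}, cv=3)

    rng = np.random.RandomState(0)
    X = rng.rand(30, size * size)
    y = rng.rand(30)
    clf.fit(X, y)
    print(clf.best_params_)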