diff --git a/examples/glm/ols.py b/examples/glm/plot_ols.py
similarity index 100%
rename from examples/glm/ols.py
rename to examples/glm/plot_ols.py
diff --git a/scikits/learn/datasets/samples_generator/nonlinear.py b/scikits/learn/datasets/samples_generator.py
old mode 100755
new mode 100644
similarity index 52%
rename from scikits/learn/datasets/samples_generator/nonlinear.py
rename to scikits/learn/datasets/samples_generator.py
index db44141562a4e02d6b78ca571b8fabb04f81e6c0..11628fac836c5318ea019d38a2a64ca7c353bc05
--- a/scikits/learn/datasets/samples_generator/nonlinear.py
+++ b/scikits/learn/datasets/samples_generator.py
@@ -1,6 +1,32 @@
 import numpy as np
 import numpy.random as nr
 
+def sparse_uncorrelated(nb_samples=100, nb_features=10):
+    """
+    Function creating simulated data with sparse uncorrelated design.
+    (cf.Celeux et al. 2009,  Bayesian regularization in regression)
+    X = NR.normal(0,1)
+    Y = NR.normal(X[:,0]+2*X[:,1]-2*X[:,2]-1.5*X[:,3])
+    The number of features is at least 10.
+
+    Parameters
+    ----------
+    nb_samples : int
+                 number of samples (default is 100).
+    nb_features : int
+                  number of features (default is 10).
+
+    Returns
+    -------
+    X : numpy array of shape (nb_samples, nb_features) for input samples
+    Y : numpy array of shape (nb_samples) for labels
+    """
+    X = nr.normal(loc=0, scale=1, size=(nb_samples, nb_features))
+    Y = nr.normal(loc=X[:, 0] + 2 * X[:, 1] - 2 * X[:,2] - 1.5 * X[:, 3],
+                  scale = np.ones(nb_samples))
+    return X, Y
+
+
 def friedman(nb_samples=100, nb_features=10,noise_std=1):
     """
     Function creating simulated data with non linearities 
@@ -27,4 +53,3 @@ def friedman(nb_samples=100, nb_features=10,noise_std=1):
     Y = 10*np.sin(X[:,0]*X[:,1]) + 20*(X[:,2]-0.5)**2 + 10*X[:,3] + 5*X[:,4]
     Y += noise_std*nr.normal(loc=0,scale=1,size=(nb_samples))
     return X,Y
-    
diff --git a/scikits/learn/datasets/samples_generator/__init__.py b/scikits/learn/datasets/samples_generator/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/scikits/learn/datasets/samples_generator/linear.py b/scikits/learn/datasets/samples_generator/linear.py
deleted file mode 100755
index 8492442812b5ea06b7459fb89ad8b3e7d3daac35..0000000000000000000000000000000000000000
--- a/scikits/learn/datasets/samples_generator/linear.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import numpy as np
-import numpy.random as nr
-
-def sparse_uncorrelated(nb_samples=100, nb_features=10):
-    """
-    Function creating simulated data with sparse uncorrelated design.
-    (cf.Celeux et al. 2009,  Bayesian regularization in regression)
-    X = NR.normal(0,1)
-    Y = NR.normal(X[:,0]+2*X[:,1]-2*X[:,2]-1.5*X[:,3])
-    The number of features is at least 10.
-
-    Parameters
-    ----------
-    nb_samples : int
-                 number of samples (defaut is 100).
-    nb_features : int
-                  number of features (defaut is 5).
-
-    Returns
-    -------
-    X : numpy array of shape (nb_samples, nb_features) for input samples
-    Y : numpy array of shape (nb_samples) for labels
-    """
-    X = nr.normal(loc=0, scale=1, size=(nb_samples, nb_features))
-    Y = nr.normal(loc=X[:, 0] + 2 * X[:, 1] - 2 * X[:,2] - 1.5 * X[:, 3],
-                  scale = np.ones(nb_samples))
-    return X, Y
diff --git a/scikits/learn/datasets/setup.py b/scikits/learn/datasets/setup.py
index 579b3fb084b8b06451cb0edd19cf950488f4ce38..e97f6bd64832af118179bbd74d6631b4ea4a0ef7 100755
--- a/scikits/learn/datasets/setup.py
+++ b/scikits/learn/datasets/setup.py
@@ -3,7 +3,6 @@
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('datasets',parent_package,top_path)
-    config.add_subpackage('samples_generator')
     config.add_data_dir('data')
     config.add_data_dir('descr')
     return config