diff --git a/scikits/learn/pyem/Changelog b/scikits/learn/pyem/Changelog
index 83d11290ac06d688c85f04bc4a92466920574661..17a022f1b10b30a2d4f8c2f6c444a57a18da99e2 100644
--- a/scikits/learn/pyem/Changelog
+++ b/scikits/learn/pyem/Changelog
@@ -1,3 +1,10 @@
+pyem (0.4.2) Fri, 04 Aug 2006 19:37:47 +0900
+
+	* put version to 0.4.2
+	* adapt to new version of numpy (1.0b2SVN)
+
+-- David Cournapeau <david@ar.media.kyoto-u.ac.jp> 
+
 pyem (0.4) Fri, 14 Jul 2006 17:49:57 +0900
 
 	* put version to 0.4.1
diff --git a/scikits/learn/pyem/LICENSE.txt b/scikits/learn/pyem/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..27727c5eae9a2dd901ec2f0896fdb4cf3795f5e4
--- /dev/null
+++ b/scikits/learn/pyem/LICENSE.txt
@@ -0,0 +1,29 @@
+Copyright (c) 2001, 2002 Enthought, Inc.
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  a. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+  b. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+  c. Neither the name of the Enthought nor the names of its contributors
+     may be used to endorse or promote products derived from this software
+     without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
diff --git a/scikits/learn/pyem/MANIFEST.in b/scikits/learn/pyem/MANIFEST.in
index b41ff1e9e327fc751d2298a7ba9721168919007d..202a8e31e4398030cf55326fb80fd0c1bc341ae0 100644
--- a/scikits/learn/pyem/MANIFEST.in
+++ b/scikits/learn/pyem/MANIFEST.in
@@ -2,3 +2,4 @@ include pyem/src/c_numpy.pxd
 include pyem/src/c_python.pxd
 include Changelog
 include TODO
+include LICENSE.txt
diff --git a/scikits/learn/pyem/README b/scikits/learn/pyem/README
index 32064b746b1612130e39c97eb403c363fc145157..48d577cb9b4669b5cb5bd8d6098bdd6a365b733c 100644
--- a/scikits/learn/pyem/README
+++ b/scikits/learn/pyem/README
@@ -1,12 +1,25 @@
-Last Change: Wed Jul 12 04:00 PM 2006 J
+Last Change: Fri Aug 04 07:00 PM 2006 J
+
+Version 0.4.2
 
 pyem is a python module build upon numpy and scipy
 (see http://www.scipy.org/) for learning mixtures models
 using Expectation Maximization. For now, only Gaussian
 Mixture Models are implemented. Included features:
-    - computation of Gaussian pdf for multi-variate Gaussian
-    random vectors (spherical, diagonal and full covariance matrices)
-    - Sampling of Gaussian Mixtures Models
-    - Confidence ellipsoides with probability 0.5
-    - Classic EM for Gaussian Mixture Models
-    - K-mean based initialization
+    
+ * computation of Gaussian pdf for multi-variate Gaussian
+ random vectors (spherical, diagonal and full covariance matrices)
+ * Sampling of Gaussian Mixtures Models
+ * Confidence ellipsoids with probability (fixed level of 
+ 0.39 for now)
+ * Classic EM for Gaussian Mixture Models
+ * K-mean based and random initialization for EM available
+
+Has been tested on the following platforms:
+
+ * Ubuntu dapper, dual Xeon 3.2 GHz, 2 GB RAM
+ python 2.4 + pyrex, numpy 1.0b2SVN + scipy 0.5.1SVN, uses atlas3-sse2
+ * Ubuntu dapper, Pentium M 1.2 GHz, 512 MB RAM
+ python 2.4 + pyrex, numpy 1.0b2SVN + scipy 0.5.1SVN, uses atlas3-sse2
+ * Ubuntu dapper, Mac mini (PPC G4 1.42 GHz, 1 GB RAM)
+ python 2.4 + pyrex, numpy 1.0b2SVN + scipy 0.5.1SVN, uses atlas3-sse2
diff --git a/scikits/learn/pyem/pyem/__init__.py b/scikits/learn/pyem/pyem/__init__.py
index 980b8a283f6b2c3c60efa668986ae93f629cb427..b66857e8ce8e2ec78135c690b68291a3139cb7eb 100644
--- a/scikits/learn/pyem/pyem/__init__.py
+++ b/scikits/learn/pyem/pyem/__init__.py
@@ -1,7 +1,7 @@
 #! /usr/bin/env python
-# Last Change: Fri Jul 14 05:00 PM 2006 J
+# Last Change: Fri Aug 04 07:00 PM 2006 J
 
-version = '0.4.1'
+version = '0.4.2'
 
 from gauss_mix import GmParamError, GM
 from gmm_em import GmmParamError, GMM
diff --git a/scikits/learn/pyem/pyem/densities.py b/scikits/learn/pyem/pyem/densities.py
index 7a4db93e7de008bac3be608cbfd8fd1f8c6b23ef..8d9427a4b9d923d3be6004791801256ec8487bc8 100644
--- a/scikits/learn/pyem/pyem/densities.py
+++ b/scikits/learn/pyem/pyem/densities.py
@@ -1,7 +1,7 @@
 #! /usr/bin/python
 #
 # Copyrighted David Cournapeau
-# Last Change: Fri Jul 14 05:00 PM 2006 J
+# Last Change: Fri Aug 04 07:00 PM 2006 J
 
 import numpy as N
 import numpy.linalg as lin
@@ -146,13 +146,13 @@ def _full_gauss_den(x, mu, va, log):
     # n       = N.size(x, 0)
     # y       = N.zeros(n, float)
     # for i in range(n):
-    #     y[i] = N.matrixmultiply(x[i,:],
-    #              N.matrixmultiply(inva, N.transpose(x[i,:])))
+    #     y[i] = N.dot(x[i,:],
+    #              N.dot(inva, N.transpose(x[i,:])))
     # y *= -0.5
 
     # we are using a trick with sum to "emulate" 
     # the matrix multiplication inva * x without any explicit loop
-    y   = N.matrixmultiply((x-mu), inva)
+    y   = N.dot((x-mu), inva)
     y   = -0.5 * N.sum(y * (x-mu), 1)
 
     if not log:
@@ -197,8 +197,8 @@ def gauss_ell(mu, va, dim = [0, 1], npoints = 100):
     mu  = mu[dim]
     if mode == 'diag':
         va      = va[dim]
-        elps    = N.outerproduct(mu, N.ones(npoints, float))
-        elps    += N.matrixmultiply(N.diag(N.sqrt(va)), circle)
+        elps    = N.outer(mu, N.ones(npoints, float))
+        elps    += N.dot(N.diag(N.sqrt(va)), circle)
     elif mode == 'full':
         va  = va[c,:][:,c]
         # Method: compute the cholesky decomp of each cov matrix, that is
@@ -209,8 +209,8 @@ def gauss_ell(mu, va, dim = [0, 1], npoints = 100):
         #   - va = cova' * cova (matlab)
         # So take care when comparing results with matlab !
         cova    = lin.cholesky(va)
-        elps    = N.outerproduct(mu, N.ones(npoints, float))
-        elps    += N.matrixmultiply(cova, circle)
+        elps    = N.outer(mu, N.ones(npoints, float))
+        elps    += N.dot(cova, circle)
     else:
         raise DenParam("var mode not recognized")
 
@@ -225,7 +225,7 @@ def generate_test_data(n, d, mode = 'diag', file='test.dat'):
         va  = abs(randn(1, d))
     elif mode == 'full':
         va  = randn(d, d)
-        va  = matrixmultiply(va, va.transpose())
+        va  = dot(va, va.transpose())
 
     input   = randn(n, d)
     output  = gauss_den(input, mu, va)
@@ -374,7 +374,7 @@ if __name__ == "__main__":
 
     # Generate a multivariate gaussian of mean mu and covariance va
     X       = randn(1e3, 2)
-    Yc      = N.matrixmultiply(N.diag(N.sqrt(va)), X.transpose())
+    Yc      = N.dot(N.diag(N.sqrt(va)), X.transpose())
     Yc      = Yc.transpose() + mu
 
     # Plotting
@@ -391,7 +391,7 @@ if __name__ == "__main__":
 
     # Generate a multivariate gaussian of mean mu and covariance va
     X       = randn(1e3, 2)
-    Yc      = N.matrixmultiply(lin.cholesky(va), X.transpose())
+    Yc      = N.dot(lin.cholesky(va), X.transpose())
     Yc      = Yc.transpose() + mu
 
     # Plotting
diff --git a/scikits/learn/pyem/pyem/gauss_mix.py b/scikits/learn/pyem/pyem/gauss_mix.py
index 395e1b194dc816de58e4d2d90cc45adb6fe7fba6..0a9a5f5b22c185570d0a54143c7600f82805b9cc 100644
--- a/scikits/learn/pyem/pyem/gauss_mix.py
+++ b/scikits/learn/pyem/pyem/gauss_mix.py
@@ -1,5 +1,5 @@
 # /usr/bin/python
-# Last Change: Fri Jul 14 05:00 PM 2006 J
+# Last Change: Fri Aug 04 07:00 PM 2006 J
 
 # Module to implement GaussianMixture class.
 
@@ -194,7 +194,7 @@ def gen_rand_index(p, n):
     # different
     invcdf  = N.cumsum(p)
     uni     = rand(n)
-    index   = N.zeros(n)
+    index   = N.zeros(n, dtype=int)
 
     # This one should be a bit faster
     for k in range(len(p)-1, 0, -1):
diff --git a/scikits/learn/pyem/pyem/gmm_em.py b/scikits/learn/pyem/pyem/gmm_em.py
index c77afc79e12ec6138edffeae4250d10a7ce2cd99..49b0e4a500d48da55d7b265f94e1aef2a189c271 100644
--- a/scikits/learn/pyem/pyem/gmm_em.py
+++ b/scikits/learn/pyem/pyem/gmm_em.py
@@ -1,5 +1,5 @@
 # /usr/bin/python
-# Last Change: Fri Jul 14 05:00 PM 2006 J
+# Last Change: Fri Aug 04 07:00 PM 2006 J
 
 import numpy as N
 import numpy.linalg as lin
@@ -129,7 +129,7 @@ class GMM(ExpMixtureModel):
         # multiply by the weight
         tgd	*= self.gm.w
         # Normalize to get a pdf
-        gd	= tgd  / N.sum(tgd, axis=1)[:, N.NewAxis]
+        gd	= tgd  / N.sum(tgd, axis=1)[:, N.newaxis]
 
         return gd, tgd
 
diff --git a/scikits/learn/pyem/pyem/info.py b/scikits/learn/pyem/pyem/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6afbf445b52ed2b513d991dfb77b052648d1889
--- /dev/null
+++ b/scikits/learn/pyem/pyem/info.py
@@ -0,0 +1,9 @@
+"""
+Routines for Gaussian Mixture Models
+and Expectation Maximization learning
+=====================================
+
+Copyright: David Cournapeau 2006
+License: BSD-style (see LICENSE.txt in main source directory)
+"""
+
diff --git a/scikits/learn/pyem/pyem/profile_gmm.py b/scikits/learn/pyem/pyem/profile_gmm.py
index 6dc5a3c01328a0c1120bf978b797cd557521d679..f5293d7fdcda5a80c5b9e494b3e45084d46b281d 100644
--- a/scikits/learn/pyem/pyem/profile_gmm.py
+++ b/scikits/learn/pyem/pyem/profile_gmm.py
@@ -1,5 +1,4 @@
 import numpy as N
-import tables
 from gmm_em import GM, GMM
 import copy