From 60c2a251e38bb90e1a5e886efc18324c7da9d5d6 Mon Sep 17 00:00:00 2001
From: Ron Weiss <ronweiss@gmail.com>
Date: Sun, 7 Nov 2010 16:32:39 -0500
Subject: [PATCH] fix pep8, pyflakes errors

---
 scikits/learn/gmm.py            |  8 ++++----
 scikits/learn/hmm.py            | 22 +++++++++++-----------
 scikits/learn/tests/test_gmm.py |  8 ++++----
 scikits/learn/tests/test_hmm.py | 33 ++++++++++++++++++---------------
 4 files changed, 37 insertions(+), 34 deletions(-)

diff --git a/scikits/learn/gmm.py b/scikits/learn/gmm.py
index 1ce12b6e4f..ae1046a319 100644
--- a/scikits/learn/gmm.py
+++ b/scikits/learn/gmm.py
@@ -474,7 +474,7 @@ class GMM(BaseEstimator):
                 cv, self._cvtype, self._n_states)
         elif not hasattr(self, 'covars'):
                 self.covars = _distribute_covar_matrix_to_match_cvtype(
-                    np.eye(self.n_features), cvtype, n_states)
+                    np.eye(self.n_features), self.cvtype, self.n_states)
 
         # EM algorithm
         logprob = []
@@ -562,9 +562,9 @@ def _lmvnpdffull(obs, means, covars):
     log_prob = np.empty((nobs,nmix))
     for c, (mu, cv) in enumerate(itertools.izip(means, covars)):
         cv_chol = linalg.cholesky(cv, lower=True)
-        cv_det  = np.prod(np.diagonal(cv_chol))**2
-        cv_sol  = solve_triangular(cv_chol, (obs - mu).T, lower=True)
-        log_prob[:, c]  = -.5 * (np.sum(cv_sol**2, axis=0) + \
+        cv_det = np.prod(np.diagonal(cv_chol)) ** 2
+        cv_sol = solve_triangular(cv_chol, (obs - mu).T, lower=True)
+        log_prob[:, c] = -.5 * (np.sum(cv_sol ** 2, axis=0) + \
                            ndim * np.log(2 * np.pi) + np.log(cv_det))
 
     return log_prob
diff --git a/scikits/learn/hmm.py b/scikits/learn/hmm.py
index f748d4108d..41a432ae32 100644
--- a/scikits/learn/hmm.py
+++ b/scikits/learn/hmm.py
@@ -274,7 +274,7 @@ class _BaseHMM(BaseEstimator):
         currstate = (startprob_cdf > rand).argmax()
         obs = [self._generate_sample_from_state(currstate)]
 
-        for x in xrange(n-1):
+        for x in xrange(n - 1):
             rand = np.random.rand()
             currstate = (transmat_cdf[currstate] > rand).argmax()
             obs.append(self._generate_sample_from_state(currstate))
@@ -403,8 +403,8 @@ class _BaseHMM(BaseEstimator):
 
         lattice[0] = self._log_startprob + framelogprob[0]
         for n in xrange(1, nobs):
-            idx = self._prune_states(lattice[n-1], maxrank, beamlogprob)
-            pr = self._log_transmat[idx].T + lattice[n-1,idx]
+            idx = self._prune_states(lattice[n - 1], maxrank, beamlogprob)
+            pr = self._log_transmat[idx].T + lattice[n - 1,idx]
             lattice[n] = np.max(pr, axis=1) + framelogprob[n]
             traceback[n] = np.argmax(pr, axis=1)
         lattice[lattice <= ZEROLOGPROB] = -np.Inf
@@ -427,9 +427,9 @@ class _BaseHMM(BaseEstimator):
 
         fwdlattice[0] = self._log_startprob + framelogprob[0]
         for n in xrange(1, nobs):
-            idx = self._prune_states(fwdlattice[n-1], maxrank, beamlogprob)
+            idx = self._prune_states(fwdlattice[n - 1], maxrank, beamlogprob)
             fwdlattice[n] = (logsum(self._log_transmat[idx].T
-                                    + fwdlattice[n-1,idx], axis=1)
+                                    + fwdlattice[n - 1,idx], axis=1)
                              + framelogprob[n])
         fwdlattice[fwdlattice <= ZEROLOGPROB] = -np.Inf
 
@@ -449,7 +449,7 @@ class _BaseHMM(BaseEstimator):
                                      -50)
                                      #beamlogprob)
                                      #-np.Inf)
-            bwdlattice[n-1] = logsum(self._log_transmat[:,idx]
+            bwdlattice[n - 1] = logsum(self._log_transmat[:,idx]
                                      + bwdlattice[n,idx] + framelogprob[n,idx],
                                      axis=1)
         bwdlattice[bwdlattice <= ZEROLOGPROB] = -np.Inf
@@ -514,7 +514,7 @@ class _BaseHMM(BaseEstimator):
             stats['start'] += posteriors[0]
         if 't' in params:
             for t in xrange(len(framelogprob)):
-                zeta = (fwdlattice[t-1][:,np.newaxis] + self._log_transmat
+                zeta = (fwdlattice[t - 1][:,np.newaxis] + self._log_transmat
                         + framelogprob[t] + bwdlattice[t])
                 stats['trans'] += np.exp(zeta - logsum(zeta))
 
@@ -718,7 +718,7 @@ class GaussianHMM(_BaseHMM):
 
         if 'c' in params:
             if self._cvtype in ('spherical', 'diag'):
-                stats['obs**2'] += np.dot(posteriors.T, obs**2)
+                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
             elif self._cvtype in ('tied', 'full'):
                 for t, o in enumerate(obs):
                     obsobsT = np.outer(o, o)
@@ -754,10 +754,10 @@ class GaussianHMM(_BaseHMM):
             meandiff = self._means - means_prior
 
             if self._cvtype in ('spherical', 'diag'):
-                cv_num = (means_weight * (meandiff)**2
+                cv_num = (means_weight * (meandiff) ** 2
                           + stats['obs**2']
                           - 2 * self._means * stats['obs']
-                          + self._means**2 * denom)
+                          + self._means ** 2 * denom)
                 cv_den = max(covars_weight - 1, 0) + denom
                 if self._cvtype == 'spherical':
                     self._covars = (covars_prior / cv_den.mean(axis=1)
@@ -971,7 +971,7 @@ class GMMHMM(_BaseHMM):
         # XXX: Hotfit for n_mix that is incompatible with the scikit's
         # BaseEstimator API
         self.n_mix = n_mix
-        self.cvtype= cvtype
+        self.cvtype = cvtype
         if gmms is None:
             gmms = []
             for x in xrange(self.n_states):
diff --git a/scikits/learn/tests/test_gmm.py b/scikits/learn/tests/test_gmm.py
index 10f14f94ee..fd2089ca84 100644
--- a/scikits/learn/tests/test_gmm.py
+++ b/scikits/learn/tests/test_gmm.py
@@ -19,12 +19,14 @@ def _generate_random_spd_matrix(ndim):
     randspd = np.dot(np.dot(U, 1.0 + np.diag(np.random.rand(ndim))), V)
     return randspd
 
+
 def test_logsum_1D():
     A = np.random.rand(2) + 1.0
     for axis in range(1):
         Asum = gmm.logsum(A, axis)
         assert_array_almost_equal(np.exp(Asum), np.sum(np.exp(A), axis))
 
+
 def test_logsum_3D():
     """
     Test also on a 3D matrix
@@ -85,7 +87,7 @@ def _naive_lmvnpdf_diag(obs, mu, cv):
     ref = np.empty((len(obs), len(mu)))
     stds = np.sqrt(cv)
     for i, (m, std) in enumerate(itertools.izip(mu, stds)):
-       ref[:, i] = np.log(stats.norm.pdf(obs, m, std)).sum(axis=1)
+        ref[:, i] = np.log(stats.norm.pdf(obs, m, std)).sum(axis=1)
     return ref
 
 
@@ -118,7 +120,6 @@ def test_lmvnpdf_spherical():
     assert_array_almost_equal(lpr, reference)
 
 
-
 def test_lmvnpdf_full():
     n_features, n_states, n_obs = 2, 3, 10
 
@@ -133,7 +134,6 @@ def test_lmvnpdf_full():
     assert_array_almost_equal(lpr, reference)
 
 
-
 def test_GMM_attributes():
     n_states, n_features = 10, 4
     cvtype = 'diag'
@@ -168,6 +168,7 @@ def test_GMM_attributes():
 
     assert_raises(ValueError, gmm.GMM, n_states=20, cvtype='badcvtype')
 
+
 class GMMTester():
     n_states = 10
     n_features = 4
@@ -181,7 +182,6 @@ class GMMTester():
               'full': np.array([_generate_random_spd_matrix(n_features) + 5 * I
                                 for x in xrange(n_states)])}
 
-
     def test_eval(self):
         g = gmm.GMM(self.n_states, self.cvtype)
         # Make sure the means are far apart so posteriors.argmax()
diff --git a/scikits/learn/tests/test_hmm.py b/scikits/learn/tests/test_hmm.py
index 5a80dacdc6..1b63253a48 100644
--- a/scikits/learn/tests/test_hmm.py
+++ b/scikits/learn/tests/test_hmm.py
@@ -7,7 +7,7 @@ from .test_gmm import _generate_random_spd_matrix
 
 from .. import hmm
 
-SKIP_FAILING = True # skip failing tests
+SKIP_FAILING = True  # skip failing tests
 
 
 class SeedRandomNumberGeneratorTestCase(TestCase):
@@ -221,11 +221,13 @@ class GaussianHMMParams(object):
     transmat = np.random.rand(n_states, n_states)
     transmat /= np.tile(transmat.sum(axis=1)[:,np.newaxis], (1, n_states))
     means = np.random.randint(-20, 20, (n_states, n_features))
-    covars = {'spherical': (1.0 + 2 * np.random.rand(n_states))**2,
-              'tied': _generate_random_spd_matrix(n_features) + np.eye(n_features),
-              'diag': (1.0 + 2 * np.random.rand(n_states, n_features))**2,
-              'full': np.array([_generate_random_spd_matrix(n_features)
-                                + np.eye(n_features) for x in xrange(n_states)])}
+    covars = {'spherical': (1.0 + 2 * np.random.rand(n_states)) ** 2,
+              'tied': (_generate_random_spd_matrix(n_features)
+                       + np.eye(n_features)),
+              'diag': (1.0 + 2 * np.random.rand(n_states, n_features)) ** 2,
+              'full': np.array(
+                  [_generate_random_spd_matrix(n_features) + np.eye(n_features)
+                   for x in xrange(n_states)])}
     expanded_covars = {'spherical': [np.eye(n_features) * cov
                                      for cov in covars['spherical']],
                        'diag': [np.diag(cov) for cov in covars['diag']],
@@ -328,11 +330,10 @@ class GaussianHMMTester(GaussianHMMParams):
                    % (self.cvtype, params, trainll, np.diff(trainll)))
         self.assertTrue(np.all(np.diff(trainll) > -0.5))
 
-
     def test_fit_with_priors(self, params='stmc', n_iter=10,
                              verbose=False):
-        startprob_prior = 10*self.startprob + 2.0
-        transmat_prior = 10*self.transmat + 2.0
+        startprob_prior = 10 * self.startprob + 2.0
+        transmat_prior = 10 * self.transmat + 2.0
         means_prior = self.means
         means_weight = 2.0
         covars_weight = 2.0
@@ -343,8 +344,8 @@ class GaussianHMMTester(GaussianHMMParams):
         h = hmm.GaussianHMM(self.n_states, self.cvtype)
         h.startprob = self.startprob
         h.startprob_prior = startprob_prior
-        h.transmat=hmm.normalize(self.transmat
-                                 + np.diag(np.random.rand(self.n_states)), 1)
+        h.transmat = hmm.normalize(self.transmat
+                                   + np.diag(np.random.rand(self.n_states)), 1)
         h.transmat_prior = transmat_prior
         h.means = 20 * self.means
         h.means_prior = means_prior
@@ -516,10 +517,12 @@ class GMMHMMParams(object):
         g = gmm.GMM(n_mix, cvtype=cvtype)
         g.means = np.random.randint(-20, 20, (n_mix, n_features))
         mincv = 0.1
-        g.covars = {'spherical': (mincv + mincv * np.random.rand(n_mix))**2,
+        g.covars = {'spherical': (mincv
+                                  + mincv * np.random.rand(n_mix)) ** 2,
                     'tied': _generate_random_spd_matrix(n_features)
                            + mincv * np.eye(n_features),
-                    'diag': (mincv + mincv * np.random.rand(n_mix, n_features))**2,
+                    'diag': (mincv
+                             + mincv * np.random.rand(n_mix, n_features)) ** 2,
                     'full': np.array([_generate_random_spd_matrix(n_features)
                                       + mincv * np.eye(n_features)
                                       for x in xrange(n_mix)])}[cvtype]
@@ -534,8 +537,8 @@ class TestGMMHMM(GMMHMMParams, SeedRandomNumberGeneratorTestCase):
         np.random.seed(self.seed)
         self.gmms = []
         for state in xrange(self.n_states):
-            self.gmms.append(self.create_random_gmm(self.n_mix, self.n_features,
-                                                    self.cvtype))
+            self.gmms.append(self.create_random_gmm(
+                self.n_mix, self.n_features, self.cvtype))
 
     def test_attributes(self):
         h = hmm.GMMHMM(self.n_states, cvtype=self.cvtype)
-- 
GitLab