diff --git a/examples/linear_model/plot_sparse_recovery.py b/examples/linear_model/plot_sparse_recovery.py
index 75b6d7a92d4a768d7e15d4df1397db7bc65c293f..5c080d96489e7bb92394796ce81a8a9abee2094b 100644
--- a/examples/linear_model/plot_sparse_recovery.py
+++ b/examples/linear_model/plot_sparse_recovery.py
@@ -53,6 +53,7 @@ from sklearn.feature_selection import f_regression
 from sklearn.preprocessing import Scaler
 from sklearn.metrics import auc, precision_recall_curve
 from sklearn.ensemble import ExtraTreesRegressor
+from sklearn.utils.extmath import pinvh
 
 
 def mutual_incoherence(X_relevant, X_irelevant):
@@ -60,7 +61,7 @@ def mutual_incoherence(X_relevant, X_irelevant):
     """
     projector = np.dot(
                     np.dot(X_irelevant.T, X_relevant),
-                    linalg.pinv(np.dot(X_relevant.T, X_relevant))
+                    pinvh(np.dot(X_relevant.T, X_relevant))
                     )
     return np.max(np.abs(projector).sum(axis=1))
 
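Why this swap is valid here: np.dot(X_relevant.T, X_relevant) is a symmetric positive semi-definite Gram matrix, so its pseudo-inverse can be taken through an eigendecomposition (pinvh) instead of the general SVD route used by linalg.pinv. A minimal sketch of the equivalence, using scipy.linalg.pinvh as a stand-in for the sklearn.utils.extmath implementation (assumed to behave the same):

```python
import numpy as np
from scipy import linalg
from scipy.linalg import pinvh  # stand-in for sklearn.utils.extmath.pinvh

rng = np.random.RandomState(0)
X_rel = rng.randn(30, 5)
gram = np.dot(X_rel.T, X_rel)  # symmetric positive (semi-)definite

# On symmetric input the two routines agree; pinvh skips the SVD.
assert np.allclose(pinvh(gram), linalg.pinv(gram), atol=1e-8)
```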
diff --git a/sklearn/linear_model/bayes.py b/sklearn/linear_model/bayes.py
index 0e0e448fb03551de9032806eb8d46e7f571e0034..a3faf894684d83b74bb5867bb1b49bc03607f7b1 100644
--- a/sklearn/linear_model/bayes.py
+++ b/sklearn/linear_model/bayes.py
@@ -11,7 +11,7 @@ from scipy import linalg
 
 from .base import LinearModel
 from ..base import RegressorMixin
-from ..utils.extmath import fast_logdet
+from ..utils.extmath import fast_logdet, pinvh
 from ..utils import check_arrays
 
 
@@ -382,7 +382,7 @@ class ARDRegression(LinearModel, RegressorMixin):
         ### Iterative procedure of ARDRegression
         for iter_ in range(self.n_iter):
             ### Compute mu and sigma (using Woodbury matrix identity)
-            sigma_ = linalg.pinv(np.eye(n_samples) / alpha_ +
+            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                           np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                           X[:, keep_lambda].T))
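Note on the ARD update above: the matrix handed to pinvh is S = I / alpha + X_k diag(1 / lambda_k) X_k.T, which is symmetric positive definite by construction (an identity term plus a Gram-like term), so the symmetric-only routine applies. A self-contained sketch with illustrative names, not the estimator's internals:

```python
import numpy as np
from scipy.linalg import pinvh  # stand-in for sklearn.utils.extmath.pinvh

rng = np.random.RandomState(42)
n_samples, n_kept = 10, 4
X_k = rng.randn(n_samples, n_kept)      # columns kept by keep_lambda
alpha = 2.0
lambda_k = rng.rand(n_kept) + 0.5

# S = I / alpha + X_k diag(1 / lambda_k) X_k.T
S = np.eye(n_samples) / alpha + np.dot(X_k / lambda_k, X_k.T)
assert np.allclose(S, S.T)              # symmetric, as pinvh requires

sigma = pinvh(S)
assert np.allclose(np.dot(S, sigma), np.eye(n_samples), atol=1e-8)
```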
diff --git a/sklearn/mixture/dpgmm.py b/sklearn/mixture/dpgmm.py
index b74bcbeee413da74b2b026ed56fff28522c87565..227c96913727a7135e71d2070b153af4b85de96b 100644
--- a/sklearn/mixture/dpgmm.py
+++ b/sklearn/mixture/dpgmm.py
@@ -16,7 +16,7 @@ from scipy import linalg
 from scipy.spatial.distance import cdist
 
 from ..utils import check_random_state
-from ..utils.extmath import norm, logsumexp
+from ..utils.extmath import norm, logsumexp, pinvh
 from .. import cluster
 from .gmm import GMM
 
@@ -215,7 +215,7 @@ class DPGMM(GMM):
             return [self.precs_] * self.n_components
 
     def _get_covars(self):
-        return [linalg.pinv(c) for c in self._get_precisions()]
+        return [pinvh(c) for c in self._get_precisions()]
 
     def _set_covars(self, covars):
         raise NotImplementedError("""The variational algorithm does
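The precisions inverted in _get_covars are symmetric positive (semi-)definite by construction, which is what makes pinvh a legitimate and cheaper replacement for linalg.pinv. A sketch of the precision-to-covariance round trip, with illustrative values:

```python
import numpy as np
from scipy.linalg import pinvh  # stand-in for sklearn.utils.extmath.pinvh

rng = np.random.RandomState(0)
A = rng.randn(3, 3)
prec = np.dot(A, A.T) + 1e-3 * np.eye(3)  # symmetric positive definite

cov = pinvh(prec)                          # covariance = inverse precision
assert np.allclose(cov, cov.T)             # pinvh keeps the result symmetric
assert np.allclose(np.dot(prec, cov), np.eye(3), atol=1e-6)
```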
@@ -332,7 +332,7 @@ class DPGMM(GMM):
             for k in xrange(self.n_components):
                     diff = X - self.means_[k]
                     self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
-            self.scale_ = linalg.pinv(self.scale_)
+            self.scale_ = pinvh(self.scale_)
             self.precs_ = self.dof_ * self.scale_
             self.det_scale_ = linalg.det(self.scale_)
             self.bound_prec_ = 0.5 * wishart_log_det(
@@ -346,7 +346,7 @@ class DPGMM(GMM):
                 self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                 diff = X - self.means_[k]
                 self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
-                self.scale_[k] = linalg.pinv(self.scale_[k])
+                self.scale_[k] = pinvh(self.scale_[k])
                 self.precs_[k] = self.dof_[k] * self.scale_[k]
                 self.det_scale_[k] = linalg.det(self.scale_[k])
                 self.bound_prec_[k] = 0.5 * wishart_log_det(self.dof_[k],
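The Wishart scale matrices inverted in these two hunks are likewise symmetric: they start from a multiple of the identity and accumulate terms of the form diff.T diag(z[:, k]) diff, which are symmetric positive semi-definite. A sketch mirroring that construction, with illustrative shapes and values:

```python
import numpy as np
from scipy.linalg import pinvh  # stand-in for sklearn.utils.extmath.pinvh

rng = np.random.RandomState(3)
n_samples, n_features = 20, 3
X = rng.randn(n_samples, n_features)
resp = rng.rand(n_samples, 1)            # nonnegative responsibilities z[:, k]

scale = (resp.sum() + 1) * np.identity(n_features)
diff = X - X.mean(axis=0)
scale += np.dot(diff.T, resp * diff)     # symmetric PSD accumulation
assert np.allclose(scale, scale.T)

scale_inv = pinvh(scale)                 # safe: scale is symmetric PD
assert np.allclose(np.dot(scale, scale_inv), np.eye(n_features), atol=1e-8)
```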
diff --git a/sklearn/mixture/gmm.py b/sklearn/mixture/gmm.py
index 3b04963bfcdb7561a164cec2dc47a9d3bc20dc0c..40fb9c193b9190fe4d4569aa2a86d89054b4511e 100644
--- a/sklearn/mixture/gmm.py
+++ b/sklearn/mixture/gmm.py
@@ -14,7 +14,7 @@ import warnings
 
 from ..base import BaseEstimator
 from ..utils import check_random_state, deprecated
-from ..utils.extmath import logsumexp
+from ..utils.extmath import logsumexp, pinvh
 from .. import cluster
 
 EPS = np.finfo(float).eps
@@ -616,7 +616,7 @@ def _log_multivariate_normal_density_tied(X, means, covars):
     """Compute Gaussian log-density at X for a tied model"""
     from scipy import linalg
     n_samples, n_dim = X.shape
-    icv = linalg.pinv(covars)
+    icv = pinvh(covars)
     lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
                   + np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
                   - 2 * np.dot(np.dot(X, icv), means.T)
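In the tied-covariance density the Mahalanobis term is expanded as (x - m).T C^-1 (x - m) = x.T C^-1 x - 2 x.T C^-1 m + m.T C^-1 m, which is what the vectorized sum above computes for every sample/mean pair; pinvh supplies C^-1 for the symmetric covariance C. A quick numeric check of that identity, with illustrative values:

```python
import numpy as np
from scipy.linalg import pinvh  # stand-in for sklearn.utils.extmath.pinvh

rng = np.random.RandomState(1)
A = rng.randn(4, 4)
C = np.dot(A, A.T) + np.eye(4)  # symmetric positive definite covariance
icv = pinvh(C)
x = rng.randn(4)
m = rng.randn(4)

direct = np.dot(x - m, np.dot(icv, x - m))
expanded = (np.dot(x, np.dot(icv, x))
            - 2 * np.dot(x, np.dot(icv, m))
            + np.dot(m, np.dot(icv, m)))
assert np.allclose(direct, expanded)
```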