Commit 54ebafce in scikit-learn, authored 13 years ago by Gael Varoquaux
COSMIT we no longer support Py 2.5
Also, avoid 'from foo import bar' or 'import as' as much as possible
Parent: 36983177
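The second line of the commit message names the convention this diff enforces: import the module and qualify the call, rather than 'from foo import bar'. A minimal sketch of the two styles (illustrative only, not taken from the diff itself):

    # Before: the imported name no longer shows which module it came from.
    from warnings import warn
    warn("message", RuntimeWarning, stacklevel=2)

    # After: the module prefix keeps every call site self-documenting,
    # and lets tests patch the warnings module as a whole.
    import warnings
    warnings.warn("message", RuntimeWarning, stacklevel=2)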
Showing 2 changed files with 11 additions and 22 deletions:

  sklearn/linear_model/omp.py               +11 −10
  sklearn/linear_model/tests/test_omp.py     +0 −12
sklearn/linear_model/omp.py (+11 −10)

@@ -5,7 +5,7 @@
 #
 # License: BSD Style.
 
-from warnings import warn
+import warnings
 
 import numpy as np
 from scipy import linalg
@@ -75,7 +75,7 @@ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True):
         lam = np.argmax(np.abs(np.dot(X.T, residual)))
         if lam < n_active or alpha[lam] ** 2 < min_float:
             # atom already selected or inner product too small
-            warn(premature, RuntimeWarning, stacklevel=2)
+            warnings.warn(premature, RuntimeWarning, stacklevel=2)
             break
         if n_active > 0:
             # Updates the Cholesky decomposition of X' X
@@ -83,7 +83,7 @@ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True):
             solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
             v = nrm2(L[n_active, :n_active]) ** 2
             if 1 - v <= min_float:  # selected atoms are dependent
-                warn(premature, RuntimeWarning, stacklevel=2)
+                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                 break
             L[n_active, n_active] = np.sqrt(1 - v)
         X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
@@ -169,14 +169,14 @@ def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
         lam = np.argmax(np.abs(alpha))
         if lam < n_active or alpha[lam] ** 2 < min_float:
             # selected same atom twice, or inner product too small
-            warn(premature, RuntimeWarning, stacklevel=2)
+            warnings.warn(premature, RuntimeWarning, stacklevel=2)
             break
         if n_active > 0:
             L[n_active, :n_active] = Gram[lam, :n_active]
             solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
             v = nrm2(L[n_active, :n_active]) ** 2
             if 1 - v <= min_float:  # selected atoms are dependent
-                warn(premature, RuntimeWarning, stacklevel=2)
+                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                 break
             L[n_active, n_active] = np.sqrt(1 - v)
         Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
@@ -520,11 +520,12 @@ class OrthogonalMatchingPursuit(LinearModel):
             self.n_nonzero_coefs = int(0.1 * n_features)
         if (Gram is not None or Xy is not None) and (self.fit_intercept is True
                                                      or self.normalize is True):
-            warn('Mean subtraction (fit_intercept) and normalization cannot '
-                 'be applied on precomputed Gram and Xy matrices. Your '
-                 'precomputed values are ignored and recomputed. To avoid '
-                 'this, do the scaling yourself and call with fit_intercept '
-                 'and normalize set to False.', RuntimeWarning, stacklevel=2)
+            warnings.warn('Mean subtraction (fit_intercept) and '
+                          'normalization cannot be applied on precomputed Gram '
+                          'and Xy matrices. Your precomputed values are ignored '
+                          'and recomputed. To avoid this, do the scaling yourself '
+                          'and call with fit_intercept and normalize set to False.',
+                          RuntimeWarning, stacklevel=2)
             Gram, Xy = None, None
         if Gram is not None:
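The rewritten warning in the hunk above tells callers how to keep their precomputed matrices from being discarded: do the scaling yourself and disable fit_intercept and normalize. A minimal user-side sketch of that advice; the fit(X, y, Gram=..., Xy=...) signature is an assumption inferred from this diff's local variables, not confirmed by it:

    import numpy as np
    from sklearn.linear_model import OrthogonalMatchingPursuit

    X = np.random.randn(100, 20)
    y = np.random.randn(100)

    # Center the data yourself, then pass precomputed Gram and Xy with
    # fit_intercept/normalize off so they are used rather than recomputed.
    Xc = X - X.mean(axis=0)
    yc = y - y.mean()
    model = OrthogonalMatchingPursuit(n_nonzero_coefs=5,
                                      fit_intercept=False, normalize=False)
    model.fit(Xc, yc, Gram=np.dot(Xc.T, Xc), Xy=np.dot(Xc.T, yc))  # assumed signature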
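All of the warn calls touched in this file pass stacklevel=2, which attributes the warning to the caller of the OMP routine instead of to the library line that emits it. A minimal sketch of the mechanism, using a hypothetical helper named fit_something:

    import warnings

    def fit_something():
        # stacklevel=2: the reported file/line point at the code that
        # called fit_something(), not at this line inside the library.
        warnings.warn("did not converge", RuntimeWarning, stacklevel=2)

    fit_something()  # the emitted warning is attributed to this line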
sklearn/linear_model/tests/test_omp.py (+0 −12)

@@ -2,11 +2,9 @@
 # License: BSD style
 import warnings
-from sys import version_info
 
 import numpy as np
-from nose import SkipTest
 from nose.tools import assert_raises, assert_true
 from numpy.testing import assert_equal, assert_array_almost_equal
@@ -23,12 +21,6 @@ G, Xy = np.dot(X.T, X), np.dot(X.T, y)
 # and y (n_samples, 3)
 
-def check_warnings():
-    if version_info < (2, 6):
-        raise SkipTest("Testing for warnings is not supported in versions \
-older than Python 2.6")
-
-
 
 def test_correct_shapes():
     assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
                  (n_features,))
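check_warnings() existed only to skip these tests on Python 2.5, where the warnings.catch_warnings context manager was not yet available; with 2.5 support dropped it is dead code, and the tests below use the recording form directly (available since Python 2.6). A minimal sketch of that pattern:

    import warnings

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')  # record duplicates too
        warnings.warn("demo", RuntimeWarning)
        assert len(w) == 1
        assert issubclass(w[-1].category, RuntimeWarning)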
@@ -71,7 +63,6 @@ def test_with_without_gram_tol():
 def test_unreachable_accuracy():
-    check_warnings()  # Skip if unsupported Python version
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
         assert_array_almost_equal(
@@ -133,7 +124,6 @@ def test_estimator():
 def test_scaling_with_gram():
-    check_warnings()  # Skip if unsupported Python version
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
         # Use only 1 nonzero coef to be faster and to avoid warnings
@@ -157,7 +147,6 @@ def test_scaling_with_gram():
 def test_identical_regressors():
-    check_warnings()  # Skip if unsupported Python version
     newX = X.copy()
     newX[:, 1] = newX[:, 0]
     gamma = np.zeros(n_features)
@@ -185,7 +174,6 @@ def test_swapped_regressors():
 def test_no_atoms():
-    check_warnings()  # Skip if unsupported Python version
     y_empty = np.zeros_like(y)
     Xy_empty = np.dot(X.T, y_empty)
     with warnings.catch_warnings():