
Commit f1bae33

Author: mvargas33 (committed)

Replaced all 'convergence_threshold' with 'tol'

1 parent abf79d1 · commit f1bae33

File tree

5 files changed (+23, -23 lines)

doc/weakly_supervised.rst

Lines changed: 2 additions & 2 deletions

@@ -134,7 +134,7 @@ are respected.
     >>> from metric_learn import MMC
     >>> mmc = MMC(random_state=42)
     >>> mmc.fit(tuples, y)
-    MMC(A0='deprecated', convergence_threshold=0.001, diagonal=False,
+    MMC(A0='deprecated', tol=0.001, diagonal=False,
       diagonal_c=1.0, init='auto', max_iter=100, max_proj=10000,
       preprocessor=None, random_state=42, verbose=False)

@@ -250,7 +250,7 @@ tuples).
     >>> y_pairs = np.array([1, -1])
     >>> mmc = MMC(random_state=42)
     >>> mmc.fit(pairs, y_pairs)
-    MMC(convergence_threshold=0.001, diagonal=False,
+    MMC(tol=0.001, diagonal=False,
       diagonal_c=1.0, init='auto', max_iter=100, max_proj=10000, preprocessor=None,
       random_state=42, verbose=False)
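For context, here is a minimal sketch of the second doctest above with the renamed keyword. The pair coordinates are illustrative placeholders, not values taken from the documentation; `tol=0.001` simply restates the default.

import numpy as np
from metric_learn import MMC

# Two toy pairs of 2-D points: the first labeled similar (+1),
# the second dissimilar (-1).
pairs = np.array([[[1.2, 7.5], [1.3, 1.5]],
                  [[6.4, 2.6], [6.2, 9.7]]])
y_pairs = np.array([1, -1])

# The convergence tolerance is now passed as `tol` instead of
# `convergence_threshold`; the default value (1e-3) is unchanged.
mmc = MMC(tol=0.001, random_state=42)
mmc.fit(pairs, y_pairs)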

metric_learn/itml.py

Lines changed: 7 additions & 7 deletions

@@ -16,12 +16,12 @@ class _BaseITML(MahalanobisMixin):
 
   _tuple_size = 2  # constraints are pairs
 
-  def __init__(self, gamma=1., max_iter=1000, convergence_threshold=1e-3,
+  def __init__(self, gamma=1., max_iter=1000, tol=1e-3,
                prior='identity', verbose=False,
                preprocessor=None, random_state=None):
     self.gamma = gamma
     self.max_iter = max_iter
-    self.convergence_threshold = convergence_threshold
+    self.tol = tol
     self.prior = prior
     self.verbose = verbose
     self.random_state = random_state

@@ -86,7 +86,7 @@ def _fit(self, pairs, y, bounds=None):
         conv = np.inf
         break
       conv = np.abs(lambdaold - _lambda).sum() / normsum
-      if conv < self.convergence_threshold:
+      if conv < self.tol:
         break
       lambdaold = _lambda.copy()
     if self.verbose:

@@ -122,7 +122,7 @@ class ITML(_BaseITML, _PairsClassifierMixin):
   max_iter : int, optional (default=1000)
     Maximum number of iteration of the optimization procedure.
 
-  convergence_threshold : float, optional (default=1e-3)
+  tol : float, optional (default=1e-3)
     Convergence tolerance.
 
   prior : string or numpy array, optional (default='identity')

@@ -260,7 +260,7 @@ class ITML_Supervised(_BaseITML, TransformerMixin):
   max_iter : int, optional (default=1000)
     Maximum number of iterations of the optimization procedure.
 
-  convergence_threshold : float, optional (default=1e-3)
+  tol : float, optional (default=1e-3)
     Tolerance of the optimization procedure.
 
   n_constraints : int, optional (default=None)

@@ -338,11 +338,11 @@ class ITML_Supervised(_BaseITML, TransformerMixin):
   that describes the supervised version of weakly supervised estimators.
   """
 
-  def __init__(self, gamma=1.0, max_iter=1000, convergence_threshold=1e-3,
+  def __init__(self, gamma=1.0, max_iter=1000, tol=1e-3,
                n_constraints=None, prior='identity',
                verbose=False, preprocessor=None, random_state=None):
     _BaseITML.__init__(self, gamma=gamma, max_iter=max_iter,
-                       convergence_threshold=convergence_threshold,
+                       tol=tol,
                        prior=prior, verbose=verbose,
                        preprocessor=preprocessor, random_state=random_state)
     self.n_constraints = n_constraints
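As a quick illustration of the renamed keyword on ITML itself, here is a sketch that is not part of the commit; the toy pairs and the explicit distance bounds are made up for the example.

import numpy as np
from metric_learn import ITML

# A similar pair and a dissimilar pair, purely for illustration.
pairs = np.array([[[0.0, 0.0], [1.0, 1.0]],
                  [[0.0, 0.0], [5.0, 5.0]]])
y = np.array([1, -1])

# `tol` replaces the old `convergence_threshold` keyword; the default
# value (1e-3) is unchanged.
itml = ITML(gamma=1.0, tol=1e-3, random_state=42)
# Explicit similarity/dissimilarity bounds keep this toy problem
# well-conditioned instead of estimating them from two pairs.
itml.fit(pairs, y, bounds=(0.5, 1.5))
print(itml.get_mahalanobis_matrix())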

metric_learn/mmc.py

Lines changed: 9 additions & 9 deletions

@@ -12,13 +12,13 @@ class _BaseMMC(MahalanobisMixin):
 
   _tuple_size = 2  # constraints are pairs
 
-  def __init__(self, max_iter=100, max_proj=10000, convergence_threshold=1e-3,
+  def __init__(self, max_iter=100, max_proj=10000, tol=1e-3,
                init='identity', diagonal=False,
                diagonal_c=1.0, verbose=False, preprocessor=None,
                random_state=None):
     self.max_iter = max_iter
     self.max_proj = max_proj
-    self.convergence_threshold = convergence_threshold
+    self.tol = tol
     self.init = init
     self.diagonal = diagonal
     self.diagonal_c = diagonal_c

@@ -145,13 +145,13 @@ def _fit_full(self, pairs, y):
         A[:] = A_old + alpha * M
 
       delta = np.linalg.norm(alpha * M) / np.linalg.norm(A_old)
-      if delta < self.convergence_threshold:
+      if delta < self.tol:
         break
       if self.verbose:
         print('mmc iter: %d, conv = %f, projections = %d' %
               (cycle, delta, it + 1))
 
-    if delta > self.convergence_threshold:
+    if delta > self.tol:
       self.converged_ = False
       if self.verbose:
         print('mmc did not converge, conv = %f' % (delta,))

@@ -185,7 +185,7 @@ def _fit_diag(self, pairs, y):
     reduction = 2.0
     w = np.diag(self.A_).copy()
 
-    while error > self.convergence_threshold and it < self.max_iter:
+    while error > self.tol and it < self.max_iter:
 
       fD0, fD_1st_d, fD_2nd_d = self._D_constraint(neg_pairs, w)
       obj_initial = np.dot(s_sum, w) + self.diagonal_c * fD0

@@ -332,7 +332,7 @@ class MMC(_BaseMMC, _PairsClassifierMixin):
   max_proj : int, optional (default=10000)
     Maximum number of projection steps.
 
-  convergence_threshold : float, optional (default=1e-3)
+  tol : float, optional (default=1e-3)
     Convergence threshold for the optimization procedure.
 
   init : string or numpy array, optional (default='identity')

@@ -469,7 +469,7 @@ class MMC_Supervised(_BaseMMC, TransformerMixin):
   max_proj : int, optional (default=10000)
     Maximum number of projection steps.
 
-  convergence_threshold : float, optional (default=1e-3)
+  tol : float, optional (default=1e-3)
     Convergence threshold for the optimization procedure.
 
   n_constraints: int, optional (default=None)

@@ -538,12 +538,12 @@ class MMC_Supervised(_BaseMMC, TransformerMixin):
     metric (See function `components_from_metric`.)
   """
 
-  def __init__(self, max_iter=100, max_proj=10000, convergence_threshold=1e-6,
+  def __init__(self, max_iter=100, max_proj=10000, tol=1e-6,
                n_constraints=None, init='identity',
                diagonal=False, diagonal_c=1.0, verbose=False,
                preprocessor=None, random_state=None):
     _BaseMMC.__init__(self, max_iter=max_iter, max_proj=max_proj,
-                      convergence_threshold=convergence_threshold,
+                      tol=tol,
                       init=init, diagonal=diagonal,
                       diagonal_c=diagonal_c, verbose=verbose,
                       preprocessor=preprocessor, random_state=random_state)
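A minimal sketch of the supervised wrapper with the renamed keyword, assuming scikit-learn's standard iris loader; the dataset choice and the explicit defaults are for illustration only and are not part of the commit.

from sklearn.datasets import load_iris
from metric_learn import MMC_Supervised

X, y = load_iris(return_X_y=True)

# The supervised variant keeps its tighter default, tol=1e-6 (formerly
# convergence_threshold=1e-6), and forwards it to _BaseMMC.__init__.
mmc = MMC_Supervised(max_iter=100, tol=1e-6, random_state=42)
mmc.fit(X, y)
print(mmc.transform(X[:3]))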

test/metric_learn_test.py

Lines changed: 1 addition & 1 deletion

@@ -1067,7 +1067,7 @@ def test_iris(self):
 
     # Full metric
     n_features = self.iris_points.shape[1]
-    mmc = MMC(convergence_threshold=0.01, init=np.eye(n_features) / 10)
+    mmc = MMC(tol=0.01, init=np.eye(n_features) / 10)
     mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d]))
     expected = [[+0.000514, +0.000868, -0.001195, -0.001703],
                 [+0.000868, +0.001468, -0.002021, -0.002879],

test/test_base_metric.py

Lines changed: 4 additions & 4 deletions

@@ -65,14 +65,14 @@ def test_lfda(self):
                      remove_spaces(f"LFDA({merged_kwargs})"))
 
   def test_itml(self):
-    def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
+    def_kwargs = {'tol': 0.001, 'gamma': 1.0,
                   'max_iter': 1000, 'preprocessor': None,
                   'prior': 'identity', 'random_state': None, 'verbose': False}
     nndef_kwargs = {'gamma': 0.5}
     merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
     self.assertEqual(remove_spaces(str(metric_learn.ITML(gamma=0.5))),
                      remove_spaces(f"ITML({merged_kwargs})"))
-    def_kwargs = {'convergence_threshold': 0.001, 'gamma': 1.0,
+    def_kwargs = {'tol': 0.001, 'gamma': 1.0,
                   'max_iter': 1000, 'n_constraints': None,
                   'preprocessor': None, 'prior': 'identity',
                   'random_state': None, 'verbose': False}

@@ -141,15 +141,15 @@ def test_mlkr(self):
                      remove_spaces(f"MLKR({merged_kwargs})"))
 
   def test_mmc(self):
-    def_kwargs = {'convergence_threshold': 0.001, 'diagonal': False,
+    def_kwargs = {'tol': 0.001, 'diagonal': False,
                   'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
                   'max_proj': 10000, 'preprocessor': None,
                   'random_state': None, 'verbose': False}
     nndef_kwargs = {'diagonal': True}
     merged_kwargs = sk_repr_kwargs(def_kwargs, nndef_kwargs)
     self.assertEqual(remove_spaces(str(metric_learn.MMC(diagonal=True))),
                      remove_spaces(f"MMC({merged_kwargs})"))
-    def_kwargs = {'convergence_threshold': 1e-06, 'diagonal': False,
+    def_kwargs = {'tol': 1e-06, 'diagonal': False,
                   'diagonal_c': 1.0, 'init': 'identity', 'max_iter': 100,
                   'max_proj': 10000, 'n_constraints': None,
                   'preprocessor': None, 'random_state': None,
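A small sanity check in the same spirit as these repr tests, assuming the estimators expose the usual scikit-learn get_params() interface (they subclass BaseEstimator); this snippet is not part of the commit and the assertions reflect the state at this commit, where the old keyword has been replaced rather than deprecated.

import metric_learn

# After the rename, the parameter dict (and hence the repr) lists `tol`
# rather than `convergence_threshold`.
params = metric_learn.MMC(diagonal=True).get_params()
assert 'tol' in params
assert 'convergence_threshold' not in params
print(params['tol'])  # 0.001, the default for MMC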
