From 8875fd03b9fde98b0662f50ae8dd5c88e228b696 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Tue, 26 Mar 2024 18:46:37 +0530
Subject: [PATCH 01/14] Moved the kron function as-is from slinalg.py to
 nlinalg.py

---
 pytensor/tensor/nlinalg.py | 45 +++++++++++++++++++++++++++++++++++++
 pytensor/tensor/slinalg.py | 46 +-------------------------------------
 2 files changed, 46 insertions(+), 45 deletions(-)

diff --git a/pytensor/tensor/nlinalg.py b/pytensor/tensor/nlinalg.py
index cf510fb065..658b371ab4 100644
--- a/pytensor/tensor/nlinalg.py
+++ b/pytensor/tensor/nlinalg.py
@@ -1026,3 +1026,48 @@ def tensorsolve(a, b, axes=None):
     "tensorinv",
     "tensorsolve",
 ]
+
+# Adding the kron function here, which used to live in slinalg.py
+def kron(a, b):
+    """Kronecker product.
+
+    Same as scipy.linalg.kron(a, b).
+
+    Parameters
+    ----------
+    a: array_like
+    b: array_like
+
+    Returns
+    -------
+    array_like with a.ndim + b.ndim - 2 dimensions
+
+    Notes
+    -----
+    numpy.kron(a, b) != scipy.linalg.kron(a, b)!
+    They don't have the same shape and order when
+    a.ndim != b.ndim != 2.
+
+    """
+    a = as_tensor_variable(a)
+    b = as_tensor_variable(b)
+    if a.ndim + b.ndim <= 2:
+        raise TypeError(
+            "kron: inputs dimensions must sum to 3 or more. "
+            f"You passed {int(a.ndim)} and {int(b.ndim)}."
+        )
+    o = ptm.outer(a, b)
+    o = o.reshape(ptb.concatenate((a.shape, b.shape)), ndim=a.ndim + b.ndim)
+    shf = o.dimshuffle(0, 2, 1, *range(3, o.ndim))
+    if shf.ndim == 3:
+        shf = o.dimshuffle(1, 0, 2)
+        o = shf.flatten()
+    else:
+        o = shf.reshape(
+            (
+                o.shape[0] * o.shape[2],
+                o.shape[1] * o.shape[3],
+                *(o.shape[i] for i in range(4, o.ndim)),
+            )
+        )
+    return o
\ No newline at end of file
diff --git a/pytensor/tensor/slinalg.py b/pytensor/tensor/slinalg.py
index c162902c18..e8594e1aba 100644
--- a/pytensor/tensor/slinalg.py
+++ b/pytensor/tensor/slinalg.py
@@ -558,51 +558,7 @@ def infer_shape(self, fgraph, node, shapes):
 def eigvalsh(a, b, lower=True):
     return Eigvalsh(lower)(a, b)
 
-
-def kron(a, b):
-    """Kronecker product.
-
-    Same as scipy.linalg.kron(a, b).
-
-    Parameters
-    ----------
-    a: array_like
-    b: array_like
-
-    Returns
-    -------
-    array_like with a.ndim + b.ndim - 2 dimensions
-
-    Notes
-    -----
-    numpy.kron(a, b) != scipy.linalg.kron(a, b)!
-    They don't have the same shape and order when
-    a.ndim != b.ndim != 2.
-
-    """
-    a = as_tensor_variable(a)
-    b = as_tensor_variable(b)
-    if a.ndim + b.ndim <= 2:
-        raise TypeError(
-            "kron: inputs dimensions must sum to 3 or more. "
-            f"You passed {int(a.ndim)} and {int(b.ndim)}."
-        )
-    o = ptm.outer(a, b)
-    o = o.reshape(ptb.concatenate((a.shape, b.shape)), ndim=a.ndim + b.ndim)
-    shf = o.dimshuffle(0, 2, 1, *range(3, o.ndim))
-    if shf.ndim == 3:
-        shf = o.dimshuffle(1, 0, 2)
-        o = shf.flatten()
-    else:
-        o = shf.reshape(
-            (
-                o.shape[0] * o.shape[2],
-                o.shape[1] * o.shape[3],
-                *(o.shape[i] for i in range(4, o.ndim)),
-            )
-        )
-    return o
-
+# Removed kron function from here and moved to nlinalg.py
 
 class Expm(Op):
     """
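A quick illustration of the Notes section in the docstring above (a standalone NumPy sketch, not part of the patch): np.kron pads the lower-rank operand with leading length-1 axes and multiplies the shapes elementwise, while scipy.linalg.kron flattens its result into a 2-D block matrix, which is why the two agree only when both inputs are 2-D and why the old test (see PATCH 02 below) adds a dimension and flattens for the 3-D case.

    import numpy as np

    a = np.ones((2, 3))      # 2-D operand
    b = np.ones((6, 7, 8))   # 3-D operand

    # np.kron pads `a` to shape (1, 2, 3), then multiplies the shapes
    # elementwise: (1 * 6, 2 * 7, 3 * 8) == (6, 14, 24).
    assert np.kron(a, b).shape == (6, 14, 24)
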
From 9eb50a6a009afedc6ddb07fa61e2f68a3542aa9a Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 02:50:46 +0530
Subject: [PATCH 02/14] Updated the kron function to use a JAX-style
 implementation and moved its tests to the nlinalg test file

---
 pytensor/tensor/nlinalg.py   | 31 +++++++++++++-------------
 tests/tensor/test_nlinalg.py | 40 ++++++++++++++++++++++++++++++++++
 tests/tensor/test_slinalg.py | 42 ------------------------------------
 3 files changed, 55 insertions(+), 58 deletions(-)

diff --git a/pytensor/tensor/nlinalg.py b/pytensor/tensor/nlinalg.py
index 658b371ab4..3c871668dc 100644
--- a/pytensor/tensor/nlinalg.py
+++ b/pytensor/tensor/nlinalg.py
@@ -9,6 +9,7 @@
 from pytensor.gradient import DisconnectedType
 from pytensor.graph.basic import Apply
 from pytensor.graph.op import Op
+from pytensor.tensor import reshape
 from pytensor.tensor import basic as ptb
 from pytensor.tensor import math as ptm
 from pytensor.tensor.basic import as_tensor_variable, diagonal
@@ -1031,7 +1032,7 @@ def tensorsolve(a, b, axes=None):
 def kron(a, b):
     """Kronecker product.
 
-    Same as scipy.linalg.kron(a, b).
+    Modeled on the JAX implementation of kron.
 
     Parameters
     ----------
@@ -1048,6 +1049,8 @@ def kron(a, b):
     They don't have the same shape and order when
     a.ndim != b.ndim != 2.
 
+    This new function now works for ndim > 2
+
     """
     a = as_tensor_variable(a)
     b = as_tensor_variable(b)
@@ -1056,18 +1059,14 @@ def kron(a, b):
             "kron: inputs dimensions must sum to 3 or more. "
             f"You passed {int(a.ndim)} and {int(b.ndim)}."
         )
-    o = ptm.outer(a, b)
-    o = o.reshape(ptb.concatenate((a.shape, b.shape)), ndim=a.ndim + b.ndim)
-    shf = o.dimshuffle(0, 2, 1, *range(3, o.ndim))
-    if shf.ndim == 3:
-        shf = o.dimshuffle(1, 0, 2)
-        o = shf.flatten()
-    else:
-        o = shf.reshape(
-            (
-                o.shape[0] * o.shape[2],
-                o.shape[1] * o.shape[3],
-                *(o.shape[i] for i in range(4, o.ndim)),
-            )
-        )
-    return o
\ No newline at end of file
+
+    if (a.ndim < b.ndim):
+        a = ptb.expand_dims(a, tuple(range(b.ndim - a.ndim)))
+    elif (b.ndim < a.ndim):
+        b = ptb.expand_dims(b, tuple(range(a.ndim - b.ndim)))
+    a_reshaped = ptb.expand_dims(a, tuple(range(1, 2*a.ndim,2)))
+    b_reshaped = ptb.expand_dims(b, tuple(range(0, 2*b.ndim,2)))
+    out_shape = tuple(a.shape * b.shape)
+    output_out_of_shape = a_reshaped * b_reshaped
+    output_reshaped = reshape(output_out_of_shape, out_shape)
+    return output_reshaped
diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index d39ab0b777..278c274e1c 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -29,6 +29,7 @@
     tensorinv,
     tensorsolve,
     trace,
+    kron,
 )
 from pytensor.tensor.type import (
     lmatrix,
@@ -580,3 +581,42 @@ def test_eval(self):
     t_binv1 = tf_b1(self.b1)
     assert _allclose(t_binv, n_binv)
     assert _allclose(t_binv1, n_binv1)
+
+class TestKron(utt.InferShapeTester):
+    rng = np.random.default_rng(43)
+
+    def setup_method(self):
+        self.op = kron
+        super().setup_method()
+
+    def test_perform(self):
+        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
+            x = tensor(dtype="floatX", shape=(None,) * len(shp0))
+            a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
+            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
+                if len(shp0) + len(shp1) == 2:
+                    continue
+                y = tensor(dtype="floatX", shape=(None,) * len(shp1))
+                f = function([x, y], kron(x, y))
+                b = self.rng.random(shp1).astype(config.floatX)
+                out = f(a, b)
+                # Newer versions of scipy want 4 dimensions at least,
+                # so we have to add a dimension to a and flatten the result.
+                if len(shp0) + len(shp1) == 3:
+                    scipy_val = scipy.linalg.kron(a[np.newaxis, :], b).flatten()
+                else:
+                    scipy_val = scipy.linalg.kron(a, b)
+                np.testing.assert_allclose(out, scipy_val)
+
+    def test_numpy_2d(self):
+        for shp0 in [(2, 3)]:
+            x = tensor(dtype="floatX", shape=(None,) * len(shp0))
+            a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
+            for shp1 in [(6, 7)]:
+                if len(shp0) + len(shp1) == 2:
+                    continue
+                y = tensor(dtype="floatX", shape=(None,) * len(shp1))
+                f = function([x, y], kron(x, y))
+                b = self.rng.random(shp1).astype(config.floatX)
+                out = f(a, b)
+                assert np.allclose(out, np.kron(a, b))
\ No newline at end of file
diff --git a/tests/tensor/test_slinalg.py b/tests/tensor/test_slinalg.py
index a2cc3c52e8..ebfe029410 100644
--- a/tests/tensor/test_slinalg.py
+++ b/tests/tensor/test_slinalg.py
@@ -20,7 +20,6 @@
     cholesky,
     eigvalsh,
     expm,
-    kron,
     solve,
     solve_continuous_lyapunov,
     solve_discrete_are,
@@ -511,47 +510,6 @@ def test_expm_grad_3():
     utt.verify_grad(expm, [A], rng=rng)
 
-
-class TestKron(utt.InferShapeTester):
-    rng = np.random.default_rng(43)
-
-    def setup_method(self):
-        self.op = kron
-        super().setup_method()
-
-    def test_perform(self):
-        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
-            x = tensor(dtype="floatX", shape=(None,) * len(shp0))
-            a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
-            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
-                if len(shp0) + len(shp1) == 2:
-                    continue
-                y = tensor(dtype="floatX", shape=(None,) * len(shp1))
-                f = function([x, y], kron(x, y))
-                b = self.rng.random(shp1).astype(config.floatX)
-                out = f(a, b)
-                # Newer versions of scipy want 4 dimensions at least,
-                # so we have to add a dimension to a and flatten the result.
-                if len(shp0) + len(shp1) == 3:
-                    scipy_val = scipy.linalg.kron(a[np.newaxis, :], b).flatten()
-                else:
-                    scipy_val = scipy.linalg.kron(a, b)
-                np.testing.assert_allclose(out, scipy_val)
-
-    def test_numpy_2d(self):
-        for shp0 in [(2, 3)]:
-            x = tensor(dtype="floatX", shape=(None,) * len(shp0))
-            a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
-            for shp1 in [(6, 7)]:
-                if len(shp0) + len(shp1) == 2:
-                    continue
-                y = tensor(dtype="floatX", shape=(None,) * len(shp1))
-                f = function([x, y], kron(x, y))
-                b = self.rng.random(shp1).astype(config.floatX)
-                out = f(a, b)
-                assert np.allclose(out, np.kron(a, b))
-
-
 def test_solve_discrete_lyapunov_via_direct_real():
     N = 5
     rng = np.random.default_rng(utt.fetch_seed())
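The implementation introduced in PATCH 02 above is easiest to see in plain NumPy. A minimal sketch of the same axis-interleaving trick (illustrative only; kron_via_broadcasting is a hypothetical name, and the sketch assumes both inputs already have equal ndim, which the patched function guarantees by first padding the smaller one with leading length-1 axes):

    import numpy as np

    def kron_via_broadcasting(a, b):
        # Insert length-1 axes so a's axes land at the even positions and
        # b's at the odd positions: a -> (m, 1, n, 1), b -> (1, p, 1, q).
        a_interleaved = np.expand_dims(a, tuple(range(1, 2 * a.ndim, 2)))
        b_interleaved = np.expand_dims(b, tuple(range(0, 2 * b.ndim, 2)))
        # Broadcasting places a[i, j] * b[k, l] at index [i, k, j, l];
        # collapsing each adjacent axis pair gives the Kronecker block layout.
        out_shape = tuple(np.multiply(a.shape, b.shape))
        return (a_interleaved * b_interleaved).reshape(out_shape)

    a = np.arange(6.0).reshape(2, 3)
    b = np.arange(35.0).reshape(5, 7)
    assert np.array_equal(kron_via_broadcasting(a, b), np.kron(a, b))

The design point is that no explicit outer product or dimshuffle is needed: broadcasting plus a single reshape does all the work, for any number of dimensions.
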
From be52b59bf635803f4acd3779d0ec48949a40aaca Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 04:00:06 +0530
Subject: [PATCH 03/14] Updated test for kron

---
 pytensor/tensor/nlinalg.py   | 15 +++++++--------
 tests/tensor/test_nlinalg.py | 15 ++++++---------
 2 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/pytensor/tensor/nlinalg.py b/pytensor/tensor/nlinalg.py
index 3c871668dc..af2dc75538 100644
--- a/pytensor/tensor/nlinalg.py
+++ b/pytensor/tensor/nlinalg.py
@@ -9,9 +9,9 @@
 from pytensor.gradient import DisconnectedType
 from pytensor.graph.basic import Apply
 from pytensor.graph.op import Op
-from pytensor.tensor import reshape
 from pytensor.tensor import basic as ptb
 from pytensor.tensor import math as ptm
+from pytensor.tensor import reshape
 from pytensor.tensor.basic import as_tensor_variable, diagonal
 from pytensor.tensor.blockwise import Blockwise
 from pytensor.tensor.type import dvector, lscalar, matrix, scalar, vector
@@ -1028,11 +1028,12 @@ def tensorsolve(a, b, axes=None):
     "tensorsolve",
 ]
 
+
 # Adding the kron function here, which used to live in slinalg.py
 def kron(a, b):
     """Kronecker product.
 
-    Modeled on the JAX implementation of kron.
+    Same as np.kron(a, b).
 
     Parameters
     ----------
@@ -1049,8 +1050,6 @@ def kron(a, b):
     They don't have the same shape and order when
     a.ndim != b.ndim != 2.
 
-    This new function now works for ndim > 2
-
     """
     a = as_tensor_variable(a)
     b = as_tensor_variable(b)
@@ -1060,12 +1059,12 @@ def kron(a, b):
             f"You passed {int(a.ndim)} and {int(b.ndim)}."
         )
 
-    if (a.ndim < b.ndim):
+    if a.ndim < b.ndim:
         a = ptb.expand_dims(a, tuple(range(b.ndim - a.ndim)))
-    elif (b.ndim < a.ndim):
+    elif b.ndim < a.ndim:
         b = ptb.expand_dims(b, tuple(range(a.ndim - b.ndim)))
-    a_reshaped = ptb.expand_dims(a, tuple(range(1, 2*a.ndim,2)))
-    b_reshaped = ptb.expand_dims(b, tuple(range(0, 2*b.ndim,2)))
+    a_reshaped = ptb.expand_dims(a, tuple(range(1, 2 * a.ndim, 2)))
+    b_reshaped = ptb.expand_dims(b, tuple(range(0, 2 * b.ndim, 2)))
     out_shape = tuple(a.shape * b.shape)
     output_out_of_shape = a_reshaped * b_reshaped
     output_reshaped = reshape(output_out_of_shape, out_shape)
diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 278c274e1c..fb827eab36 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -17,6 +17,7 @@
     det,
     eig,
     eigh,
+    kron,
     lstsq,
     matrix_dot,
     matrix_inverse,
@@ -29,7 +30,6 @@
     tensorinv,
     tensorsolve,
     trace,
-    kron,
 )
 from pytensor.tensor.type import (
     lmatrix,
@@ -582,6 +582,7 @@ def test_eval(self):
     assert _allclose(t_binv, n_binv)
     assert _allclose(t_binv1, n_binv1)
 
+
 class TestKron(utt.InferShapeTester):
     rng = np.random.default_rng(43)
 
@@ -600,13 +601,9 @@ def test_perform(self):
                 f = function([x, y], kron(x, y))
                 b = self.rng.random(shp1).astype(config.floatX)
                 out = f(a, b)
-                # Newer versions of scipy want 4 dimensions at least,
-                # so we have to add a dimension to a and flatten the result.
-                if len(shp0) + len(shp1) == 3:
-                    scipy_val = scipy.linalg.kron(a[np.newaxis, :], b).flatten()
-                else:
-                    scipy_val = scipy.linalg.kron(a, b)
-                np.testing.assert_allclose(out, scipy_val)
+                # Use np.kron to compare outputs
+                np_val = np.kron(a, b)
+                np.testing.assert_allclose(out, np_val)
 
     def test_numpy_2d(self):
         for shp0 in [(2, 3)]:
@@ -619,4 +616,4 @@ def test_numpy_2d(self):
                 f = function([x, y], kron(x, y))
                 b = self.rng.random(shp1).astype(config.floatX)
                 out = f(a, b)
-                assert np.allclose(out, np.kron(a, b))
\ No newline at end of file
+                assert np.allclose(out, np.kron(a, b))

From 52ed8c3b95d42f6687228c0619a70fdb770344e7 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 04:05:01 +0530
Subject: [PATCH 04/14] Updated slinalg to use the new kron implementation

---
 pytensor/tensor/slinalg.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pytensor/tensor/slinalg.py b/pytensor/tensor/slinalg.py
index e8594e1aba..4bc166e227 100644
--- a/pytensor/tensor/slinalg.py
+++ b/pytensor/tensor/slinalg.py
@@ -15,7 +15,7 @@
 from pytensor.tensor import basic as ptb
 from pytensor.tensor import math as ptm
 from pytensor.tensor.blockwise import Blockwise
-from pytensor.tensor.nlinalg import matrix_dot
+from pytensor.tensor.nlinalg import kron, matrix_dot
 from pytensor.tensor.shape import reshape
 from pytensor.tensor.type import matrix, tensor, vector
 from pytensor.tensor.variable import TensorVariable
@@ -558,8 +558,10 @@ def infer_shape(self, fgraph, node, shapes):
 def eigvalsh(a, b, lower=True):
     return Eigvalsh(lower)(a, b)
 
+
 # Removed kron function from here and moved to nlinalg.py
 
+
 class Expm(Op):
     """
     Compute the matrix exponential of a square array.

From 1611f9a381fff04a458a953ff2efeb8546a16e19 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 04:21:43 +0530
Subject: [PATCH 05/14] Ran all pre-commit checks

---
 tests/tensor/test_slinalg.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/tensor/test_slinalg.py b/tests/tensor/test_slinalg.py
index ebfe029410..d39c370ed3 100644
--- a/tests/tensor/test_slinalg.py
+++ b/tests/tensor/test_slinalg.py
@@ -510,6 +510,7 @@ def test_expm_grad_3():
     utt.verify_grad(expm, [A], rng=rng)
 
+
 def test_solve_discrete_lyapunov_via_direct_real():
     N = 5
     rng = np.random.default_rng(utt.fetch_seed())

From 5aef0973164029f73d93deb5f2c5eef45ff3b6c0 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 04:32:39 +0530
Subject: [PATCH 06/14] Removed the reshape import

---
 pytensor/tensor/nlinalg.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pytensor/tensor/nlinalg.py b/pytensor/tensor/nlinalg.py
index af2dc75538..347cbc2024 100644
--- a/pytensor/tensor/nlinalg.py
+++ b/pytensor/tensor/nlinalg.py
@@ -11,7 +11,6 @@
 from pytensor.graph.op import Op
 from pytensor.tensor import basic as ptb
 from pytensor.tensor import math as ptm
-from pytensor.tensor import reshape
 from pytensor.tensor.basic import as_tensor_variable, diagonal
 from pytensor.tensor.blockwise import Blockwise
 from pytensor.tensor.type import dvector, lscalar, matrix, scalar, vector
@@ -1067,5 +1066,5 @@ def kron(a, b):
     b_reshaped = ptb.expand_dims(b, tuple(range(0, 2 * b.ndim, 2)))
     out_shape = tuple(a.shape * b.shape)
     output_out_of_shape = a_reshaped * b_reshaped
-    output_reshaped = reshape(output_out_of_shape, out_shape)
+    output_reshaped = output_out_of_shape.reshape(out_shape)
     return output_reshaped
From 5299b75905ba1327ea072dd951297e73ce263f40 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 05:05:50 +0530
Subject: [PATCH 07/14] Added test for commute with inv

---
 pytensor/tensor/nlinalg.py   | 45 +++++++++++++++---------------
 pytensor/tensor/slinalg.py   |  4 ----
 tests/tensor/test_nlinalg.py | 13 +++++++----
 3 files changed, 27 insertions(+), 35 deletions(-)

diff --git a/pytensor/tensor/nlinalg.py b/pytensor/tensor/nlinalg.py
index 347cbc2024..0252423926 100644
--- a/pytensor/tensor/nlinalg.py
+++ b/pytensor/tensor/nlinalg.py
@@ -1010,25 +1010,6 @@ def tensorsolve(a, b, axes=None):
     return TensorSolve(axes)(a, b)
 
-__all__ = [
-    "pinv",
-    "inv",
-    "trace",
-    "matrix_dot",
-    "det",
-    "eig",
-    "eigh",
-    "qr",
-    "svd",
-    "lstsq",
-    "matrix_power",
-    "norm",
-    "tensorinv",
-    "tensorsolve",
-]
-
-
-# Adding the kron function here, which used to live in slinalg.py
 def kron(a, b):
     """Kronecker product.
 
@@ -1042,13 +1023,6 @@ def kron(a, b):
     Returns
     -------
     array_like with a.ndim + b.ndim - 2 dimensions
-
-    Notes
-    -----
-    numpy.kron(a, b) != scipy.linalg.kron(a, b)!
-    They don't have the same shape and order when
-    a.ndim != b.ndim != 2.
-
     """
     a = as_tensor_variable(a)
     b = as_tensor_variable(b)
@@ -1068,3 +1042,22 @@ def kron(a, b):
     output_out_of_shape = a_reshaped * b_reshaped
     output_reshaped = output_out_of_shape.reshape(out_shape)
     return output_reshaped
+
+
+__all__ = [
+    "pinv",
+    "inv",
+    "trace",
+    "matrix_dot",
+    "det",
+    "eig",
+    "eigh",
+    "qr",
+    "svd",
+    "lstsq",
+    "matrix_power",
+    "norm",
+    "tensorinv",
+    "tensorsolve",
+    "kron",
+]
diff --git a/pytensor/tensor/slinalg.py b/pytensor/tensor/slinalg.py
index 4bc166e227..2d032e220e 100644
--- a/pytensor/tensor/slinalg.py
+++ b/pytensor/tensor/slinalg.py
@@ -559,9 +559,6 @@ def eigvalsh(a, b, lower=True):
     return Eigvalsh(lower)(a, b)
 
-# Removed kron function from here and moved to nlinalg.py
-
-
 class Expm(Op):
     """
     Compute the matrix exponential of a square array.
@@ -979,7 +976,6 @@ def block_diag(*matrices: TensorVariable):
     "cholesky",
     "solve",
     "eigvalsh",
-    "kron",
     "expm",
     "solve_discrete_lyapunov",
     "solve_continuous_lyapunov",
diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index fb827eab36..ef96e55c07 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -605,15 +605,18 @@ def test_perform(self):
                 np_val = np.kron(a, b)
                 np.testing.assert_allclose(out, np_val)
 
-    def test_numpy_2d(self):
-        for shp0 in [(2, 3)]:
+    def test_kron_commutes_with_inv(self):
+        for shp0 in [(2, 3), (2, 4, 3)]:
             x = tensor(dtype="floatX", shape=(None,) * len(shp0))
             a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
-            for shp1 in [(6, 7)]:
+            for shp1 in [(6, 7), (4, 3, 5)]:
+                if len(shp0) != len(shp1):
+                    continue
                 if len(shp0) + len(shp1) == 2:
                     continue
                 y = tensor(dtype="floatX", shape=(None,) * len(shp1))
                 f = function([x, y], kron(x, y))
                 b = self.rng.random(shp1).astype(config.floatX)
-                out = f(a, b)
-                assert np.allclose(out, np.kron(a, b))
+                lhs = matrix_inverse(f(a, b))
+                rhs = f(matrix_inverse(a), matrix_inverse(b))
+                assert np.allclose(lhs, rhs)
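For reference, the identity exercised by test_kron_commutes_with_inv is the mixed-product property (A ⊗ B)(C ⊗ D) = (AC) ⊗ (BD), which gives inv(A ⊗ B) = inv(A) ⊗ inv(B) for square invertible factors. The same identity holds for the Moore-Penrose pseudoinverse, which is why the later patches in this series can switch from matrix_inverse to pinv once non-square shapes enter the test. A standalone NumPy check of the pinv form (illustrative, not part of the patch):

    import numpy as np

    rng = np.random.default_rng(0)
    A = rng.standard_normal((2, 3))   # non-square, so use the pseudoinverse
    B = rng.standard_normal((6, 7))

    # pinv(A kron B) == pinv(A) kron pinv(B) by the mixed-product property.
    lhs = np.linalg.pinv(np.kron(A, B))
    rhs = np.kron(np.linalg.pinv(A), np.linalg.pinv(B))
    np.testing.assert_allclose(lhs, rhs, atol=1e-10)
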
From 3e0c79ca3f6f7ca13c9cf448602eec24c8bd2ab9 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 15:56:30 +0530
Subject: [PATCH 08/14] Updated the commute_with_inv test

---
 tests/tensor/test_nlinalg.py | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index ef96e55c07..3c7f7b99cc 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -606,17 +606,13 @@ def test_perform(self):
                 np.testing.assert_allclose(out, np_val)
 
     def test_kron_commutes_with_inv(self):
-        for shp0 in [(2, 3), (2, 4, 3)]:
+        for shp0, shp1 in zip(
+            [(2, 3), (2, 3), (2, 4, 3)], [(6, 7), (4, 3, 5), (4, 3, 5)]
+        ):
             x = tensor(dtype="floatX", shape=(None,) * len(shp0))
             a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
-            for shp1 in [(6, 7), (4, 3, 5)]:
-                if len(shp0) != len(shp1):
-                    continue
-                if len(shp0) + len(shp1) == 2:
-                    continue
-                y = tensor(dtype="floatX", shape=(None,) * len(shp1))
-                f = function([x, y], kron(x, y))
-                b = self.rng.random(shp1).astype(config.floatX)
-                lhs = matrix_inverse(f(a, b))
-                rhs = f(matrix_inverse(a), matrix_inverse(b))
-                assert np.allclose(lhs, rhs)
+            y = tensor(dtype="floatX", shape=(None,) * len(shp1))
+            b = self.rng.random(shp1).astype(config.floatX)
+            lhs_f = function([x, y], matrix_inverse(kron(x, y)))
+            rhs_f = function([x, y], kron(matrix_inverse(x), matrix_inverse(y)))
+            assert np.testing.assert_allclose(lhs_f(a, b), rhs_f(a, b))

From a7902fd08b224f1830ae118e03be80a1b75f1a46 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 17:06:19 +0530
Subject: [PATCH 09/14] Working test for commute_with_inv

---
 tests/tensor/test_nlinalg.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 3c7f7b99cc..93bd46afbf 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -613,6 +613,6 @@ def test_kron_commutes_with_inv(self):
             a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
             y = tensor(dtype="floatX", shape=(None,) * len(shp1))
             b = self.rng.random(shp1).astype(config.floatX)
-            lhs_f = function([x, y], matrix_inverse(kron(x, y)))
-            rhs_f = function([x, y], kron(matrix_inverse(x), matrix_inverse(y)))
-            assert np.testing.assert_allclose(lhs_f(a, b), rhs_f(a, b))
+            lhs_f = function([x, y], pinv(kron(x, y)))
+            rhs_f = function([x, y], kron(pinv(x), pinv(y)))
+            np.testing.assert_allclose(lhs_f(a, b), rhs_f(a, b))

From e3b1aaa8cb14c3b005b5f2e1b49210d178ddc7bc Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 17:58:28 +0530
Subject: [PATCH 10/14] Added a dtype-aware tolerance to the commute_with_inv
 test

---
 tests/tensor/test_nlinalg.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 93bd46afbf..2e8fe05e97 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -609,10 +609,13 @@ def test_kron_commutes_with_inv(self):
         for shp0, shp1 in zip(
             [(2, 3), (2, 3), (2, 4, 3)], [(6, 7), (4, 3, 5), (4, 3, 5)]
         ):
+            if len(shp0) == 3 or len(shp1) == 3:
+                continue
             x = tensor(dtype="floatX", shape=(None,) * len(shp0))
             a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
             y = tensor(dtype="floatX", shape=(None,) * len(shp1))
             b = self.rng.random(shp1).astype(config.floatX)
             lhs_f = function([x, y], pinv(kron(x, y)))
             rhs_f = function([x, y], kron(pinv(x), pinv(y)))
-            np.testing.assert_allclose(lhs_f(a, b), rhs_f(a, b))
+            atol = 1e-4 if config.floatX == "float32" else 1e-12
+            np.testing.assert_allclose(lhs_f(a, b), rhs_f(a, b), atol=atol)

From 3ea1cdb64e640e3b654c5d18aa02321289db366f Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 18:03:56 +0530
Subject: [PATCH 11/14] Skipped some single-precision test cases

---
 tests/tensor/test_nlinalg.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 2e8fe05e97..36df7ca25b 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -609,8 +609,10 @@ def test_kron_commutes_with_inv(self):
         for shp0, shp1 in zip(
             [(2, 3), (2, 3), (2, 4, 3)], [(6, 7), (4, 3, 5), (4, 3, 5)]
         ):
-            if len(shp0) == 3 or len(shp1) == 3:
-                continue
+            if (pytensor.config.floatX == "float32") & (
+                len(shp0) == 3 or len(shp1) == 3
+            ):
+                pytest.skip("Kron and inv do not commute at single precision")
             x = tensor(dtype="floatX", shape=(None,) * len(shp0))
             a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
             y = tensor(dtype="floatX", shape=(None,) * len(shp1))

From 7fa03cf27b9d330213769f952aab0494b065c82b Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 18:07:56 +0530
Subject: [PATCH 12/14] Simplified the single-precision skip condition

---
 tests/tensor/test_nlinalg.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 36df7ca25b..0812c755f2 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -606,12 +606,10 @@ def test_perform(self):
             np.testing.assert_allclose(out, np_val)
 
     def test_kron_commutes_with_inv(self):
-        for shp0, shp1 in zip(
-            [(2, 3), (2, 3), (2, 4, 3)], [(6, 7), (4, 3, 5), (4, 3, 5)]
+        for i, (shp0, shp1) in enumerate(
+            zip([(2, 3), (2, 3), (2, 4, 3)], [(6, 7), (4, 3, 5), (4, 3, 5)])
         ):
-            if (pytensor.config.floatX == "float32") & (
-                len(shp0) == 3 or len(shp1) == 3
-            ):
+            if (pytensor.config.floatX == "float32") & (i == 2):
                 pytest.skip("Kron and inv do not commute at single precision")
             x = tensor(dtype="floatX", shape=(None,) * len(shp0))
             a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
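A note on the tolerances chosen above (background, not part of the patch): float32 is single precision, with a machine epsilon roughly nine orders of magnitude larger than float64's, and the pinv-of-kron pipeline typically runs an SVD under the hood, so a correspondingly looser atol is needed:

    import numpy as np

    print(np.finfo(np.float32).eps)   # ~1.19e-07 -> test uses atol=1e-4
    print(np.finfo(np.float64).eps)   # ~2.22e-16 -> test uses atol=1e-12
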
From 510449479783e0293c8b5994a9a38d68c7069b33 Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Wed, 27 Mar 2024 18:11:35 +0530
Subject: [PATCH 13/14] Updated the skip message

---
 tests/tensor/test_nlinalg.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 0812c755f2..af4b1f2c5f 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -610,7 +610,7 @@ def test_kron_commutes_with_inv(self):
             zip([(2, 3), (2, 3), (2, 4, 3)], [(6, 7), (4, 3, 5), (4, 3, 5)])
         ):
             if (pytensor.config.floatX == "float32") & (i == 2):
-                pytest.skip("Kron and inv do not commute at single precision")
+                pytest.skip("Single precision is insufficient for test 3 to pass")
             x = tensor(dtype="floatX", shape=(None,) * len(shp0))
             a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
             y = tensor(dtype="floatX", shape=(None,) * len(shp1))

From 35996a5f3b4869f58e8716043f36758341db114c Mon Sep 17 00:00:00 2001
From: Tanish Taneja
Date: Thu, 28 Mar 2024 00:23:00 +0530
Subject: [PATCH 14/14] Parametrized the tests

---
 tests/tensor/test_nlinalg.py | 59 ++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 29 deletions(-)

diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index af4b1f2c5f..d10242d388 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -590,32 +590,33 @@ def setup_method(self):
         self.op = kron
         super().setup_method()
 
-    def test_perform(self):
-        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
-            x = tensor(dtype="floatX", shape=(None,) * len(shp0))
-            a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
-            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
-                if len(shp0) + len(shp1) == 2:
-                    continue
-                y = tensor(dtype="floatX", shape=(None,) * len(shp1))
-                f = function([x, y], kron(x, y))
-                b = self.rng.random(shp1).astype(config.floatX)
-                out = f(a, b)
-                # Use np.kron to compare outputs
-                np_val = np.kron(a, b)
-                np.testing.assert_allclose(out, np_val)
-
-    def test_kron_commutes_with_inv(self):
-        for i, (shp0, shp1) in enumerate(
-            zip([(2, 3), (2, 3), (2, 4, 3)], [(6, 7), (4, 3, 5), (4, 3, 5)])
-        ):
-            if (pytensor.config.floatX == "float32") & (i == 2):
-                pytest.skip("Single precision is insufficient for test 3 to pass")
-            x = tensor(dtype="floatX", shape=(None,) * len(shp0))
-            a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
-            y = tensor(dtype="floatX", shape=(None,) * len(shp1))
-            b = self.rng.random(shp1).astype(config.floatX)
-            lhs_f = function([x, y], pinv(kron(x, y)))
-            rhs_f = function([x, y], kron(pinv(x), pinv(y)))
-            atol = 1e-4 if config.floatX == "float32" else 1e-12
-            np.testing.assert_allclose(lhs_f(a, b), rhs_f(a, b), atol=atol)
+    @pytest.mark.parametrize("shp0", [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)])
+    @pytest.mark.parametrize("shp1", [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)])
+    def test_perform(self, shp0, shp1):
+        if len(shp0) + len(shp1) == 2:
+            pytest.skip("Sum of shp0 and shp1 must be more than 2")
+        x = tensor(dtype="floatX", shape=(None,) * len(shp0))
+        a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
+        y = tensor(dtype="floatX", shape=(None,) * len(shp1))
+        f = function([x, y], kron(x, y))
+        b = self.rng.random(shp1).astype(config.floatX)
+        out = f(a, b)
+        # Use np.kron to compare outputs
+        np_val = np.kron(a, b)
+        np.testing.assert_allclose(out, np_val)
+
+    @pytest.mark.parametrize(
+        "i, shp0, shp1",
+        [(0, (2, 3), (6, 7)), (1, (2, 3), (4, 3, 5)), (2, (2, 4, 3), (4, 3, 5))],
+    )
+    def test_kron_commutes_with_inv(self, i, shp0, shp1):
+        if (pytensor.config.floatX == "float32") & (i == 2):
+            pytest.skip("Single precision is insufficient for test 3 to pass")
+        x = tensor(dtype="floatX", shape=(None,) * len(shp0))
+        a = np.asarray(self.rng.random(shp0)).astype(config.floatX)
+        y = tensor(dtype="floatX", shape=(None,) * len(shp1))
+        b = self.rng.random(shp1).astype(config.floatX)
+        lhs_f = function([x, y], pinv(kron(x, y)))
+        rhs_f = function([x, y], kron(pinv(x), pinv(y)))
+        atol = 1e-4 if config.floatX == "float32" else 1e-12
+        np.testing.assert_allclose(lhs_f(a, b), rhs_f(a, b), atol=atol)
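To round off the series, a minimal end-to-end sketch of the relocated function (usage illustration only; assumes a PyTensor build with these patches applied):

    import numpy as np
    import pytensor
    import pytensor.tensor as pt
    from pytensor.tensor.nlinalg import kron  # new home as of PATCH 01

    x = pt.dmatrix("x")
    y = pt.dmatrix("y")
    f = pytensor.function([x, y], kron(x, y))

    a = np.arange(6.0).reshape(2, 3)
    b = np.arange(4.0).reshape(2, 2)
    # The compiled graph should match NumPy's block layout: shape (4, 6).
    np.testing.assert_allclose(f(a, b), np.kron(a, b))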