Numpy 2.0 #689

Closed · wants to merge 12 commits
environment.yml (2 changes: 1 addition & 1 deletion)

```diff
@@ -9,7 +9,7 @@ channels:
 dependencies:
 - python>=3.10
 - compilers
-- numpy>=1.17.0
+- conda-forge/label/numpy_dev::numpy=2.0.0rc1
 - scipy>=0.14
 - filelock
 - etuples
```
pyproject.toml (6 changes: 3 additions & 3 deletions)

```diff
@@ -2,7 +2,7 @@
 requires = [
     "setuptools>=48.0.0",
     "cython",
-    "numpy>=1.17.0",
+    "numpy>=2.0.0rc1",
     "versioneer[toml]>=0.28",
 ]
 build-backend = "setuptools.build_meta"
@@ -48,7 +48,7 @@ keywords = [
 dependencies = [
     "setuptools>=48.0.0",
     "scipy>=0.14",
-    "numpy>=1.17.0",
+    "numpy>=2.0.0rc1",
     "filelock",
     "etuples",
     "logical-unification",
@@ -125,7 +125,7 @@ line-length = 88
 exclude = ["doc/", "pytensor/_version.py", "bin/pytensor_cache.py"]

 [tool.ruff.lint]
-select = ["C", "E", "F", "I", "UP", "W", "RUF"]
+select = ["C", "E", "F", "I", "UP", "W", "RUF", "NPY201"]
 ignore = ["C408", "C901", "E501", "E741", "RUF012"]
```
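Note on the new lint rule: ruff's NPY201 rule flags uses of APIs removed in NumPy 2.0 and suggests replacements. A minimal sketch of the kind of code it catches (illustrative examples, not taken from this PR):

```python
import numpy as np

# Each of these names was removed in NumPy 2.0, so NPY201 flags them:
x = np.cast["float32"](2)     # use np.asarray(2, dtype="float32") instead
y = np.NaN                    # use np.nan instead
z = np.row_stack([[1], [2]])  # use np.vstack instead
```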
pytensor/link/c/basic.py (4 changes: 2 additions & 2 deletions)

```diff
@@ -1371,8 +1371,8 @@ def cmodule_key_(

     # We must always add the numpy ABI version here as
     # DynamicModule always add the include <numpy/arrayobject.h>
-    if np.lib.NumpyVersion(np.__version__) < "1.16.0a":
-        ndarray_c_version = np.core.multiarray._get_ndarray_c_version()
+    if np.lib.NumpyVersion(np.__version__) >= "2.0.0rc":
+        ndarray_c_version = np._core._multiarray_umath._get_ndarray_c_version()
     else:
         ndarray_c_version = np.core._multiarray_umath._get_ndarray_c_version()
     sig.append(f"NPY_ABI_VERSION=0x{ndarray_c_version:X}")
```
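Note on the version gate: np.lib.NumpyVersion supports version-aware comparison against plain strings, which is what lets the `>= "2.0.0rc"` check above cover release candidates as well as final 2.x releases. A quick sketch of the assumed comparison semantics, worth verifying:

```python
from numpy.lib import NumpyVersion

# A bare "rc" suffix sorts below any numbered pre-release, so the gate
# matches 2.0.0rc1, 2.0.0rc2, and the final 2.0.0 alike:
assert NumpyVersion("2.0.0rc1") >= "2.0.0rc"
assert NumpyVersion("2.0.0") >= "2.0.0rc"
# ...while every 1.x release takes the old branch:
assert NumpyVersion("1.26.4") < "2.0.0rc"
```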
pytensor/scalar/basic.py (195 changes: 18 additions & 177 deletions)
```diff
@@ -279,8 +279,6 @@ class ScalarType(CType, HasDataType, HasShape):
     Analogous to TensorType, but for zero-dimensional objects.
     Maps directly to C primitives.

-    TODO: refactor to be named ScalarType for consistency with TensorType.
-
     """

     __props__ = ("dtype",)
@@ -350,11 +348,14 @@ def c_element_type(self):
         return self.dtype_specs()[1]

     def c_headers(self, c_compiler=None, **kwargs):
-        l = ["<math.h>"]
-        # These includes are needed by ScalarType and TensorType,
-        # we declare them here and they will be re-used by TensorType
-        l.append("<numpy/arrayobject.h>")
-        l.append("<numpy/arrayscalars.h>")
+        l = [
+            "<math.h>",
+            # These includes are needed by ScalarType and TensorType,
+            # we declare them here and they will be re-used by TensorType
+            "<numpy/arrayobject.h>",
+            "<numpy/arrayscalars.h>",
+            "<numpy/npy_2_complexcompat.h>",
+        ]
         if config.lib__amdlibm and c_compiler.supports_amdlibm:
             l += ["<amdlibm.h>"]
         return l
@@ -396,8 +397,8 @@ def dtype_specs(self):
             "float16": (np.float16, "npy_float16", "Float16"),
             "float32": (np.float32, "npy_float32", "Float32"),
             "float64": (np.float64, "npy_float64", "Float64"),
-            "complex128": (np.complex128, "pytensor_complex128", "Complex128"),
-            "complex64": (np.complex64, "pytensor_complex64", "Complex64"),
+            "complex128": (np.complex128, "npy_complex128", "Complex128"),
+            "complex64": (np.complex64, "npy_complex64", "Complex64"),
             "bool": (np.bool_, "npy_bool", "Bool"),
             "uint8": (np.uint8, "npy_uint8", "UInt8"),
             "int8": (np.int8, "npy_int8", "Int8"),
```
```diff
@@ -506,171 +507,11 @@ def c_sync(self, name, sub):
     def c_cleanup(self, name, sub):
         return ""

-    def c_support_code(self, **kwargs):
-        if self.dtype.startswith("complex"):
-            cplx_types = ["pytensor_complex64", "pytensor_complex128"]
-            real_types = [
-                "npy_int8",
-                "npy_int16",
-                "npy_int32",
-                "npy_int64",
-                "npy_float32",
-                "npy_float64",
-            ]
-            # If the 'int' C type is not exactly the same as an existing
-            # 'npy_intX', some C code may not compile, e.g. when assigning
-            # the value 0 (cast to 'int' in C) to an PyTensor_complex64.
-            if np.dtype("intc").num not in [np.dtype(d[4:]).num for d in real_types]:
-                # In that case we add the 'int' type to the real types.
-                real_types.append("int")
-
-            template = """
-            struct pytensor_complex%(nbits)s : public npy_complex%(nbits)s
-            {
-                typedef pytensor_complex%(nbits)s complex_type;
-                typedef npy_float%(half_nbits)s scalar_type;
-
-                complex_type operator +(const complex_type &y) const {
-                    complex_type ret;
-                    ret.real = this->real + y.real;
-                    ret.imag = this->imag + y.imag;
-                    return ret;
-                }
-
-                complex_type operator -() const {
-                    complex_type ret;
-                    ret.real = -this->real;
-                    ret.imag = -this->imag;
-                    return ret;
-                }
-                bool operator ==(const complex_type &y) const {
-                    return (this->real == y.real) && (this->imag == y.imag);
-                }
-                bool operator ==(const scalar_type &y) const {
-                    return (this->real == y) && (this->imag == 0);
-                }
-                complex_type operator -(const complex_type &y) const {
-                    complex_type ret;
-                    ret.real = this->real - y.real;
-                    ret.imag = this->imag - y.imag;
-                    return ret;
-                }
-                complex_type operator *(const complex_type &y) const {
-                    complex_type ret;
-                    ret.real = this->real * y.real - this->imag * y.imag;
-                    ret.imag = this->real * y.imag + this->imag * y.real;
-                    return ret;
-                }
-                complex_type operator /(const complex_type &y) const {
-                    complex_type ret;
-                    scalar_type y_norm_square = y.real * y.real + y.imag * y.imag;
-                    ret.real = (this->real * y.real + this->imag * y.imag) / y_norm_square;
-                    ret.imag = (this->imag * y.real - this->real * y.imag) / y_norm_square;
-                    return ret;
-                }
-                template <typename T>
-                complex_type& operator =(const T& y);
-
-                pytensor_complex%(nbits)s() {}
-
-                template <typename T>
-                pytensor_complex%(nbits)s(const T& y) { *this = y; }
-
-                template <typename TR, typename TI>
-                pytensor_complex%(nbits)s(const TR& r, const TI& i) { this->real=r; this->imag=i; }
-            };
-            """
-
-            def operator_eq_real(mytype, othertype):
-                return f"""
-                template <> {mytype} & {mytype}::operator=<{othertype}>(const {othertype} & y)
-                {{ this->real=y; this->imag=0; return *this; }}
-                """
-
-            def operator_eq_cplx(mytype, othertype):
-                return f"""
-                template <> {mytype} & {mytype}::operator=<{othertype}>(const {othertype} & y)
-                {{ this->real=y.real; this->imag=y.imag; return *this; }}
-                """
-
-            operator_eq = "".join(
-                operator_eq_real(ctype, rtype)
-                for ctype in cplx_types
-                for rtype in real_types
-            ) + "".join(
-                operator_eq_cplx(ctype1, ctype2)
-                for ctype1 in cplx_types
-                for ctype2 in cplx_types
-            )
-
-            # We are not using C++ generic templating here, because this would
-            # generate two different functions for adding a complex64 and a
-            # complex128, one returning a complex64, the other a complex128,
-            # and the compiler complains it is ambiguous.
-            # Instead, we generate code for known and safe types only.
-
-            def operator_plus_real(mytype, othertype):
-                return f"""
-                const {mytype} operator+(const {mytype} &x, const {othertype} &y)
-                {{ return {mytype}(x.real+y, x.imag); }}
-
-                const {mytype} operator+(const {othertype} &y, const {mytype} &x)
-                {{ return {mytype}(x.real+y, x.imag); }}
-                """
-
-            operator_plus = "".join(
-                operator_plus_real(ctype, rtype)
-                for ctype in cplx_types
-                for rtype in real_types
-            )
-
-            def operator_minus_real(mytype, othertype):
-                return f"""
-                const {mytype} operator-(const {mytype} &x, const {othertype} &y)
-                {{ return {mytype}(x.real-y, x.imag); }}
-
-                const {mytype} operator-(const {othertype} &y, const {mytype} &x)
-                {{ return {mytype}(y-x.real, -x.imag); }}
-                """
-
-            operator_minus = "".join(
-                operator_minus_real(ctype, rtype)
-                for ctype in cplx_types
-                for rtype in real_types
-            )
-
-            def operator_mul_real(mytype, othertype):
-                return f"""
-                const {mytype} operator*(const {mytype} &x, const {othertype} &y)
-                {{ return {mytype}(x.real*y, x.imag*y); }}
-
-                const {mytype} operator*(const {othertype} &y, const {mytype} &x)
-                {{ return {mytype}(x.real*y, x.imag*y); }}
-                """
-
-            operator_mul = "".join(
-                operator_mul_real(ctype, rtype)
-                for ctype in cplx_types
-                for rtype in real_types
-            )
-
-            return (
-                template % dict(nbits=64, half_nbits=32)
-                + template % dict(nbits=128, half_nbits=64)
-                + operator_eq
-                + operator_plus
-                + operator_minus
-                + operator_mul
-            )
-
-        else:
-            return ""
-
     def c_init_code(self, **kwargs):
         return ["import_array();"]

     def c_code_cache_version(self):
-        return (13, np.__version__)
+        return (14, np.__version__)

     def get_shape_info(self, obj):
         return obj.itemsize
```
```diff
@@ -3144,7 +2985,7 @@ def L_op(self, inputs, outputs, gout):
         else:
             return [x.zeros_like()]

-        return (gz * exp2(x) * log(np.cast[x.type](2)),)
+        return (gz * exp2(x) * log(np.asarray(2, dtype=x.type)),)

     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
```

Review comment (Member): These look like they could be cherry-picked and applied already?
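The pattern in this and the following hunks is mechanical: np.cast was removed in NumPy 2.0, and np.asarray with an explicit dtype is the drop-in replacement. A minimal before/after sketch:

```python
import numpy as np

# NumPy < 2.0 (removed in 2.0):  np.cast[dtype](value)
# NumPy >= 2.0 replacement used throughout this PR:
two = np.asarray(2, dtype="float32")
assert two.dtype == np.float32 and two == 2
```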
```diff
@@ -3391,7 +3232,7 @@ def L_op(self, inputs, outputs, gout):
         else:
             return [x.zeros_like()]

-        return (-gz / sqrt(np.cast[x.type](1) - sqr(x)),)
+        return (-gz / sqrt(np.asarray(1, dtype=x.type) - sqr(x)),)

     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3465,7 +3306,7 @@ def L_op(self, inputs, outputs, gout):
         else:
             return [x.zeros_like()]

-        return (gz / sqrt(np.cast[x.type](1) - sqr(x)),)
+        return (gz / sqrt(np.asarray(1, dtype=x.type) - sqr(x)),)

     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3537,7 +3378,7 @@ def L_op(self, inputs, outputs, gout):
         else:
             return [x.zeros_like()]

-        return (gz / (np.cast[x.type](1) + sqr(x)),)
+        return (gz / (np.asarray(1, dtype=x.type) + sqr(x)),)

     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3660,7 +3501,7 @@ def L_op(self, inputs, outputs, gout):
         else:
             return [x.zeros_like()]

-        return (gz / sqrt(sqr(x) - np.cast[x.type](1)),)
+        return (gz / sqrt(sqr(x) - np.asarray(1, dtype=x.type)),)

     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3737,7 +3578,7 @@ def L_op(self, inputs, outputs, gout):
         else:
             return [x.zeros_like()]

-        return (gz / sqrt(sqr(x) + np.cast[x.type](1)),)
+        return (gz / sqrt(sqr(x) + np.asarray(1, dtype=x.type)),)

     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
@@ -3815,7 +3656,7 @@ def L_op(self, inputs, outputs, gout):
         else:
             return [x.zeros_like()]

-        return (gz / (np.cast[x.type](1) - sqr(x)),)
+        return (gz / (np.asarray(1, dtype=x.type) - sqr(x)),)

     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
```
pytensor/sparse/basic.py (20 changes: 10 additions & 10 deletions)
```diff
@@ -3607,7 +3607,7 @@ def perform(self, node, inputs, outputs):
         out[0] = g_a_data

     def c_code_cache_version(self):
-        return (1,)
+        return (2,)

     def c_code(self, node, name, inputs, outputs, sub):
         (_indices, _indptr, _d, _g) = inputs
@@ -3643,11 +3643,11 @@ def c_code(self, node, name, inputs, outputs, sub):
         npy_intp nnz = PyArray_DIMS({_indices})[0];
         npy_intp N = PyArray_DIMS({_indptr})[0]-1; //TODO: error checking with this

-        npy_intp Sindices = PyArray_STRIDES({_indices})[0]/PyArray_DESCR({_indices})->elsize;
-        npy_intp Sindptr = PyArray_STRIDES({_indptr})[0]/PyArray_DESCR({_indptr})->elsize;
+        npy_intp Sindices = PyArray_STRIDES({_indices})[0]/PyArray_ITEMSIZE({_indices});
+        npy_intp Sindptr = PyArray_STRIDES({_indptr})[0]/PyArray_ITEMSIZE({_indptr});

-        const npy_intp Sd1 = PyArray_STRIDES({_d})[1]/PyArray_DESCR({_d})->elsize;
-        const npy_intp Sg1 = PyArray_STRIDES({_g})[1]/PyArray_DESCR({_g})->elsize;
+        const npy_intp Sd1 = PyArray_STRIDES({_d})[1]/PyArray_ITEMSIZE({_d});
+        const npy_intp Sg1 = PyArray_STRIDES({_g})[1]/PyArray_ITEMSIZE({_g});

         const npy_intp K = PyArray_DIMS({_d})[1];

@@ -3740,7 +3740,7 @@ def perform(self, node, inputs, outputs):
         out[0] = g_a_data

     def c_code_cache_version(self):
-        return (1,)
+        return (2,)

     def c_code(self, node, name, inputs, outputs, sub):
         (_indices, _indptr, _d, _g) = inputs
@@ -3777,11 +3777,11 @@ def c_code(self, node, name, inputs, outputs, sub):
         // extract number of rows
         npy_intp N = PyArray_DIMS({_indptr})[0]-1; //TODO: error checking with this

-        npy_intp Sindices = PyArray_STRIDES({_indices})[0]/PyArray_DESCR({_indices})->elsize;
-        npy_intp Sindptr = PyArray_STRIDES({_indptr})[0]/PyArray_DESCR({_indptr})->elsize;
+        npy_intp Sindices = PyArray_STRIDES({_indices})[0]/PyArray_ITEMSIZE({_indices});
+        npy_intp Sindptr = PyArray_STRIDES({_indptr})[0]/PyArray_ITEMSIZE({_indptr});

-        const npy_intp Sd1 = PyArray_STRIDES({_d})[1]/PyArray_DESCR({_d})->elsize;
-        const npy_intp Sg1 = PyArray_STRIDES({_g})[1]/PyArray_DESCR({_g})->elsize;
+        const npy_intp Sd1 = PyArray_STRIDES({_d})[1]/PyArray_ITEMSIZE({_d});
+        const npy_intp Sg1 = PyArray_STRIDES({_g})[1]/PyArray_ITEMSIZE({_g});

         const npy_intp K = PyArray_DIMS({_d})[1];
```
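Note on the C change above: in NumPy 2.0 the descriptor struct layout changed, so direct PyArray_DESCR(arr)->elsize access is no longer portable, and PyArray_ITEMSIZE is the supported accessor. The arithmetic itself, byte stride divided by item size to get the element stride, is unchanged. The same computation in Python, as an illustrative sketch:

```python
import numpy as np

# Element stride = byte stride / item size, mirroring
# PyArray_STRIDES(a)[d] / PyArray_ITEMSIZE(a) in the C code above:
a = np.zeros((4, 5), dtype=np.float64)
assert a.strides[1] // a.itemsize == 1  # contiguous inner axis
assert a.strides[0] // a.itemsize == 5  # row stride, in elements
```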