
Commit d9ed1e2

Fix typo amblibm -> amdlibm
1 parent 7a0175a commit d9ed1e2

File tree: 5 files changed (+13 / -13 lines)


doc/library/config.rst

Lines changed: 1 addition & 1 deletion
@@ -355,7 +355,7 @@ import ``pytensor`` and print the config variable, as in:
 
     When ``True``, ignore the first call to an PyTensor function while profiling.
 
-.. attribute:: config.lib__amblibm
+.. attribute:: config.lib__amdlibm
 
     Bool value: either ``True`` or ``False``
 
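The renamed attribute is an ordinary boolean config flag. As a minimal sketch (not part of this commit, and assuming the usual PyTensor configuration mechanisms), it could be enabled through the PYTENSOR_FLAGS environment variable or by assigning to pytensor.config before compiling any functions:

# Sketch only; assumes amdlibm is installed and that the flag may be set at runtime.
# Shell alternative: PYTENSOR_FLAGS="lib__amdlibm=True" python my_script.py
import pytensor

pytensor.config.lib__amdlibm = True   # defaults to False (BoolParam(False))
print(pytensor.config.lib__amdlibm)   # -> True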

pytensor/compile/profiling.py

Lines changed: 4 additions & 4 deletions
@@ -1566,26 +1566,26 @@ def exp_float32_op(op):
             printed_tip = True
 
         # tip 2
-        if not config.lib__amblibm and any(
+        if not config.lib__amdlibm and any(
             amdlibm_speed_up(a.op) for (fgraph, a) in self.apply_time
         ):
             print(
                 " - Try installing amdlibm and set the PyTensor flag "
-                "lib__amblibm=True. This speeds up only some Elemwise "
+                "lib__amdlibm=True. This speeds up only some Elemwise "
                 "operation.",
                 file=file,
             )
             printed_tip = True
 
         # tip 3
-        if not config.lib__amblibm and any(
+        if not config.lib__amdlibm and any(
             exp_float32_op(a.op) and a.inputs[0].dtype == "float32"
             for (fgraph, a) in self.apply_time
         ):
             print(
                 " - With the default gcc libm, exp in float32 is slower "
                 "than in float64! Try PyTensor flag floatX=float64, or "
-                "install amdlibm and set the pytensor flags lib__amblibm=True",
+                "install amdlibm and set the pytensor flags lib__amdlibm=True",
                 file=file,
             )
             printed_tip = True
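The corrected flag name also appears in profiler tips 2 and 3 above. A hedged sketch of how those tips would surface, assuming the standard profile=True argument to pytensor.function and the ProfileStats.summary() method:

import numpy as np
import pytensor
import pytensor.tensor as pt

# Sketch only: a float32 exp graph, which is the pattern tip 3 looks for.
x = pt.vector("x", dtype="float32")
f = pytensor.function([x], pt.exp(x), profile=True)
f(np.ones(1024, dtype="float32"))
f.profile.summary()  # with lib__amdlibm=False, the amdlibm tips are printed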

pytensor/configdefaults.py

Lines changed: 1 addition & 1 deletion
@@ -547,7 +547,7 @@ def add_tensor_configvars():
 
     # http://developer.amd.com/CPU/LIBRARIES/LIBM/Pages/default.aspx
     config.add(
-        "lib__amblibm",
+        "lib__amdlibm",
         "Use amd's amdlibm numerical library",
         BoolParam(False),
         # Added elsewhere in the c key only when needed.

pytensor/configparser.py

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ class PyTensorConfigParser:
     compile__timeout: int
     # add_tensor_configvars
     tensor__cmp_sloppy: int
-    lib__amblibm: bool
+    lib__amdlibm: bool
     tensor__insert_inplace_optimizer_validate_nb: int
     # add_traceback_configvars
     traceback__limit: int

pytensor/scalar/basic.py

Lines changed: 6 additions & 6 deletions
@@ -356,18 +356,18 @@ def c_headers(self, c_compiler=None, **kwargs):
         # we declare them here and they will be re-used by TensorType
         l.append("<numpy/arrayobject.h>")
         l.append("<numpy/arrayscalars.h>")
-        if config.lib__amblibm and c_compiler.supports_amdlibm:
+        if config.lib__amdlibm and c_compiler.supports_amdlibm:
             l += ["<amdlibm.h>"]
         return l
 
     def c_libraries(self, c_compiler=None, **kwargs):
         l = []
-        if config.lib__amblibm and c_compiler and c_compiler.supports_amdlibm:
+        if config.lib__amdlibm and c_compiler and c_compiler.supports_amdlibm:
             l += ["amdlibm"]
         return l
 
     def c_compile_args(self, c_compiler=None, **kwargs):
-        if config.lib__amblibm and c_compiler and c_compiler.supports_amdlibm:
+        if config.lib__amdlibm and c_compiler and c_compiler.supports_amdlibm:
             return ["-DREPLACE_WITH_AMDLIBM"]
         else:
             return []
@@ -1245,7 +1245,7 @@ class UnaryScalarOp(ScalarOp):
     def c_code_contiguous(self, node, name, inputs, outputs, sub):
         (x,) = inputs
         (z,) = outputs
-        if not config.lib__amblibm or node.inputs[0].type != node.outputs[0].type:
+        if not config.lib__amdlibm or node.inputs[0].type != node.outputs[0].type:
             raise MethodNotDefined()
 
         dtype = node.inputs[0].type.dtype_specs()[1]
@@ -1260,7 +1260,7 @@ def c_code_contiguous(self, node, name, inputs, outputs, sub):
         """
 
     def c_code_contiguous_raw(self, dtype, n, i, o):
-        if not config.lib__amblibm:
+        if not config.lib__amdlibm:
             raise MethodNotDefined()
         if dtype.startswith("npy_"):
             dtype = dtype[4:]
@@ -2296,7 +2296,7 @@ def L_op(self, inputs, outputs, gout):
     def c_code_contiguous(self, node, name, inputs, outputs, sub):
         (x, y) = inputs
         (z,) = outputs
-        if not config.lib__amblibm:
+        if not config.lib__amdlibm:
             raise MethodNotDefined()
 
         # We compare the dtype AND the broadcast flag
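These hooks are where the flag reaches the C backend: with lib__amdlibm enabled and a compiler that supports it, generated modules include <amdlibm.h>, link against amdlibm, and are built with -DREPLACE_WITH_AMDLIBM. A hedged sketch of inspecting that wiring; the import paths pytensor.link.c.cmodule.GCC_compiler and pytensor.scalar.basic.ScalarType are assumptions, not shown in this commit:

from pytensor import config
from pytensor.link.c.cmodule import GCC_compiler   # assumed default C compiler object
from pytensor.scalar.basic import ScalarType

config.lib__amdlibm = True                          # sketch: pretend the flag is on
st = ScalarType("float64")
print(st.c_headers(c_compiler=GCC_compiler))        # should list "<amdlibm.h>" if supported
print(st.c_compile_args(c_compiler=GCC_compiler))   # ["-DREPLACE_WITH_AMDLIBM"] if supported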
