diff --git a/pytensor/compile/function/types.py b/pytensor/compile/function/types.py
index 43199328a3..b7caff1bf4 100644
--- a/pytensor/compile/function/types.py
+++ b/pytensor/compile/function/types.py
@@ -326,8 +326,8 @@ class Function:
     def __init__(
         self,
         vm: "VM",
-        input_storage,
-        output_storage,
+        input_storage: list[Container],
+        output_storage: list[Container],
         indices,
         outputs,
         defaults,
@@ -372,7 +372,6 @@ def __init__(
         name
             A string name.
         """
-        # TODO: Rename to `vm`
        self.vm = vm
         self.input_storage = input_storage
         self.output_storage = output_storage
@@ -388,31 +387,52 @@ def __init__(
         self.nodes_with_inner_function = []
         self.output_keys = output_keys
 
-        # See if we have any mutable / borrow inputs
-        # TODO: this only need to be set if there is more than one input
-        self._check_for_aliased_inputs = False
-        for i in maker.inputs:
-            # If the input is a shared variable, the memory region is
-            # under PyTensor control and so we don't need to check if it
-            # is aliased as we never do that.
-            if (
-                isinstance(i, In)
-                and not i.shared
-                and (getattr(i, "borrow", False) or getattr(i, "mutable", False))
+        if self.output_keys is not None:
+            warnings.warn("output_keys is deprecated.", FutureWarning)
+
+        assert len(self.input_storage) == len(self.maker.fgraph.inputs)
+        assert len(self.output_storage) == len(self.maker.fgraph.outputs)
+
+        # Group indices of inputs that are potentially aliased to each other.
+        # Note: Historically, we only worried about aliasing inputs if they belonged to the same type,
+        # even though there could be two distinct types that use the same kinds of underlying objects.
+        potential_aliased_input_groups = []
+        for inp in maker.inputs:
+            # If the input is a shared variable, the memory region is under PyTensor control
+            # and can't be aliased.
+            if not (
+                isinstance(inp, In)
+                and inp.borrow
+                and not inp.shared
+                and hasattr(inp.variable.type, "may_share_memory")
             ):
-                self._check_for_aliased_inputs = True
-                break
+                continue
+
+            for group in potential_aliased_input_groups:
+                # If one type is a supertype of the other, one input could be replaced by the other
+                if any(
+                    inp.variable.type.is_super(other_inp.variable.type)
+                    or other_inp.variable.type.is_super(inp.variable.type)
+                    for other_inp in group
+                ):
+                    group.append(inp)
+                    break
+            else:  # no break
+                # This input starts a new group
+                potential_aliased_input_groups.append([inp])
+
+        # Potentially aliased inputs are those that ended up in the same group
+        self._potential_aliased_input_groups: tuple[tuple[int, ...], ...] = tuple(
+            tuple(maker.inputs.index(inp) for inp in group)
+            for group in potential_aliased_input_groups
+            if len(group) > 1
+        )
 
         # We will be popping stuff off this `containers` object. It is a copy.
         containers = list(self.input_storage)
         finder = {}
         inv_finder = {}
 
-        def distribute(indices, cs, value):
-            input.distribute(value, indices, cs)
-            for c in cs:
-                c.provided += 1
-
         # Store the list of names of named inputs.
         named_inputs = []
         # Count the number of un-named inputs.
@@ -777,6 +797,13 @@ def checkSV(sv_ori, sv_rpl):
         f_cpy.maker.fgraph.name = name
         return f_cpy
 
+    def _restore_defaults(self):
+        for i, (required, refeed, value) in enumerate(self.defaults):
+            if refeed:
+                if isinstance(value, Container):
+                    value = value.storage[0]
+                self[i] = value
+
     def __call__(self, *args, **kwargs):
         """
         Evaluates value of a function on given arguments.
@@ -805,52 +832,45 @@ def __call__(self, *args, **kwargs):
         List of outputs on indices/keys from ``output_subset`` or all of them,
         if ``output_subset`` is not passed.
""" - - def restore_defaults(): - for i, (required, refeed, value) in enumerate(self.defaults): - if refeed: - if isinstance(value, Container): - value = value.storage[0] - self[i] = value - + input_storage = self.input_storage profile = self.profile - t0 = time.perf_counter() + + if profile: + t0 = time.perf_counter() output_subset = kwargs.pop("output_subset", None) - if output_subset is not None and self.output_keys is not None: - output_subset = [self.output_keys.index(key) for key in output_subset] + if output_subset is not None: + warnings.warn("output_subset is deprecated.", FutureWarning) + if self.output_keys is not None: + output_subset = [self.output_keys.index(key) for key in output_subset] # Reinitialize each container's 'provided' counter if self.trust_input: - i = 0 - for arg in args: - s = self.input_storage[i] - s.storage[0] = arg - i += 1 + for arg_container, arg in zip(input_storage, args, strict=False): + arg_container.storage[0] = arg else: - for c in self.input_storage: - c.provided = 0 + for arg_container in input_storage: + arg_container.provided = 0 - if len(args) + len(kwargs) > len(self.input_storage): + if len(args) + len(kwargs) > len(input_storage): raise TypeError("Too many parameter passed to pytensor function") # Set positional arguments - i = 0 - for arg in args: - # TODO: provide a option for skipping the filter if we really - # want speed. - s = self.input_storage[i] - # see this emails for a discuation about None as input + for arg_container, arg in zip(input_storage, args, strict=False): + # See discussion about None as input # https://groups.google.com/group/theano-dev/browse_thread/thread/920a5e904e8a8525/4f1b311a28fc27e5 if arg is None: - s.storage[0] = arg + arg_container.storage[0] = arg else: try: - s.storage[0] = s.type.filter( - arg, strict=s.strict, allow_downcast=s.allow_downcast + arg_container.storage[0] = arg_container.type.filter( + arg, + strict=arg_container.strict, + allow_downcast=arg_container.allow_downcast, ) except Exception as e: + i = input_storage.index(arg_container) function_name = "pytensor function" argument_name = "argument" if self.name: @@ -875,85 +895,66 @@ def restore_defaults(): + function_name + f" at index {int(i)} (0-based). 
{where}" ) + e.args - restore_defaults() + self._restore_defaults() raise - s.provided += 1 - i += 1 + arg_container.provided += 1 # Set keyword arguments if kwargs: # for speed, skip the items for empty kwargs for k, arg in kwargs.items(): self[k] = arg - if ( - not self.trust_input - and - # The getattr is only needed for old pickle - getattr(self, "_check_for_aliased_inputs", True) - ): + if not self.trust_input: # Collect aliased inputs among the storage space - args_share_memory = [] - for i in range(len(self.input_storage)): - i_var = self.maker.inputs[i].variable - i_val = self.input_storage[i].storage[0] - if hasattr(i_var.type, "may_share_memory"): - is_aliased = False - for j in range(len(args_share_memory)): - group_j = zip( - [ - self.maker.inputs[k].variable - for k in args_share_memory[j] - ], - [ - self.input_storage[k].storage[0] - for k in args_share_memory[j] - ], - ) + for potential_group in self._potential_aliased_input_groups: + args_share_memory: list[list[int]] = [] + for i in potential_group: + i_type = self.maker.inputs[i].variable.type + i_val = input_storage[i].storage[0] + + # Check if value is aliased with any of the values in one of the groups + for j_group in args_share_memory: if any( - ( - var.type is i_var.type - and var.type.may_share_memory(val, i_val) - ) - for (var, val) in group_j + i_type.may_share_memory(input_storage[j].storage[0], i_val) + for j in j_group ): - is_aliased = True - args_share_memory[j].append(i) + j_group.append(i) break - - if not is_aliased: + else: # no break + # Create a new group args_share_memory.append([i]) - # Check for groups of more than one argument that share memory - for group in args_share_memory: - if len(group) > 1: - # copy all but the first - for j in group[1:]: - self.input_storage[j].storage[0] = copy.copy( - self.input_storage[j].storage[0] - ) + # Check for groups of more than one argument that share memory + for group in args_share_memory: + if len(group) > 1: + # copy all but the first + for i in group[1:]: + input_storage[i].storage[0] = copy.copy( + input_storage[i].storage[0] + ) - # Check if inputs are missing, or if inputs were set more than once, or - # if we tried to provide inputs that are supposed to be implicit. - if not self.trust_input: - for c in self.input_storage: - if c.required and not c.provided: - restore_defaults() + # Check if inputs are missing, or if inputs were set more than once, or + # if we tried to provide inputs that are supposed to be implicit. 
+ for arg_container in input_storage: + if arg_container.required and not arg_container.provided: + self._restore_defaults() raise TypeError( - f"Missing required input: {getattr(self.inv_finder[c], 'variable', self.inv_finder[c])}" + f"Missing required input: {getattr(self.inv_finder[arg_container], 'variable', self.inv_finder[arg_container])}" ) - if c.provided > 1: - restore_defaults() + if arg_container.provided > 1: + self._restore_defaults() raise TypeError( - f"Multiple values for input: {getattr(self.inv_finder[c], 'variable', self.inv_finder[c])}" + f"Multiple values for input: {getattr(self.inv_finder[arg_container], 'variable', self.inv_finder[arg_container])}" ) - if c.implicit and c.provided > 0: - restore_defaults() + if arg_container.implicit and arg_container.provided > 0: + self._restore_defaults() raise TypeError( - f"Tried to provide value for implicit input: {getattr(self.inv_finder[c], 'variable', self.inv_finder[c])}" + f"Tried to provide value for implicit input: {getattr(self.inv_finder[arg_container], 'variable', self.inv_finder[arg_container])}" ) # Do the actual work - t0_fn = time.perf_counter() + if profile: + t0_fn = time.perf_counter() try: outputs = ( self.vm() @@ -961,7 +962,7 @@ def restore_defaults(): else self.vm(output_subset=output_subset) ) except Exception: - restore_defaults() + self._restore_defaults() if hasattr(self.vm, "position_of_error"): # this is a new vm-provided function or c linker # they need this because the exception manipulation @@ -979,26 +980,24 @@ def restore_defaults(): # old-style linkers raise their own exceptions raise - dt_fn = time.perf_counter() - t0_fn - self.maker.mode.fn_time += dt_fn if profile: + dt_fn = time.perf_counter() - t0_fn + self.maker.mode.fn_time += dt_fn profile.vm_call_time += dt_fn # Retrieve the values that were computed if outputs is None: outputs = [x.data for x in self.output_storage] - assert len(outputs) == len(self.output_storage) # Remove internal references to required inputs. # These cannot be re-used anyway. - for c in self.input_storage: - if c.required: - c.storage[0] = None + for arg_container in input_storage: + if arg_container.required: + arg_container.storage[0] = None # if we are allowing garbage collection, remove the # output reference from the internal storage cells if getattr(self.vm, "allow_gc", False): - assert len(self.output_storage) == len(self.maker.fgraph.outputs) for o_container, o_variable in zip( self.output_storage, self.maker.fgraph.outputs ): @@ -1007,12 +1006,10 @@ def restore_defaults(): # WARNING: This circumvents the 'readonly' attribute in x o_container.storage[0] = None - # TODO: Get rid of this and `expanded_inputs`, since all the VMs now - # perform the updates themselves if getattr(self.vm, "need_update_inputs", True): # Update the inputs that have an update function for input, storage in reversed( - list(zip(self.maker.expanded_inputs, self.input_storage)) + list(zip(self.maker.expanded_inputs, input_storage)) ): if input.update is not None: storage.data = outputs.pop() @@ -1020,17 +1017,12 @@ def restore_defaults(): outputs = outputs[: self.n_returned_outputs] # Put default values back in the storage - restore_defaults() - # - # NOTE: This logic needs to be replicated in - # scan. 
- # grep for 'PROFILE_CODE' - # - - dt_call = time.perf_counter() - t0 - pytensor.compile.profiling.total_fct_exec_time += dt_call - self.maker.mode.call_time += dt_call + self._restore_defaults() + if profile: + dt_call = time.perf_counter() - t0 + pytensor.compile.profiling.total_fct_exec_time += dt_call + self.maker.mode.call_time += dt_call profile.fct_callcount += 1 profile.fct_call_time += dt_call if hasattr(self.vm, "update_profile"): @@ -1038,6 +1030,7 @@ def restore_defaults(): if profile.ignore_first_call: profile.reset() profile.ignore_first_call = False + if self.return_none: return None elif self.unpack_single and len(outputs) == 1 and output_subset is None: @@ -1572,6 +1565,8 @@ def __init__( ) for i in self.inputs ] + if any(self.refeed): + warnings.warn("Inputs with default values are deprecated.", FutureWarning) def create(self, input_storage=None, storage_map=None): """ diff --git a/pytensor/gradient.py b/pytensor/gradient.py index f9c393b512..5946a20dd4 100644 --- a/pytensor/gradient.py +++ b/pytensor/gradient.py @@ -128,9 +128,6 @@ def fiter_variable(self, other): " a symbolic placeholder." ) - def may_share_memory(a, b): - return False - def value_eq(a, b, force_same_dtype=True): raise AssertionError( "If you're assigning to a DisconnectedType you're" diff --git a/pytensor/graph/null_type.py b/pytensor/graph/null_type.py index 66f5c18fd1..0e5579d11a 100644 --- a/pytensor/graph/null_type.py +++ b/pytensor/graph/null_type.py @@ -26,9 +26,6 @@ def filter(self, data, strict=False, allow_downcast=None): def filter_variable(self, other, allow_convert=True): raise ValueError("No values may be assigned to a NullType") - def may_share_memory(a, b): - return False - def values_eq(self, a, b, force_same_dtype=True): raise ValueError("NullType has no values to compare") diff --git a/pytensor/graph/op.py b/pytensor/graph/op.py index 684add6308..519abe49d8 100644 --- a/pytensor/graph/op.py +++ b/pytensor/graph/op.py @@ -513,6 +513,7 @@ def make_py_thunk( """ node_input_storage = [storage_map[r] for r in node.inputs] node_output_storage = [storage_map[r] for r in node.outputs] + node_compute_map = [compute_map[r] for r in node.outputs] if debug and hasattr(self, "debug_perform"): p = node.op.debug_perform @@ -520,10 +521,16 @@ def make_py_thunk( p = node.op.perform @is_thunk_type - def rval(p=p, i=node_input_storage, o=node_output_storage, n=node): + def rval( + p=p, + i=node_input_storage, + o=node_output_storage, + n=node, + cm=node_compute_map, + ): r = p(n, [x[0] for x in i], o) - for o in node.outputs: - compute_map[o][0] = True + for entry in cm: + entry[0] = True return r rval.inputs = node_input_storage diff --git a/pytensor/graph/type.py b/pytensor/graph/type.py index ee97c1823d..d4d800716d 100644 --- a/pytensor/graph/type.py +++ b/pytensor/graph/type.py @@ -48,10 +48,7 @@ def in_same_class(self, otype: "Type") -> bool | None: unique element (i.e. it uses `self.__eq__`). """ - if self == otype: - return True - - return False + return self == otype def is_super(self, otype: "Type") -> bool | None: """Determine if `self` is a supertype of `otype`. diff --git a/pytensor/scalar/basic.py b/pytensor/scalar/basic.py index ca58006d24..c9dbfb46b0 100644 --- a/pytensor/scalar/basic.py +++ b/pytensor/scalar/basic.py @@ -303,13 +303,6 @@ def clone(self, dtype=None, **kwargs): dtype = self.dtype return type(self)(dtype) - @staticmethod - def may_share_memory(a, b): - # This class represent basic c type, represented in python - # with numpy.scalar. They are read only. 
So from python, they
-        # can never share memory.
-        return False
-
     def filter(self, data, strict=False, allow_downcast=None):
         py_type = self.dtype_specs()[0]
         if strict and not isinstance(data, py_type):
diff --git a/pytensor/tensor/random/op.py b/pytensor/tensor/random/op.py
index e43dfaa222..309a661c9a 100644
--- a/pytensor/tensor/random/op.py
+++ b/pytensor/tensor/random/op.py
@@ -387,24 +387,17 @@ def dist_params(self, node) -> Sequence[Variable]:
         return node.inputs[2:]
 
     def perform(self, node, inputs, outputs):
-        rng_var_out, smpl_out = outputs
-
         rng, size, *args = inputs
 
         # Draw from `rng` if `self.inplace` is `True`, and from a copy of `rng` otherwise.
         if not self.inplace:
             rng = copy(rng)
 
-        rng_var_out[0] = rng
-
-        if size is not None:
-            size = tuple(size)
-        smpl_val = self.rng_fn(rng, *([*args, size]))
-
-        if not isinstance(smpl_val, np.ndarray) or str(smpl_val.dtype) != self.dtype:
-            smpl_val = np.asarray(smpl_val, dtype=self.dtype)
-
-        smpl_out[0] = smpl_val
+        outputs[0][0] = rng
+        outputs[1][0] = np.asarray(
+            self.rng_fn(rng, *args, None if size is None else tuple(size)),
+            dtype=self.dtype,
+        )
 
     def grad(self, inputs, outputs):
         return [
diff --git a/pytensor/tensor/type_other.py b/pytensor/tensor/type_other.py
index bc293d8906..a9e559504f 100644
--- a/pytensor/tensor/type_other.py
+++ b/pytensor/tensor/type_other.py
@@ -126,12 +126,6 @@ def filter(self, x, strict=False, allow_downcast=None):
         else:
             raise TypeError("Expected None!")
 
-    @staticmethod
-    def may_share_memory(a, b):
-        # None never share memory between object, in the sense of DebugMode.
-        # Python None are singleton
-        return False
-
 
 none_type_t = NoneTypeT()
 
diff --git a/tests/compile/function/test_types.py b/tests/compile/function/test_types.py
index af292eb10d..4b6537d328 100644
--- a/tests/compile/function/test_types.py
+++ b/tests/compile/function/test_types.py
@@ -19,6 +19,8 @@
 from pytensor.printing import debugprint
 from pytensor.tensor.math import dot, tanh
 from pytensor.tensor.math import sum as pt_sum
+from pytensor.tensor.random import normal
+from pytensor.tensor.random.type import random_generator_type
 from pytensor.tensor.type import (
     dmatrix,
     dscalar,
@@ -33,6 +35,9 @@
 )
 
 
+pytestmark = pytest.mark.filterwarnings("error")
+
+
 def PatternOptimizer(p1, p2, ign=True):
     return OpKeyGraphRewriter(PatternNodeRewriter(p1, p2), ignore_newtrees=ign)
 
@@ -193,7 +198,10 @@ def test_naming_rule3(self):
         x, s = scalars("xs")
 
         # x's name is not ignored (as in test_naming_rule2) because a has a default value.
-        f = function([x, In(a, value=1.0), s], a / s + x)
+        with pytest.warns(
+            FutureWarning, match="Inputs with default values are deprecated."
+        ):
+            f = function([x, In(a, value=1.0), s], a / s + x)
         assert f(9, 2, 4) == 9.5  # can specify all args in order
         assert f(9, 2, s=4) == 9.5  # can give s as kwarg
         assert f(9, s=4) == 9.25  # can give s as kwarg, get default a
@@ -212,7 +220,10 @@ def test_naming_rule4(self):
         a = scalar()  # the a is for 'anonymous' (un-named).
         x, s = scalars("xs")
 
-        f = function([x, In(a, value=1.0, name="a"), s], a / s + x)
+        with pytest.warns(
+            FutureWarning, match="Inputs with default values are deprecated."
+ ): + f = function([x, In(a, value=1.0, name="a"), s], a / s + x) assert f(9, 2, 4) == 9.5 # can specify all args in order assert f(9, 2, s=4) == 9.5 # can give s as kwarg @@ -246,11 +257,14 @@ def test_state_access(self, mode): a = scalar() x, s = scalars("xs") - f = function( - [x, In(a, value=1.0, name="a"), In(s, value=0.0, update=s + a * x)], - s + a * x, - mode=mode, - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + f = function( + [x, In(a, value=1.0, name="a"), In(s, value=0.0, update=s + a * x)], + s + a * x, + mode=mode, + ) assert f[a] == 1.0 assert f[s] == 0.0 @@ -301,16 +315,19 @@ def test_copy(self): a = scalar() x, s = scalars("xs") - f = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=0.0, update=s + a * x, mutable=True), - ], - s + a * x, - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + f = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=0.0, update=s + a * x, mutable=True), + ], + s + a * x, + ) - g = copy.copy(f) + g = copy.copy(f) assert f.unpack_single == g.unpack_single assert f.trust_input == g.trust_input @@ -502,22 +519,25 @@ def test_shared_state0(self): a = scalar() # the a is for 'anonymous' (un-named). x, s = scalars("xs") - f = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=0.0, update=s + a * x, mutable=True), - ], - s + a * x, - ) - g = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=f.container[s], update=s - a * x, mutable=True), - ], - s + a * x, - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + f = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=0.0, update=s + a * x, mutable=True), + ], + s + a * x, + ) + g = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=f.container[s], update=s - a * x, mutable=True), + ], + s + a * x, + ) f(1, 2) assert f[s] == 2 @@ -530,17 +550,20 @@ def test_shared_state1(self): a = scalar() # the a is for 'anonymous' (un-named). x, s = scalars("xs") - f = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=0.0, update=s + a * x, mutable=True), - ], - s + a * x, - ) - g = function( - [x, In(a, value=1.0, name="a"), In(s, value=f.container[s])], s + a * x - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + f = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=0.0, update=s + a * x, mutable=True), + ], + s + a * x, + ) + g = function( + [x, In(a, value=1.0, name="a"), In(s, value=f.container[s])], s + a * x + ) f(1, 2) assert f[s] == 2 @@ -554,17 +577,20 @@ def test_shared_state2(self): a = scalar() # the a is for 'anonymous' (un-named). x, s = scalars("xs") - f = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=0.0, update=s + a * x, mutable=False), - ], - s + a * x, - ) - g = function( - [x, In(a, value=1.0, name="a"), In(s, value=f.container[s])], s + a * x - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." 
+ ): + f = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=0.0, update=s + a * x, mutable=False), + ], + s + a * x, + ) + g = function( + [x, In(a, value=1.0, name="a"), In(s, value=f.container[s])], s + a * x + ) f(1, 2) assert f[s] == 2 @@ -716,7 +742,10 @@ def test_default_values(self): a, b = dscalars("a", "b") c = a + b - funct = function([In(a, name="first"), In(b, value=1, name="second")], c) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + funct = function([In(a, name="first"), In(b, value=1, name="second")], c) x = funct(first=1) try: funct(second=2) @@ -728,6 +757,8 @@ def test_check_for_aliased_inputs(self): s1 = shared(b) s2 = shared(b) x1 = vector() + x2 = vector(shape=(3,)) + x3 = vector(shape=(1,)) # Assert cases we should not check for aliased inputs for d in [ @@ -735,27 +766,29 @@ def test_check_for_aliased_inputs(self): dict(outputs=[s1 + 1, s2 + 3]), dict(outputs=[s1 + 1], updates=[(s2, s2 + 3)]), dict(inputs=[x1], outputs=[x1 + 1], updates=[(s2, s2 + 3)]), + dict( + inputs=[In(x1, mutable=True)], outputs=[x1 + 1], updates=[(s2, s2 + 3)] + ), + dict( + inputs=[In(x2, mutable=True), In(x3, mutable=True)], + outputs=[x2 + 2, x3 + 3], + ), ]: if "inputs" not in d: d["inputs"] = [] f = function(**d) - assert not f._check_for_aliased_inputs, d + assert not f._potential_aliased_input_groups, d # Assert cases we should check for aliased inputs for d in [ dict( - inputs=[In(x1, borrow=True)], - outputs=[x1 + 1], - updates=[(s2, s2 + 3)], - ), - dict( - inputs=[In(x1, borrow=True, mutable=True)], - outputs=[x1 + 1], + inputs=[In(x1, mutable=True), In(x2, mutable=True)], + outputs=[x1 + 1, x2 + 2], updates=[(s2, s2 + 3)], ), dict( - inputs=[In(x1, mutable=True)], - outputs=[x1 + 1], + inputs=[In(x1, mutable=True), In(x3, mutable=True)], + outputs=[x1 + 1, x3 + 3], updates=[(s2, s2 + 3)], ), ]: @@ -763,13 +796,14 @@ def test_check_for_aliased_inputs(self): d["inputs"] = [] f = function(**d) - assert f._check_for_aliased_inputs, d + assert f._potential_aliased_input_groups, d def test_output_dictionary(self): # Tests that function works when outputs is a dictionary x = scalar() - f = function([x], outputs={"a": x, "c": x * 2, "b": x * 3, "1": x * 4}) + with pytest.warns(FutureWarning, match="output_keys is deprecated."): + f = function([x], outputs={"a": x, "c": x * 2, "b": x * 3, "1": x * 4}) outputs = f(10.0) @@ -784,7 +818,8 @@ def test_input_named_variables(self): x = scalar("x") y = scalar("y") - f = function([x, y], outputs={"a": x + y, "b": x * y}) + with pytest.warns(FutureWarning, match="output_keys is deprecated."): + f = function([x, y], outputs={"a": x + y, "b": x * y}) assert f(2, 4) == {"a": 6, "b": 8} assert f(2, y=4) == f(2, 4) @@ -799,9 +834,10 @@ def test_output_order_sorted(self): e1 = scalar("1") e2 = scalar("2") - f = function( - [x, y, z, e1, e2], outputs={"x": x, "y": y, "z": z, "1": e1, "2": e2} - ) + with pytest.warns(FutureWarning, match="output_keys is deprecated."): + f = function( + [x, y, z, e1, e2], outputs={"x": x, "y": y, "z": z, "1": e1, "2": e2} + ) assert "1" in str(f.outputs[0]) assert "2" in str(f.outputs[1]) @@ -819,7 +855,8 @@ def test_composing_function(self): a = x + y b = x * y - f = function([x, y], outputs={"a": a, "b": b}) + with pytest.warns(FutureWarning, match="output_keys is deprecated."): + f = function([x, y], outputs={"a": a, "b": b}) a = scalar("a") b = scalar("b") @@ -874,14 +911,17 @@ def test_deepcopy(self): a = scalar() # the a is for 'anonymous' (un-named). 
x, s = scalars("xs")
 
-        f = function(
-            [
-                x,
-                In(a, value=1.0, name="a"),
-                In(s, value=0.0, update=s + a * x, mutable=True),
-            ],
-            s + a * x,
-        )
+        with pytest.warns(
+            FutureWarning, match="Inputs with default values are deprecated."
+        ):
+            f = function(
+                [
+                    x,
+                    In(a, value=1.0, name="a", mutable=True),
+                    In(s, value=0.0, update=s + a * x, mutable=True),
+                ],
+                s + a * x,
+            )
         try:
             g = copy.deepcopy(f)
         except NotImplementedError as e:
@@ -899,7 +939,12 @@ def test_deepcopy(self):
         assert x not in g.container
         assert x not in g.value
         assert len(f.defaults) == len(g.defaults)
-        assert f._check_for_aliased_inputs is g._check_for_aliased_inputs
+        # Inputs `a` and `s` (indices 1 and 2) are mutable, so they form the only potentially aliased group
+        assert (
+            f._potential_aliased_input_groups
+            == g._potential_aliased_input_groups
+            == ((1, 2),)
+        )
         assert f.name == g.name
         assert f.maker.fgraph.name == g.maker.fgraph.name
         # print(f"{f.defaults = }")
@@ -930,14 +975,17 @@ def test_deepcopy_trust_input(self):
         a = dscalar()  # the a is for 'anonymous' (un-named).
         x, s = dscalars("xs")
 
-        f = function(
-            [
-                x,
-                In(a, value=1.0, name="a"),
-                In(s, value=0.0, update=s + a * x, mutable=True),
-            ],
-            s + a * x,
-        )
+        with pytest.warns(
+            FutureWarning, match="Inputs with default values are deprecated."
+        ):
+            f = function(
+                [
+                    x,
+                    In(a, value=1.0, name="a"),
+                    In(s, value=0.0, update=s + a * x, mutable=True),
+                ],
+                s + a * x,
+            )
         f.trust_input = True
         try:
             g = copy.deepcopy(f)
@@ -956,11 +1004,13 @@ def test_deepcopy_trust_input(self):
 
     def test_output_keys(self):
         x = vector()
-        f = function([x], {"vec": x**2})
+        with pytest.warns(FutureWarning, match="output_keys is deprecated."):
+            f = function([x], {"vec": x**2})
         o = f([2, 3, 4])
         assert isinstance(o, dict)
         assert np.allclose(o["vec"], [4, 9, 16])
-        g = copy.deepcopy(f)
+        with pytest.warns(FutureWarning, match="output_keys is deprecated."):
+            g = copy.deepcopy(f)
         o = g([2, 3, 4])
         assert isinstance(o, dict)
         assert np.allclose(o["vec"], [4, 9, 16])
@@ -969,7 +1019,10 @@ def test_deepcopy_shared_container(self):
         # Ensure that shared containers remain shared after a deep copy.
         a, x = scalars("ax")
 
-        h = function([In(a, value=0.0)], a)
+        with pytest.warns(
+            FutureWarning, match="Inputs with default values are deprecated."
+        ):
+            h = function([In(a, value=0.0)], a)
         f = function([x, In(a, value=h.container[a], implicit=True)], x + a)
 
         try:
@@ -993,14 +1046,17 @@ def test_pickle(self):
         a = scalar()  # the a is for 'anonymous' (un-named).
         x, s = scalars("xs")
 
-        f = function(
-            [
-                x,
-                In(a, value=1.0, name="a"),
-                In(s, value=0.0, update=s + a * x, mutable=True),
-            ],
-            s + a * x,
-        )
+        with pytest.warns(
+            FutureWarning, match="Inputs with default values are deprecated."
+        ):
+            f = function(
+                [
+                    x,
+                    In(a, value=1.0, name="a"),
+                    In(s, value=0.0, update=s + a * x, mutable=True),
+                ],
+                s + a * x,
+            )
 
         try:
             # Note that here we also test protocol 0 on purpose, since it
@@ -1094,25 +1150,31 @@ def test_multiple_functions(self):
         # some derived thing, whose inputs aren't all in the list
         list_of_things.append(a * x + s)
 
-        f1 = function(
-            [
-                x,
-                In(a, value=1.0, name="a"),
-                In(s, value=0.0, update=s + a * x, mutable=True),
-            ],
-            s + a * x,
-        )
+        with pytest.warns(
+            FutureWarning, match="Inputs with default values are deprecated."
+ ): + f1 = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=0.0, update=s + a * x, mutable=True), + ], + s + a * x, + ) list_of_things.append(f1) # now put in a function sharing container with the previous one - f2 = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=f1.container[s], update=s + a * x, mutable=True), - ], - s + a * x, - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + f2 = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=f1.container[s], update=s + a * x, mutable=True), + ], + s + a * x, + ) list_of_things.append(f2) assert isinstance(f2.container[s].storage, list) @@ -1120,7 +1182,10 @@ def test_multiple_functions(self): # now put in a function with non-scalar v_value = np.asarray([2, 3, 4.0], dtype=config.floatX) - f3 = function([x, In(v, value=v_value)], x + v) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + f3 = function([x, In(v, value=v_value)], x + v) list_of_things.append(f3) # try to pickle the entire things @@ -1252,23 +1317,29 @@ def __init__(self): self.e = a * x + s - self.f1 = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=0.0, update=s + a * x, mutable=True), - ], - s + a * x, - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + self.f1 = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=0.0, update=s + a * x, mutable=True), + ], + s + a * x, + ) - self.f2 = function( - [ - x, - In(a, value=1.0, name="a"), - In(s, value=self.f1.container[s], update=s + a * x, mutable=True), - ], - s + a * x, - ) + with pytest.warns( + FutureWarning, match="Inputs with default values are deprecated." + ): + self.f2 = function( + [ + x, + In(a, value=1.0, name="a"), + In(s, value=self.f1.container[s], update=s + a * x, mutable=True), + ], + s + a * x, + ) def test_empty_givens_updates(): @@ -1280,3 +1351,15 @@ def test_empty_givens_updates(): y = x * 2 function([In(x)], y, givens={}) function([In(x)], y, updates={}) + + +@pytest.mark.parametrize("trust_input", [True, False]) +def test_minimal_random_function_call_benchmark(trust_input, benchmark): + rng = random_generator_type() + x = normal(rng=rng, size=(100,)) + + f = function([In(rng, mutable=True)], x) + f.trust_input = trust_input + + rng_val = np.random.default_rng() + benchmark(f, rng_val)
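
Not part of the patch: a minimal sketch of the user-facing deprecations introduced above, using the same `function`/`In` API exercised in the updated tests. The warning messages are the ones added in pytensor/compile/function/types.py; the variable names below are illustrative.

# Illustrative sketch only -- not part of the patch.
import warnings

import pytensor
from pytensor.compile.io import In
from pytensor.tensor.type import scalar

x = scalar("x")
a = scalar("a")

# Inputs with default values (`In(..., value=...)`) now emit a FutureWarning
# when the function is compiled.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    pytensor.function([x, In(a, value=1.0)], a + x)
assert any(issubclass(w.category, FutureWarning) for w in caught)

# Dictionary outputs (`output_keys`) are deprecated the same way.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    pytensor.function([x], {"out": 2 * x})
assert any(issubclass(w.category, FutureWarning) for w in caught)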