From 2a1d8276d6dcc23767c0a66a924f663e790a3a23 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sun, 7 May 2023 03:51:11 +0800 Subject: [PATCH 1/5] feat: add code generator of ops. --- TensorFlow.NET.sln | 41 ++ Tensorflow.CodeGen/FunctionGenerator.cs | 550 +++++++++++++++++++ Tensorflow.CodeGen/GenOpsWriter.cs | 80 +++ Tensorflow.CodeGen/OpClassifier.cs | 39 ++ Tensorflow.CodeGen/Program.cs | 12 + Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 18 + Tensorflow.CodeGen/Utils.cs | 46 ++ 7 files changed, 786 insertions(+) create mode 100644 Tensorflow.CodeGen/FunctionGenerator.cs create mode 100644 Tensorflow.CodeGen/GenOpsWriter.cs create mode 100644 Tensorflow.CodeGen/OpClassifier.cs create mode 100644 Tensorflow.CodeGen/Program.cs create mode 100644 Tensorflow.CodeGen/Tensorflow.CodeGen.csproj create mode 100644 Tensorflow.CodeGen/Utils.cs diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 0c7d6e3c2..8d5488146 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -35,6 +35,10 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "helpers", "helpers", "{E1A5 EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistHolder", "helpers\Tensorflow.UnitTest.RedistHolder\Tensorflow.UnitTest.RedistHolder.csproj", "{62D543A2-8846-45A3-829B-5754B094A8E2}" EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{BADBB104-2F03-4824-A249-803A871D8122}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "protobuf.Text", "..\protobuf.Text\src\protobuf.Text\protobuf.Text.csproj", "{151B3A8A-8576-4190-BD58-F42944A49718}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -282,6 +286,42 @@ Global {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x64.Build.0 = Release|Any CPU {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x86.ActiveCfg = Release|Any CPU {62D543A2-8846-45A3-829B-5754B094A8E2}.Release|x86.Build.0 = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|Any CPU.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x64.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x64.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x86.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Debug|x86.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|Any CPU.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x64.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x64.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x86.ActiveCfg = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.GPU|x86.Build.0 = Debug|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|Any CPU.ActiveCfg = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|Any CPU.Build.0 = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.ActiveCfg = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.Build.0 = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.ActiveCfg = Release|Any CPU + {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.Build.0 = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + 
{151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.ActiveCfg = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.Build.0 = Debug|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.ActiveCfg = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.Build.0 = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.ActiveCfg = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.Build.0 = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.ActiveCfg = Release|Any CPU + {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -300,6 +340,7 @@ Global {9738D16A-CFA0-405C-A7DF-D3D203B0CB18} = {01A1787F-A9BE-4221-84E8-6360DD010AB6} {7DEA8760-E401-4872-81F3-405F185A13A0} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD} {62D543A2-8846-45A3-829B-5754B094A8E2} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} + {BADBB104-2F03-4824-A249-803A871D8122} = {E1A5D2B7-10AF-4876-85C0-7714EF274214} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A} diff --git a/Tensorflow.CodeGen/FunctionGenerator.cs b/Tensorflow.CodeGen/FunctionGenerator.cs new file mode 100644 index 000000000..d45203072 --- /dev/null +++ b/Tensorflow.CodeGen/FunctionGenerator.cs @@ -0,0 +1,550 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Reflection.Metadata.Ecma335; +using System.Text; +using System.Threading.Tasks; +using Microsoft.CodeAnalysis.CSharp; + +namespace Tensorflow.CodeGen +{ + public class FunctionGenerator + { + public void AppendFunction(OpDef op, StringBuilder sb) + { + // TODO: add descriptions + sb.Append("public static "); + int outputArgsCount = op.OutputArg.Count; + if (outputArgsCount > 1) + { + sb.Append("Tensor[] "); + } + else if (outputArgsCount == 1) + { + sb.Append("Tensor "); + } + else + { + sb.Append("Operation "); + } + string funcName = Utils.ConvertToUnderscore(op.Name); + var token = SyntaxFactory.ParseToken(funcName); + if (token.IsKeyword()) + { + funcName = $"_{funcName}"; + } + sb.Append($" {funcName}("); + + // define args + AppendArgs(op, sb); + sb.Append(")\n{\n"); + + // begin to write main body + sb.AppendLine("var _ctx = tf.Context;"); + sb.AppendLine("if(_ctx.executing_eagerly()){"); + + if(HasRefArgs(op)) + { + var possibleRefArg = op.InputArg.FirstOrDefault(x => x.IsRef, null); + sb.AppendLine($"throw new RuntimeError(\"{funcName} op does not support eager execution. 
Arg {possibleRefArg.Name} is a ref.\");"); + } + else + { + sb.Append("try\n{\n"); + + AppendFastPathExecute(op, sb); + if (outputArgsCount == 0) + { + sb.AppendLine("return null;"); + } + else if (outputArgsCount == 1) + { + sb.AppendLine("return _fast_path_result[0];"); + } + else + { + sb.AppendLine("return _fast_path_result;"); + } + + sb.AppendLine("}"); // try + + sb.Append("catch(Exception)\n{\n"); + sb.AppendLine("}"); // catch + + sb.Append("try\n{\n"); + AppendEagerFallbackCall(op, sb); + sb.AppendLine("}"); // try + + sb.Append("catch(Exception)\n{\n"); + sb.AppendLine("}"); // catch + } + + sb.AppendLine("}"); // if + + // begin to use op helper. + AppendOpHelperCall(op, sb); + sb.AppendLine("var _result = _op.outputs;"); + + // check if it needs to record gradient. + sb.Append("if(_execute.must_record_gradient())\n{\n"); + sb.Append("object[] _attrs = new object[]{"); + foreach (var attr in op.Attr) + { + string attrRealName = attr.Name; + if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) + { + attrRealName += "_"; + } + if (attr.Type == "type") + { + sb.Append($"\"{attr.Name}\", _op._get_attr_type(\"{attrRealName}\"), "); + } + else if (attr.Type == "int") + { + sb.Append($"\"{attr.Name}\", _op._get_attr_int(\"{attrRealName}\"), "); + } + else if (attr.Type == "bool") + { + sb.Append($"\"{attr.Name}\", _op._get_attr_bool(\"{attrRealName}\"), "); + } + else + { + sb.Append($"\"{attr.Name}\", _op.get_attr(\"{attr.Name}\"), "); + } + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("};\n"); + sb.AppendLine($"_execute.record_gradient(\"{op.Name}\", _op.inputs, _attrs, _result);"); + + sb.AppendLine("}"); // if + + if (outputArgsCount == 0) + { + sb.AppendLine("return _op;"); + } + else if (outputArgsCount == 1) + { + sb.AppendLine("return _result[0];"); + } + else + { + sb.AppendLine("return _result;"); + } + sb.AppendLine("}"); // body + + sb.AppendLine(); + + AppendEagerFallbackDefinition(op, sb); + } + + public void AppendArgs(OpDef op, StringBuilder sb) + { + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + if (!string.IsNullOrEmpty(arg.NumberAttr)) + { + sb.Append($"Tensors {argName}, "); + } + else + { + sb.Append($"Tensor {argName}, "); + } + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, (typeStr, value)) in attrValueDic) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) + { + realKey += "_"; + } + if (value != "NOVALUE") + { + sb.Append($"{typeStr} {realKey} = {value}, "); + } + else + { + sb.Append($"{typeStr} {realKey}, "); + } + } + sb.Append($"string? 
name = null"); + } + + public void AppendFastPathExecute(OpDef op, StringBuilder sb) + { + sb.Append($"var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, \"{op.Name}\", name, "); + foreach (var arg in op.InputArg) + { + string attrArgName = arg.Name; + if (SyntaxFactory.ParseToken(attrArgName).IsKeyword()) + { + attrArgName += "_"; + } + sb.Append($"{attrArgName}, "); + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, _) in attrValueDic) + { + sb.Append($"\"{key}\", {key}, "); + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("));\n"); + } + + public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) + { + string funcName = $"{Utils.ConvertToUnderscore(op.Name)}_eager_fallback"; + sb.Append($"return {funcName}("); + foreach (var arg in op.InputArg) + { + string inputArgRealName = arg.Name; + if (SyntaxFactory.ParseToken(inputArgRealName).IsKeyword()) + { + inputArgRealName += "_"; + } + sb.Append($"{inputArgRealName}, "); + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, _) in attrValueDic) + { + string keyRealName = key; + if (SyntaxFactory.ParseToken(keyRealName).IsKeyword()) + { + keyRealName += "_"; + } + sb.Append($"{key}: {keyRealName}, "); + } + sb.Append("name: name, ctx: _ctx);\n"); + } + + public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) + { + sb.Append("public static Tensor"); + int outputArgsCount = op.OutputArg.Count; + if (outputArgsCount > 1) + { + sb.Append("[]"); + } + string opName = op.Name; + string funcName = Utils.ConvertToUnderscore(op.Name); + sb.Append($" {funcName}_eager_fallback("); + AppendFallBackFunctionArgs(op, sb); + sb.Append(")\n{\n"); + + var possibleRefArg = op.InputArg.FirstOrDefault(x => x.IsRef, null); + if (possibleRefArg is not null) + { + sb.AppendLine($"throw new RuntimeError($\"{funcName} op does not support eager execution." 
+ + $" Arg '{possibleRefArg.Name}' is a ref.\");"); + sb.AppendLine("}"); // body + return; + } + + sb.Append("Tensor[] _inputs_flat = new Tensor[]{"); + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + sb.Append($"{realArgName}, "); + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("};\n"); + + sb.Append("object[] _attrs = new object[]{"); + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var attr in op.Attr) + { + if (attr.Type == "type") + { + bool found = false; + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + if (arg.TypeAttr == attr.Name) + { + sb.Append($"\"{attr.Name}\", {realArgName}.dtype, "); + found = true; + break; + } + } + if (!found) + { + if (attr.Name.StartsWith("T") && attr.Name.Length > 1) + { + string paramName = attr.Name.Substring(1); + if (SyntaxFactory.ParseToken(paramName).IsKeyword()) + { + paramName = $"{paramName}_"; + } + sb.Append($"\"{attr.Name}\", {paramName}.dtype, "); + } + else + { + string attrRealName = attr.Name; + if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) + { + attrRealName = $"{attrRealName}_"; + } + sb.Append($"\"{attr.Name}\", {attrRealName}, "); + } + } + } + else if(attr.Type == "int" && (op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name))) + { + bool found = false; + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + if (arg.NumberAttr == attr.Name) + { + sb.Append($"\"{attr.Name}\", {realArgName}.Length, "); + found = true; + break; + } + } + } + else + { + sb.Append($"\"{attr.Name}\", {attr.Name}, "); + } + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + sb.Append("};\n"); + + sb.AppendLine($"var _result = _execute.execute(\"{op.Name}\", {outputArgsCount}, inputs: _inputs_flat, " + + $"attrs: _attrs, ctx: ctx, name: name);"); + + sb.Append("if(_execute.must_record_gradient())\n{\n"); + + sb.AppendLine($"_execute.record_gradient(\"{op.Name}\", _inputs_flat, _attrs, _result);"); + + sb.AppendLine("}"); // if + + if (outputArgsCount == 0) + { + sb.AppendLine("return null;"); + } + else if (outputArgsCount == 1) + { + sb.AppendLine("return _result[0];"); + } + else + { + sb.AppendLine("return _result;"); + } + + sb.AppendLine("}"); // body + } + + public void AppendFallBackFunctionArgs(OpDef op, StringBuilder sb) + { + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + if (!string.IsNullOrEmpty(arg.NumberAttr)) + { + sb.Append($"Tensors {argName}, "); + } + else + { + sb.Append($"Tensor {argName}, "); + } + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, (typeStr, _)) in attrValueDic) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) + { + realKey += "_"; + } + sb.Append($"{typeStr} {realKey}, "); + } + sb.Append($"string name, Context ctx"); + } + + public void AppendOpHelperCall(OpDef op, StringBuilder sb) + { + sb.AppendLine("Dictionary keywords = new();"); + foreach (var arg in op.InputArg) + { + 
string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName += "_"; + } + sb.AppendLine($"keywords[\"{arg.Name}\"] = {realArgName};"); + } + var attrValueDic = GetAttrsDefaultValue(op); + foreach (var (key, _) in attrValueDic) + { + sb.Append($"keywords[\"{key}\"] = {key};"); + } + sb.AppendLine($"var _op = tf.OpDefLib._apply_op_helper(\"{op.Name}\", name, keywords);"); + } + + // key, (type string, default value) + public Dictionary GetAttrsDefaultValue(OpDef op) + { + Dictionary dic = new(); + foreach (var attr in op.Attr) + { + if (attr.Type == "type") + { + bool found = op.InputArg.Any(x => x.TypeAttr == attr.Name); + if (!found) + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); + string enumPath = typeof(TF_DataType).Name + "." + name; + dic[attr.Name] = ("TF_DataType", enumPath); + } + else + { + dic[attr.Name] = ("TF_DataType", "NOVALUE"); + } + } + } + else if (attr.Type == "int") + { + if(op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name)) + { + continue; + } + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) + { + dic[attr.Name] = ("int", attr.DefaultValue.I.ToString()); + } + else + { + dic[attr.Name] = ("int", "0"); + } + } + else if (attr.Type == "float") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) + { + dic[attr.Name] = ("float", attr.DefaultValue.F.ToString() + "f"); + } + else + { + dic[attr.Name] = ("float", "NOVALUE"); + } + } + else if (attr.Type == "string") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + dic[attr.Name] = ("string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\""); + } + else + { + dic[attr.Name] = ("string", "NOVALUE"); + } + } + else if (attr.Type == "bool") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) + { + dic[attr.Name] = ("bool", attr.DefaultValue.B.ToString().ToLower()); + } + else + { + dic[attr.Name] = ("bool", "NOVALUE"); + } + } + else if (attr.Type == "shape") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) + { + dic[attr.Name] = ("Shape", $"null"); + } + else + { + dic[attr.Name] = ("Shape", "NOVALUE"); + } + } + else if (attr.Type == "list(type)") + { + dic[attr.Name] = ("TF_DataType[]", "NOVALUE"); + } + else if (attr.Type == "list(shape)") + { + dic[attr.Name] = ("Shape[]", "NOVALUE"); + } + else if (attr.Type == "list(string)") + { + dic[attr.Name] = ("string[]", "NOVALUE"); + } + else if (attr.Type == "list(int)") + { + dic[attr.Name] = ("int[]", "NOVALUE"); + } + else if (attr.Type == "list(float)") + { + dic[attr.Name] = ("float[]", "NOVALUE"); + } + else if (attr.Type == "func") + { + dic[attr.Name] = ("Func", "NOVALUE"); + } + else if (attr.Type == "list(func)") + { + dic[attr.Name] = ("Func[]", "NOVALUE"); + } + else if (attr.Type == "tensor") + { + dic[attr.Name] = ("TensorProto", "NOVALUE"); + } + else + { + throw new NotImplementedException(); + } + } + return dic; + } + + private static bool HasRefArgs(OpDef op) + { + return op.InputArg.Any(x => x.IsRef); + } + } +} diff --git a/Tensorflow.CodeGen/GenOpsWriter.cs b/Tensorflow.CodeGen/GenOpsWriter.cs new file mode 100644 index 000000000..83ca6e0b9 --- /dev/null +++ 
b/Tensorflow.CodeGen/GenOpsWriter.cs @@ -0,0 +1,80 @@ +using Protobuf.Text; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace Tensorflow.CodeGen +{ + public class GenOpsWriter + { + private string _basePath; + private Dictionary _opMap; + private OpClassifier _opClassifier; + private FunctionGenerator _g = new(); + + public GenOpsWriter(string basePath, string pythonFilesDirectory, string opDefFilename) + { + _basePath = basePath; + + var opDefs = ReadAllOpDefs(opDefFilename); + _opMap = opDefs.Op.ToDictionary( + x => Tensorflow.CodeGen.Utils.ConvertToUnderscore(x.Name), x => x); + _opClassifier = new OpClassifier(pythonFilesDirectory); + } + + public void WriteAll() + { + foreach(var (target, set) in _opClassifier.OpSet) + { + StringBuilder sb = new StringBuilder(); + + // Write file header. + sb.AppendLine("/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/"); + sb.AppendLine(); + + // Add commonly used namespaces. + sb.AppendLine("using Tensorflow.Eager;"); + sb.AppendLine("using Tensorflow.Contexts;"); + sb.AppendLine("using static Tensorflow.Binding;"); + sb.AppendLine(); + + // Specify the namespace + sb.AppendLine("namespace Tensorflow;"); + sb.AppendLine(); + + // Write class name + sb.AppendLine($"internal static class {target}"); + sb.AppendLine("{"); + + foreach(var funcName in set) + { + if(_opMap.ContainsKey(funcName)) + { + var opDef = _opMap[funcName]; + _g.AppendFunction(opDef, sb); + } + else if (funcName.StartsWith("_")) + { + var opDef = _opMap[funcName.Substring(1)]; + _g.AppendFunction(opDef, sb); + } + } + + // Close class scope. + sb.AppendLine("}"); + + string fullFilePath = Path.Combine(_basePath, $"{target}.cs"); + File.WriteAllText(fullFilePath, sb.ToString()); + } + } + + private OpList ReadAllOpDefs(string path) + { + var text = File.ReadAllText(path); + var opDefs = OpList.Parser.ParseText(text); + return opDefs; + } + } +} diff --git a/Tensorflow.CodeGen/OpClassifier.cs b/Tensorflow.CodeGen/OpClassifier.cs new file mode 100644 index 000000000..2ea2f35ef --- /dev/null +++ b/Tensorflow.CodeGen/OpClassifier.cs @@ -0,0 +1,39 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using System.Text.RegularExpressions; + +namespace Tensorflow.CodeGen +{ + public class OpClassifier + { + private static readonly string _filenamePattern = @"^gen_[a-z]*_ops.py$"; + private static readonly string _pythonFunctionPattern = @"def\s+(\w+)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*\w+\s*=None\s*\):"; + private Dictionary> _opSet = new(); + public Dictionary> OpSet => _opSet; + public OpClassifier(string pythonFileFolder) + { + DirectoryInfo directory = new DirectoryInfo(pythonFileFolder); + + foreach (FileInfo file in directory.GetFiles()) + { + if (Regex.IsMatch(file.Name, _filenamePattern)) + { + string filenamePrefix = file.Name.Split('.')[0]; + string content = File.ReadAllText(file.FullName); + var matches = Regex.Matches(content, _pythonFunctionPattern); + foreach(Match match in matches) + { + var funcName = match.Groups[1].Value; + if (!funcName.EndsWith("_eager_fallback")) + { + _opSet.SetDefault(filenamePrefix, new HashSet()).Add(funcName); + } + } + } + } + } + } +} diff --git a/Tensorflow.CodeGen/Program.cs b/Tensorflow.CodeGen/Program.cs new file mode 100644 index 000000000..d46dcdcba --- /dev/null +++ b/Tensorflow.CodeGen/Program.cs @@ -0,0 +1,12 @@ +using OneOf.Types; +using 
Protobuf.Text; +using System.Diagnostics; +using System.Text; +using System.Xml.Linq; +using Tensorflow.CodeGen; + +GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops", + @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops", + @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt"); + +writer.WriteAll(); diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj new file mode 100644 index 000000000..61273d013 --- /dev/null +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -0,0 +1,18 @@ + + + + Exe + net6.0 + enable + enable + + + + + + + + + + + diff --git a/Tensorflow.CodeGen/Utils.cs b/Tensorflow.CodeGen/Utils.cs new file mode 100644 index 000000000..8cf21dee6 --- /dev/null +++ b/Tensorflow.CodeGen/Utils.cs @@ -0,0 +1,46 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection.Metadata.Ecma335; +using System.Text; +using System.Threading.Tasks; + +namespace Tensorflow.CodeGen +{ + public static class Utils + { + public static string ConvertToUnderscore(string input) + { + if (string.IsNullOrEmpty(input)) + { + return input; + } + + StringBuilder result = new StringBuilder(); + + int state = 0; // the previous char was not lowered. + for (int i = 0; i < input.Length; i++) + { + char current = input[i]; + + // 首字母不需要添加下划线 + if (i != 0 && char.IsUpper(current)) + { + if(state == 0) + { + result.Append("_"); + state = 1; + } + result.Append(char.ToLower(current)); + } + else + { + result.Append(char.ToLower(current)); + state = 0; + } + } + + return result.ToString(); + } + } +} From 2295a04ecd3af4b73383e4f17dec29b6e902ab3b Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Sun, 7 May 2023 22:49:57 +0800 Subject: [PATCH 2/5] fix: revise wrong behaviors of op code generator. --- Tensorflow.CodeGen/FunctionGenerator.cs | 284 +++++++++++++------ Tensorflow.CodeGen/GenOpsWriter.cs | 4 +- Tensorflow.CodeGen/OpClassifier.cs | 30 +- Tensorflow.CodeGen/Program.cs | 2 + Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 5 +- Tensorflow.CodeGen/Utils.cs | 15 +- 6 files changed, 242 insertions(+), 98 deletions(-) diff --git a/Tensorflow.CodeGen/FunctionGenerator.cs b/Tensorflow.CodeGen/FunctionGenerator.cs index d45203072..b3b695c58 100644 --- a/Tensorflow.CodeGen/FunctionGenerator.cs +++ b/Tensorflow.CodeGen/FunctionGenerator.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; +using System.Linq.Expressions; using System.Reflection.Metadata.Ecma335; using System.Text; using System.Threading.Tasks; @@ -16,17 +17,17 @@ public void AppendFunction(OpDef op, StringBuilder sb) // TODO: add descriptions sb.Append("public static "); int outputArgsCount = op.OutputArg.Count; - if (outputArgsCount > 1) + if (outputArgsCount == 0) { - sb.Append("Tensor[] "); + sb.Append("Operation "); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.Append("Tensor "); } else { - sb.Append("Operation "); + sb.Append("Tensor[] "); } string funcName = Utils.ConvertToUnderscore(op.Name); var token = SyntaxFactory.ParseToken(funcName); @@ -42,6 +43,17 @@ public void AppendFunction(OpDef op, StringBuilder sb) // begin to write main body sb.AppendLine("var _ctx = tf.Context;"); + + var attrValueDic = GetAttrsDefaultValue(op, out var dynamicDefaultValues); + // deal with dynamic default values. 
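+ // For example, a "shape" attr whose proto default is a concrete shape is exposed as a
+ // parameter defaulting to null; the lines emitted below assign the real default
+ // (e.g. "arg = new Shape(...)") at runtime when the caller left it null. The expressions
+ // come from GetAttrsDefaultValue via its dynamicDefaultValues out-parameter.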
+ foreach(var (name, expr) in dynamicDefaultValues) + { + sb.AppendLine($"if({name} is null)"); + sb.AppendLine("{"); + sb.AppendLine($"{name} = {expr};"); + sb.AppendLine("}"); + } + sb.AppendLine("if(_ctx.executing_eagerly()){"); if(HasRefArgs(op)) @@ -58,7 +70,7 @@ public void AppendFunction(OpDef op, StringBuilder sb) { sb.AppendLine("return null;"); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.AppendLine("return _fast_path_result[0];"); } @@ -82,6 +94,17 @@ public void AppendFunction(OpDef op, StringBuilder sb) sb.AppendLine("}"); // if + foreach(var (name, type, value) in attrValueDic.Where(x => x.Item2 == "string")) + { + if(value != "NOVALUE") + { + sb.AppendLine($"if({name} is null)"); + sb.AppendLine("{"); + sb.AppendLine($"{name} = {value};"); + sb.AppendLine("}"); + } + } + // begin to use op helper. AppendOpHelperCall(op, sb); sb.AppendLine("var _result = _op.outputs;"); @@ -126,7 +149,7 @@ public void AppendFunction(OpDef op, StringBuilder sb) { sb.AppendLine("return _op;"); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.AppendLine("return _result[0];"); } @@ -160,8 +183,8 @@ public void AppendArgs(OpDef op, StringBuilder sb) sb.Append($"Tensor {argName}, "); } } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, (typeStr, value)) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var dynamicDefaultValues); + foreach (var (key, typeStr, value) in attrValueDic.Where(x => x.Item3 == "NOVALUE")) { var token = SyntaxFactory.ParseToken(key); string realKey = key; @@ -169,21 +192,25 @@ public void AppendArgs(OpDef op, StringBuilder sb) { realKey += "_"; } - if (value != "NOVALUE") - { - sb.Append($"{typeStr} {realKey} = {value}, "); - } - else + sb.Append($"{typeStr} {realKey}, "); + } + foreach (var (key, typeStr, value) in attrValueDic.Where(x => x.Item3 != "NOVALUE")) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) { - sb.Append($"{typeStr} {realKey}, "); + realKey += "_"; } + sb.Append($"{typeStr} {realKey} = {value}, "); } sb.Append($"string? 
name = null"); } public void AppendFastPathExecute(OpDef op, StringBuilder sb) { - sb.Append($"var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, \"{op.Name}\", name, "); + sb.Append($"var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, \"{op.Name}\", name)"); + sb.Append("{ args = new object[]{ "); foreach (var arg in op.InputArg) { string attrArgName = arg.Name; @@ -193,16 +220,23 @@ public void AppendFastPathExecute(OpDef op, StringBuilder sb) } sb.Append($"{attrArgName}, "); } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, _) in attrValueDic) + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') { - sb.Append($"\"{key}\", {key}, "); + sb.Remove(sb.Length - 2, 2); + } + + sb.Append("}, attrs = new Dictionary(){ "); + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, _, _) in attrValueDic) + { + sb.Append($"[\"{key}\"] = {key}, "); } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') { sb.Remove(sb.Length - 2, 2); } - sb.Append("));\n"); + sb.Append("}});\n"); } public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) @@ -218,8 +252,8 @@ public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) } sb.Append($"{inputArgRealName}, "); } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, _) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, _, _) in attrValueDic) { string keyRealName = key; if (SyntaxFactory.ParseToken(keyRealName).IsKeyword()) @@ -233,11 +267,19 @@ public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) { - sb.Append("public static Tensor"); + sb.Append("public static "); int outputArgsCount = op.OutputArg.Count; - if (outputArgsCount > 1) + if (outputArgsCount == 0) + { + sb.Append("Operation "); + } + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) + { + sb.Append("Tensor "); + } + else { - sb.Append("[]"); + sb.Append("Tensor[] "); } string opName = op.Name; string funcName = Utils.ConvertToUnderscore(op.Name); @@ -254,24 +296,47 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) return; } - sb.Append("Tensor[] _inputs_flat = new Tensor[]{"); - foreach (var arg in op.InputArg) + if(op.InputArg.Any(x => !string.IsNullOrEmpty(x.NumberAttr))) { - string realArgName = arg.Name; - if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + sb.AppendLine("List _inputs_flat_list = new();"); + foreach (var arg in op.InputArg) { - realArgName = $"{realArgName}_"; + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + if (string.IsNullOrEmpty(arg.NumberAttr)) + { + sb.AppendLine($"_inputs_flat_list.Add({realArgName});"); + } + else + { + sb.AppendLine($"_inputs_flat_list.AddRange({realArgName});"); + } } - sb.Append($"{realArgName}, "); + sb.AppendLine($"var _inputs_flat = _inputs_flat_list.ToArray();"); } - if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + else { - sb.Remove(sb.Length - 2, 2); + sb.Append("Tensor[] _inputs_flat = new Tensor[]{"); + foreach (var arg in op.InputArg) + { + string realArgName = arg.Name; + if (SyntaxFactory.ParseToken(realArgName).IsKeyword()) + { + realArgName = $"{realArgName}_"; + } + sb.Append($"{realArgName}, "); + } + if (sb[sb.Length - 1] == ' ' && sb[sb.Length - 2] == ',') + { + sb.Remove(sb.Length - 2, 2); + } + 
sb.Append("};\n"); } - sb.Append("};\n"); sb.Append("object[] _attrs = new object[]{"); - var attrValueDic = GetAttrsDefaultValue(op); foreach (var attr in op.Attr) { if (attr.Type == "type") @@ -293,27 +358,15 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) } if (!found) { - if (attr.Name.StartsWith("T") && attr.Name.Length > 1) - { - string paramName = attr.Name.Substring(1); - if (SyntaxFactory.ParseToken(paramName).IsKeyword()) - { - paramName = $"{paramName}_"; - } - sb.Append($"\"{attr.Name}\", {paramName}.dtype, "); - } - else + string attrRealName = attr.Name; + if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) { - string attrRealName = attr.Name; - if (SyntaxFactory.ParseToken(attrRealName).IsKeyword()) - { - attrRealName = $"{attrRealName}_"; - } - sb.Append($"\"{attr.Name}\", {attrRealName}, "); + attrRealName = $"{attrRealName}_"; } + sb.Append($"\"{attr.Name}\", {attrRealName}, "); } } - else if(attr.Type == "int" && (op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name))) + else if(attr.Type == "int" && op.InputArg.Any(x => x.NumberAttr == attr.Name)) { bool found = false; foreach (var arg in op.InputArg) @@ -355,7 +408,7 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb) { sb.AppendLine("return null;"); } - else if (outputArgsCount == 1) + else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)) { sb.AppendLine("return _result[0];"); } @@ -386,8 +439,8 @@ public void AppendFallBackFunctionArgs(OpDef op, StringBuilder sb) sb.Append($"Tensor {argName}, "); } } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, (typeStr, _)) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, typeStr, _) in attrValueDic) { var token = SyntaxFactory.ParseToken(key); string realKey = key; @@ -412,18 +465,19 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) } sb.AppendLine($"keywords[\"{arg.Name}\"] = {realArgName};"); } - var attrValueDic = GetAttrsDefaultValue(op); - foreach (var (key, _) in attrValueDic) + var attrValueDic = GetAttrsDefaultValue(op, out var _); + foreach (var (key, _, _) in attrValueDic) { - sb.Append($"keywords[\"{key}\"] = {key};"); + sb.AppendLine($"keywords[\"{key}\"] = {key};"); } sb.AppendLine($"var _op = tf.OpDefLib._apply_op_helper(\"{op.Name}\", name, keywords);"); } - // key, (type string, default value) - public Dictionary GetAttrsDefaultValue(OpDef op) + // name, type string, default value + public List<(string, string, string)> GetAttrsDefaultValue(OpDef op, out Dictionary dynamicDefaultValues) { - Dictionary dic = new(); + dynamicDefaultValues = new(); + List<(string, string, string)> res = new(); foreach (var attr in op.Attr) { if (attr.Type == "type") @@ -435,111 +489,177 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) { string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); string enumPath = typeof(TF_DataType).Name + "." 
+ name; - dic[attr.Name] = ("TF_DataType", enumPath); + res.Add((attr.Name, "TF_DataType", enumPath)); } else { - dic[attr.Name] = ("TF_DataType", "NOVALUE"); + res.Add((attr.Name, "TF_DataType", "NOVALUE")); } } } else if (attr.Type == "int") { - if(op.InputArg.Any(x => x.NumberAttr == attr.Name) || op.OutputArg.Any(x => x.NumberAttr == attr.Name)) + if(op.InputArg.Any(x => x.NumberAttr == attr.Name)) { continue; } if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) { - dic[attr.Name] = ("int", attr.DefaultValue.I.ToString()); + res.Add((attr.Name, "int", attr.DefaultValue.I.ToString())); } else { - dic[attr.Name] = ("int", "0"); + res.Add((attr.Name, "int", "0")); } } else if (attr.Type == "float") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) { - dic[attr.Name] = ("float", attr.DefaultValue.F.ToString() + "f"); + res.Add((attr.Name, "float", attr.DefaultValue.F.ToString() + "f")); } else { - dic[attr.Name] = ("float", "NOVALUE"); + res.Add((attr.Name, "float", "NOVALUE")); } } else if (attr.Type == "string") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) { - dic[attr.Name] = ("string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\""); + res.Add((attr.Name, "string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\"")); } else { - dic[attr.Name] = ("string", "NOVALUE"); + res.Add((attr.Name, "string", "NOVALUE")); } } else if (attr.Type == "bool") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) { - dic[attr.Name] = ("bool", attr.DefaultValue.B.ToString().ToLower()); + res.Add((attr.Name, "bool", attr.DefaultValue.B.ToString().ToLower())); } else { - dic[attr.Name] = ("bool", "NOVALUE"); + res.Add((attr.Name, "bool", "NOVALUE")); } } else if (attr.Type == "shape") { if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) { - dic[attr.Name] = ("Shape", $"null"); + if (attr.DefaultValue.Shape.UnknownRank) + { + res.Add((attr.Name, "Shape", $"null")); + } + else + { + Shape shape = new Shape(attr.DefaultValue.Shape); + string expression = $"new Shape({string.Join(", ", shape.dims)})"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "Shape", $"null")); + } } else { - dic[attr.Name] = ("Shape", "NOVALUE"); + res.Add((attr.Name, "Shape", "NOVALUE")); } } else if (attr.Type == "list(type)") { - dic[attr.Name] = ("TF_DataType[]", "NOVALUE"); + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.Type) + { + values.Add(value.as_tf_dtype()); + } + string expression = "new TF_DataType[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "TF_DataType[]", $"null")); + } + else + { + res.Add((attr.Name, "TF_DataType[]", "NOVALUE")); + } } else if (attr.Type == "list(shape)") { - dic[attr.Name] = ("Shape[]", "NOVALUE"); + res.Add((attr.Name, "Shape[]", "NOVALUE")); } else if (attr.Type == "list(string)") { - dic[attr.Name] = ("string[]", "NOVALUE"); + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.S) + { + values.Add(value.ToStringUtf8()); + } + string expression = "new string[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = 
expression; + res.Add((attr.Name, "string[]", $"null")); + } + else + { + res.Add((attr.Name, "string[]", "NOVALUE")); + } } else if (attr.Type == "list(int)") { - dic[attr.Name] = ("int[]", "NOVALUE"); + if(attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach(var value in attr.DefaultValue.List.I) + { + values.Add((int)value); + } + string expression = "new int[]{" + $"{string.Join(", ", values)}" +"}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "int[]", $"null")); + } + else + { + res.Add((attr.Name, "int[]", "NOVALUE")); + } } else if (attr.Type == "list(float)") { - dic[attr.Name] = ("float[]", "NOVALUE"); + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.F) + { + values.Add(value); + } + string expression = "new float[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "float[]", $"null")); + } + else + { + res.Add((attr.Name, "float[]", "NOVALUE")); + } } else if (attr.Type == "func") { - dic[attr.Name] = ("Func", "NOVALUE"); + res.Add((attr.Name, "Func", "NOVALUE")); } else if (attr.Type == "list(func)") { - dic[attr.Name] = ("Func[]", "NOVALUE"); + res.Add((attr.Name, "Func[]", "NOVALUE")); } else if (attr.Type == "tensor") { - dic[attr.Name] = ("TensorProto", "NOVALUE"); + res.Add((attr.Name, "TensorProto", "NOVALUE")); } else { throw new NotImplementedException(); } } - return dic; + return res; } private static bool HasRefArgs(OpDef op) diff --git a/Tensorflow.CodeGen/GenOpsWriter.cs b/Tensorflow.CodeGen/GenOpsWriter.cs index 83ca6e0b9..2cd7bca50 100644 --- a/Tensorflow.CodeGen/GenOpsWriter.cs +++ b/Tensorflow.CodeGen/GenOpsWriter.cs @@ -21,7 +21,7 @@ public GenOpsWriter(string basePath, string pythonFilesDirectory, string opDefFi var opDefs = ReadAllOpDefs(opDefFilename); _opMap = opDefs.Op.ToDictionary( x => Tensorflow.CodeGen.Utils.ConvertToUnderscore(x.Name), x => x); - _opClassifier = new OpClassifier(pythonFilesDirectory); + _opClassifier = new OpClassifier(pythonFilesDirectory, opDefs.Op.Select(x => Utils.ConvertToUnderscore(x.Name))); } public void WriteAll() @@ -45,7 +45,7 @@ public void WriteAll() sb.AppendLine(); // Write class name - sb.AppendLine($"internal static class {target}"); + sb.AppendLine($"public static class {target}"); sb.AppendLine("{"); foreach(var funcName in set) diff --git a/Tensorflow.CodeGen/OpClassifier.cs b/Tensorflow.CodeGen/OpClassifier.cs index 2ea2f35ef..eaad3fec8 100644 --- a/Tensorflow.CodeGen/OpClassifier.cs +++ b/Tensorflow.CodeGen/OpClassifier.cs @@ -10,27 +10,39 @@ namespace Tensorflow.CodeGen public class OpClassifier { private static readonly string _filenamePattern = @"^gen_[a-z]*_ops.py$"; - private static readonly string _pythonFunctionPattern = @"def\s+(\w+)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*\w+\s*=None\s*\):"; + private static readonly string _pythonFunctionPattern = @"def\s+(\w+\d*\w*)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*name=None\):"; private Dictionary> _opSet = new(); public Dictionary> OpSet => _opSet; - public OpClassifier(string pythonFileFolder) + public OpClassifier(string pythonFileFolder, IEnumerable funcNames) { DirectoryInfo directory = new DirectoryInfo(pythonFileFolder); + Dictionary fileContentMap = new(); foreach (FileInfo file in directory.GetFiles()) { if (Regex.IsMatch(file.Name, _filenamePattern)) { + 
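+ // Cache the text of every matching gen_*_ops.py file so each op name can be
+ // checked against all of them in the loop below.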
Console.WriteLine(file.Name); string filenamePrefix = file.Name.Split('.')[0]; string content = File.ReadAllText(file.FullName); - var matches = Regex.Matches(content, _pythonFunctionPattern); - foreach(Match match in matches) + fileContentMap[filenamePrefix] = content; + } + } + + foreach(var funcName in funcNames) + { + Console.WriteLine(funcName); + string funcPattern = @$"^def\s+{funcName}\("; + string fallbackFuncPattern = @$"^def\s+{funcName}_eager_fallback\("; + foreach (var (target, content) in fileContentMap) + { + if(content.Contains($"def {funcName}") && content.Contains($"def {funcName}_eager_fallback")) + { + _opSet.SetDefault(target, new HashSet()).Add(funcName); + } + else if (content.Contains($"def _{funcName}") && content.Contains($"def _{funcName}_eager_fallback")) { - var funcName = match.Groups[1].Value; - if (!funcName.EndsWith("_eager_fallback")) - { - _opSet.SetDefault(filenamePrefix, new HashSet()).Add(funcName); - } + _opSet.SetDefault(target, new HashSet()).Add(funcName); } } } diff --git a/Tensorflow.CodeGen/Program.cs b/Tensorflow.CodeGen/Program.cs index d46dcdcba..a26031cb3 100644 --- a/Tensorflow.CodeGen/Program.cs +++ b/Tensorflow.CodeGen/Program.cs @@ -5,6 +5,8 @@ using System.Xml.Linq; using Tensorflow.CodeGen; +//Console.WriteLine(Utils.ConvertToUnderscore("LRN")); + GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops", @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops", @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt"); diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index 61273d013..a052eb692 100644 --- a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -1,4 +1,4 @@ - + Exe @@ -9,10 +9,11 @@ + - + diff --git a/Tensorflow.CodeGen/Utils.cs b/Tensorflow.CodeGen/Utils.cs index 8cf21dee6..608222e01 100644 --- a/Tensorflow.CodeGen/Utils.cs +++ b/Tensorflow.CodeGen/Utils.cs @@ -18,15 +18,24 @@ public static string ConvertToUnderscore(string input) StringBuilder result = new StringBuilder(); - int state = 0; // the previous char was not lowered. + int state = 1; // the previous char was not lowered. for (int i = 0; i < input.Length; i++) { char current = input[i]; // 首字母不需要添加下划线 - if (i != 0 && char.IsUpper(current)) + if (char.IsUpper(current)) { - if(state == 0) + if(i > 0) + { + char pre = input[i - 1]; + if (char.IsDigit(pre)) + { + result.Append(char.ToLower(current)); + continue; + } + } + if (state == 0) { result.Append("_"); state = 1; From 28642568a22a242dbbddd375472ee1aeb90e7dce Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Mon, 8 May 2023 01:57:18 +0800 Subject: [PATCH 3/5] feat: description generator of op code. 
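The generator reads the api_def_*.pbtxt files under tensorflow/core/api_def/base_api and turns each op's
summary, description and per-argument texts into XML doc comments written immediately above the generated
wrapper. As a rough illustration (hand-written here, not taken from a real api_def file, and assuming the
standard C# <summary>/<param>/<returns> doc tags), the emitted header is intended to look like:

    /// <summary>
    /// Computes the sum of elements across dimensions of a tensor.
    /// </summary>
    /// <param name="input">The tensor to reduce.</param>
    /// <param name="axis">The dimensions to reduce.</param>
    /// <returns>The reduced tensor.</returns>

followed by the wrapper signature produced by FunctionGenerator.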
--- Tensorflow.CodeGen/DescriptionGenerator.cs | 263 +++++++++++++++++++ Tensorflow.CodeGen/FunctionGenerator.cs | 201 +------------- Tensorflow.CodeGen/GenOpsWriter.cs | 26 +- Tensorflow.CodeGen/Program.cs | 3 +- Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 2 +- Tensorflow.CodeGen/Utils.cs | 199 +++++++++++++- 6 files changed, 482 insertions(+), 212 deletions(-) create mode 100644 Tensorflow.CodeGen/DescriptionGenerator.cs diff --git a/Tensorflow.CodeGen/DescriptionGenerator.cs b/Tensorflow.CodeGen/DescriptionGenerator.cs new file mode 100644 index 000000000..0437370a1 --- /dev/null +++ b/Tensorflow.CodeGen/DescriptionGenerator.cs @@ -0,0 +1,263 @@ +using Microsoft.CodeAnalysis.CSharp; +using Protobuf.Text; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection.Metadata.Ecma335; +using System.Text; +using System.Text.RegularExpressions; +using System.Threading.Tasks; + +namespace Tensorflow.CodeGen +{ + public class DescriptionGenerator + { + private static readonly string replaceStrInner = "~~%~~"; + private static readonly string replaceStrInnerQuotationMarks = "^%^"; + Dictionary> _opDescriptions = new Dictionary>(); + Dictionary _opDescriptionDefs = new Dictionary(); + public DescriptionGenerator(string apiDefDirectory) + { + DirectoryInfo directory = new DirectoryInfo(apiDefDirectory); + + int errors = 0; + foreach (FileInfo file in directory.GetFiles()) + { + string target = file.Name.Split('.')[0].Split('_').Last(); + OpDef op = null; + try + { + op = ReadOpDefs(file.FullName).Op[0]; + } + catch + { + errors++; + continue; + } + _opDescriptionDefs[target] = op; + _opDescriptions[target] = new Dictionary(); + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + _opDescriptions[target][argName] = arg.Description ?? ""; + } + foreach (var arg in op.Attr) + { + var token = SyntaxFactory.ParseToken(arg.Name); + string realKey = arg.Name; + if (token.IsKeyword()) + { + realKey += "_"; + } + _opDescriptions[target][realKey] = arg.Description ?? ""; + } + _opDescriptions[target]["SUMMARY"] = op.Summary ?? ""; + _opDescriptions[target]["DESC"] = op.Description ?? ""; + } + Console.WriteLine($"Warning: {errors} description files cannot be analyzed! 
Please revise it if " + + $"the failed files number is large, or ignore it."); + } + + /// + /// + /// + /// + /// + public void AppendDescription(OpDef fullOp, StringBuilder sb) + { + var opName = fullOp.Name; + if(_opDescriptions.TryGetValue(opName, out var op)) + { + var def = _opDescriptionDefs[opName]; + sb.AppendLine("/// "); + sb.AppendLine($"/// {op["SUMMARY"]}"); + sb.AppendLine("/// "); + + string totalDesc = op["DESC"]; + if (!string.IsNullOrEmpty(totalDesc)) + { + totalDesc = totalDesc.Replace(replaceStrInnerQuotationMarks, "\""); + sb.AppendLine("/// "); + string[] lines = totalDesc.Split(replaceStrInner); + foreach (var line in lines) + { + sb.AppendLine($"/// {line}"); + } + sb.AppendLine("/// "); + } + + var argNames = GetInputArgNames(fullOp); + foreach (var argName in argNames) + { + if(op.TryGetValue(argName, out var desc)) + { + desc = desc.Replace(replaceStrInnerQuotationMarks, "\""); + string[] lines = desc.Split(replaceStrInner); + sb.AppendLine($"/// "); + foreach (var line in lines) + { + sb.AppendLine($"/// {line}"); + } + sb.AppendLine("/// "); + } + else + { + sb.AppendLine($"/// "); + } + } + + List returnValueDescs = new(); + foreach (var arg in def.OutputArg) + { + if (!string.IsNullOrEmpty(arg.Description)) + { + returnValueDescs.Add($"{arg.Name}: {arg.Description}"); + } + } + string returnValueDesc = ""; + if (returnValueDescs.Count > 0) + { + returnValueDesc = string.Join(" && ", returnValueDescs); + } + sb.AppendLine($"/// {returnValueDesc}"); + } + else + { + sb.AppendLine("/// "); + sb.AppendLine($"///"); + sb.AppendLine("/// "); + + var argNames = GetInputArgNames(fullOp); + foreach (var argName in argNames) + { + sb.AppendLine($"/// "); + } + + sb.AppendLine($"/// "); + } + } + + /// + /// + /// + /// + /// + /// + /// + public List GetInputArgNames(OpDef op) + { + List names = new(); + foreach (var arg in op.InputArg) + { + string argName = arg.Name; + var token = SyntaxFactory.ParseToken(argName); + if (token.IsKeyword()) + { + argName = $"{argName}_"; + } + names.Add(argName); + } + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var dynamicDefaultValues); + foreach (var (key, typeStr, value) in attrValueDic) + { + var token = SyntaxFactory.ParseToken(key); + string realKey = key; + if (token.IsKeyword()) + { + realKey += "_"; + } + names.Add(realKey); + } + return names; + } + + private static OpList ReadOpDefs(string path) + { + var text = File.ReadAllText(path); + text = RemoveLintTags(text); + text = PreProcessText(text); + + string pattern = @"< { + string matchedText = match.Value; + string innerText = match.Groups[1].Value; + innerText = innerText.Replace("\"", replaceStrInnerQuotationMarks) + .Replace("\r\n", replaceStrInner).Replace("\n", replaceStrInner); // 替换内部换行符 + return replaceStrPrefix + innerText + replaceStrSuffix; // 替换首尾 + }, RegexOptions.Multiline); + + var opDefs = new TextParser(TextParser.Settings.Default.WithIgnoreUnknownFields(true)).Parse(replacedText); + return opDefs; + } + + static string PreProcessText(string input) + { + int depth = 0; + int endBlockDepth = -1; + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < input.Length; i++) + { + char c = input[i]; + if (c == '{') + { + depth++; + sb.Append(c); + } + else if (c == '}') + { + if (depth == endBlockDepth) + { + sb.Append("END\n"); + endBlockDepth = -1; + } + sb.Append(c); + depth--; + } + else if (c == '<' && i + 5 < input.Length && input.Substring(i, 5) == "< x.Item3 == "NOVALUE")) { var token = SyntaxFactory.ParseToken(key); @@ -226,7 +226,7 
@@ public void AppendFastPathExecute(OpDef op, StringBuilder sb) } sb.Append("}, attrs = new Dictionary(){ "); - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, _, _) in attrValueDic) { sb.Append($"[\"{key}\"] = {key}, "); @@ -252,7 +252,7 @@ public void AppendEagerFallbackCall(OpDef op, StringBuilder sb) } sb.Append($"{inputArgRealName}, "); } - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, _, _) in attrValueDic) { string keyRealName = key; @@ -439,7 +439,7 @@ public void AppendFallBackFunctionArgs(OpDef op, StringBuilder sb) sb.Append($"Tensor {argName}, "); } } - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, typeStr, _) in attrValueDic) { var token = SyntaxFactory.ParseToken(key); @@ -465,7 +465,7 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) } sb.AppendLine($"keywords[\"{arg.Name}\"] = {realArgName};"); } - var attrValueDic = GetAttrsDefaultValue(op, out var _); + var attrValueDic = Utils.GetAttrsDefaultValue(op, out var _); foreach (var (key, _, _) in attrValueDic) { sb.AppendLine($"keywords[\"{key}\"] = {key};"); @@ -473,195 +473,6 @@ public void AppendOpHelperCall(OpDef op, StringBuilder sb) sb.AppendLine($"var _op = tf.OpDefLib._apply_op_helper(\"{op.Name}\", name, keywords);"); } - // name, type string, default value - public List<(string, string, string)> GetAttrsDefaultValue(OpDef op, out Dictionary dynamicDefaultValues) - { - dynamicDefaultValues = new(); - List<(string, string, string)> res = new(); - foreach (var attr in op.Attr) - { - if (attr.Type == "type") - { - bool found = op.InputArg.Any(x => x.TypeAttr == attr.Name); - if (!found) - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) - { - string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); - string enumPath = typeof(TF_DataType).Name + "." 
+ name; - res.Add((attr.Name, "TF_DataType", enumPath)); - } - else - { - res.Add((attr.Name, "TF_DataType", "NOVALUE")); - } - } - } - else if (attr.Type == "int") - { - if(op.InputArg.Any(x => x.NumberAttr == attr.Name)) - { - continue; - } - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) - { - res.Add((attr.Name, "int", attr.DefaultValue.I.ToString())); - } - else - { - res.Add((attr.Name, "int", "0")); - } - } - else if (attr.Type == "float") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) - { - res.Add((attr.Name, "float", attr.DefaultValue.F.ToString() + "f")); - } - else - { - res.Add((attr.Name, "float", "NOVALUE")); - } - } - else if (attr.Type == "string") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) - { - res.Add((attr.Name, "string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\"")); - } - else - { - res.Add((attr.Name, "string", "NOVALUE")); - } - } - else if (attr.Type == "bool") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) - { - res.Add((attr.Name, "bool", attr.DefaultValue.B.ToString().ToLower())); - } - else - { - res.Add((attr.Name, "bool", "NOVALUE")); - } - } - else if (attr.Type == "shape") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) - { - if (attr.DefaultValue.Shape.UnknownRank) - { - res.Add((attr.Name, "Shape", $"null")); - } - else - { - Shape shape = new Shape(attr.DefaultValue.Shape); - string expression = $"new Shape({string.Join(", ", shape.dims)})"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "Shape", $"null")); - } - } - else - { - res.Add((attr.Name, "Shape", "NOVALUE")); - } - } - else if (attr.Type == "list(type)") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) - { - List values = new(); - foreach (var value in attr.DefaultValue.List.Type) - { - values.Add(value.as_tf_dtype()); - } - string expression = "new TF_DataType[]{" + $"{string.Join(", ", values)}" + "}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "TF_DataType[]", $"null")); - } - else - { - res.Add((attr.Name, "TF_DataType[]", "NOVALUE")); - } - } - else if (attr.Type == "list(shape)") - { - res.Add((attr.Name, "Shape[]", "NOVALUE")); - } - else if (attr.Type == "list(string)") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) - { - List values = new(); - foreach (var value in attr.DefaultValue.List.S) - { - values.Add(value.ToStringUtf8()); - } - string expression = "new string[]{" + $"{string.Join(", ", values)}" + "}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "string[]", $"null")); - } - else - { - res.Add((attr.Name, "string[]", "NOVALUE")); - } - } - else if (attr.Type == "list(int)") - { - if(attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) - { - List values = new(); - foreach(var value in attr.DefaultValue.List.I) - { - values.Add((int)value); - } - string expression = "new int[]{" + $"{string.Join(", ", values)}" +"}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "int[]", $"null")); - } - else - { - res.Add((attr.Name, "int[]", "NOVALUE")); - } - } - else if (attr.Type == "list(float)") - { - if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == 
AttrValue.ValueOneofCase.List) - { - List values = new(); - foreach (var value in attr.DefaultValue.List.F) - { - values.Add(value); - } - string expression = "new float[]{" + $"{string.Join(", ", values)}" + "}"; - dynamicDefaultValues[attr.Name] = expression; - res.Add((attr.Name, "float[]", $"null")); - } - else - { - res.Add((attr.Name, "float[]", "NOVALUE")); - } - } - else if (attr.Type == "func") - { - res.Add((attr.Name, "Func", "NOVALUE")); - } - else if (attr.Type == "list(func)") - { - res.Add((attr.Name, "Func[]", "NOVALUE")); - } - else if (attr.Type == "tensor") - { - res.Add((attr.Name, "TensorProto", "NOVALUE")); - } - else - { - throw new NotImplementedException(); - } - } - return res; - } - private static bool HasRefArgs(OpDef op) { return op.InputArg.Any(x => x.IsRef); diff --git a/Tensorflow.CodeGen/GenOpsWriter.cs b/Tensorflow.CodeGen/GenOpsWriter.cs index 2cd7bca50..7601acdbb 100644 --- a/Tensorflow.CodeGen/GenOpsWriter.cs +++ b/Tensorflow.CodeGen/GenOpsWriter.cs @@ -12,16 +12,18 @@ public class GenOpsWriter private string _basePath; private Dictionary _opMap; private OpClassifier _opClassifier; - private FunctionGenerator _g = new(); + private FunctionGenerator _fg = new(); + private DescriptionGenerator _dg; - public GenOpsWriter(string basePath, string pythonFilesDirectory, string opDefFilename) + public GenOpsWriter(string basePath, string pythonFilesDirectory, string apiDefFilesDirectory, string opDefFilename) { _basePath = basePath; - var opDefs = ReadAllOpDefs(opDefFilename); + var opDefs = Utils.ReadAllOpDefs(opDefFilename); _opMap = opDefs.Op.ToDictionary( - x => Tensorflow.CodeGen.Utils.ConvertToUnderscore(x.Name), x => x); + x => Utils.ConvertToUnderscore(x.Name), x => x); _opClassifier = new OpClassifier(pythonFilesDirectory, opDefs.Op.Select(x => Utils.ConvertToUnderscore(x.Name))); + _dg = new DescriptionGenerator(apiDefFilesDirectory); } public void WriteAll() @@ -53,12 +55,17 @@ public void WriteAll() if(_opMap.ContainsKey(funcName)) { var opDef = _opMap[funcName]; - _g.AppendFunction(opDef, sb); + + // write the descriptions. + _dg.AppendDescription(opDef, sb); + + // write the function body. 
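+ // (AppendFunction also emits the matching *_eager_fallback overload right after the wrapper body.)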
+ _fg.AppendFunction(opDef, sb); } else if (funcName.StartsWith("_")) { var opDef = _opMap[funcName.Substring(1)]; - _g.AppendFunction(opDef, sb); + _fg.AppendFunction(opDef, sb); } } @@ -69,12 +76,5 @@ public void WriteAll() File.WriteAllText(fullFilePath, sb.ToString()); } } - - private OpList ReadAllOpDefs(string path) - { - var text = File.ReadAllText(path); - var opDefs = OpList.Parser.ParseText(text); - return opDefs; - } } } diff --git a/Tensorflow.CodeGen/Program.cs b/Tensorflow.CodeGen/Program.cs index a26031cb3..f9d44ce83 100644 --- a/Tensorflow.CodeGen/Program.cs +++ b/Tensorflow.CodeGen/Program.cs @@ -5,10 +5,9 @@ using System.Xml.Linq; using Tensorflow.CodeGen; -//Console.WriteLine(Utils.ConvertToUnderscore("LRN")); - GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops", @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops", + @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\api_def\base_api", @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt"); writer.WriteAll(); diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index a052eb692..865db126b 100644 --- a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -9,11 +9,11 @@ - + diff --git a/Tensorflow.CodeGen/Utils.cs b/Tensorflow.CodeGen/Utils.cs index 608222e01..d3f30d9f2 100644 --- a/Tensorflow.CodeGen/Utils.cs +++ b/Tensorflow.CodeGen/Utils.cs @@ -1,4 +1,5 @@ -using System; +using Protobuf.Text; +using System; using System.Collections.Generic; using System.Linq; using System.Reflection.Metadata.Ecma335; @@ -51,5 +52,201 @@ public static string ConvertToUnderscore(string input) return result.ToString(); } + + public static OpList ReadAllOpDefs(string path) + { + var text = File.ReadAllText(path); + var opDefs = OpList.Parser.ParseText(text); + return opDefs; + } + + // name, type string, default value + public static List<(string, string, string)> GetAttrsDefaultValue(OpDef op, out Dictionary dynamicDefaultValues) + { + dynamicDefaultValues = new(); + List<(string, string, string)> res = new(); + foreach (var attr in op.Attr) + { + if (attr.Type == "type") + { + bool found = op.InputArg.Any(x => x.TypeAttr == attr.Name); + if (!found) + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + string name = Enum.GetName(typeof(TF_DataType), attr.DefaultValue.Type.as_tf_dtype()); + string enumPath = typeof(TF_DataType).Name + "." 
+ name; + res.Add((attr.Name, "TF_DataType", enumPath)); + } + else + { + res.Add((attr.Name, "TF_DataType", "NOVALUE")); + } + } + } + else if (attr.Type == "int") + { + if (op.InputArg.Any(x => x.NumberAttr == attr.Name)) + { + continue; + } + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.I) + { + res.Add((attr.Name, "int", attr.DefaultValue.I.ToString())); + } + else + { + res.Add((attr.Name, "int", "0")); + } + } + else if (attr.Type == "float") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.F) + { + res.Add((attr.Name, "float", attr.DefaultValue.F.ToString() + "f")); + } + else + { + res.Add((attr.Name, "float", "NOVALUE")); + } + } + else if (attr.Type == "string") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + res.Add((attr.Name, "string", $"\"{attr.DefaultValue.S.ToStringUtf8()}\"")); + } + else + { + res.Add((attr.Name, "string", "NOVALUE")); + } + } + else if (attr.Type == "bool") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.B) + { + res.Add((attr.Name, "bool", attr.DefaultValue.B.ToString().ToLower())); + } + else + { + res.Add((attr.Name, "bool", "NOVALUE")); + } + } + else if (attr.Type == "shape") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Shape) + { + if (attr.DefaultValue.Shape.UnknownRank) + { + res.Add((attr.Name, "Shape", $"null")); + } + else + { + Shape shape = new Shape(attr.DefaultValue.Shape); + string expression = $"new Shape({string.Join(", ", shape.dims)})"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "Shape", $"null")); + } + } + else + { + res.Add((attr.Name, "Shape", "NOVALUE")); + } + } + else if (attr.Type == "list(type)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.Type) + { + values.Add(value.as_tf_dtype()); + } + string expression = "new TF_DataType[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "TF_DataType[]", $"null")); + } + else + { + res.Add((attr.Name, "TF_DataType[]", "NOVALUE")); + } + } + else if (attr.Type == "list(shape)") + { + res.Add((attr.Name, "Shape[]", "NOVALUE")); + } + else if (attr.Type == "list(string)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.S) + { + values.Add(value.ToStringUtf8()); + } + string expression = "new string[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "string[]", $"null")); + } + else + { + res.Add((attr.Name, "string[]", "NOVALUE")); + } + } + else if (attr.Type == "list(int)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.I) + { + values.Add((int)value); + } + string expression = "new int[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "int[]", $"null")); + } + else + { + res.Add((attr.Name, "int[]", "NOVALUE")); + } + } + else if (attr.Type == "list(float)") + { + if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase 
== AttrValue.ValueOneofCase.List) + { + List values = new(); + foreach (var value in attr.DefaultValue.List.F) + { + values.Add(value); + } + string expression = "new float[]{" + $"{string.Join(", ", values)}" + "}"; + dynamicDefaultValues[attr.Name] = expression; + res.Add((attr.Name, "float[]", $"null")); + } + else + { + res.Add((attr.Name, "float[]", "NOVALUE")); + } + } + else if (attr.Type == "func") + { + res.Add((attr.Name, "Func", "NOVALUE")); + } + else if (attr.Type == "list(func)") + { + res.Add((attr.Name, "Func[]", "NOVALUE")); + } + else if (attr.Type == "tensor") + { + res.Add((attr.Name, "TensorProto", "NOVALUE")); + } + else + { + throw new NotImplementedException(); + } + } + return res; + } } } From 970ab41ef7f9edd5a86cf7f7052638cd97afbc26 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Mon, 8 May 2023 02:00:08 +0800 Subject: [PATCH 4/5] refactor: gen_nn_ops, gen_math_ops, gen_array_ops and related codes. --- src/TensorFlowNET.Console/MemoryBasicTest.cs | 4 +- src/TensorFlowNET.Core/APIs/tf.array.cs | 16 +- src/TensorFlowNET.Core/APIs/tf.math.cs | 39 +- src/TensorFlowNET.Core/APIs/tf.nn.cs | 25 +- src/TensorFlowNET.Core/APIs/tf.reshape.cs | 2 +- src/TensorFlowNET.Core/APIs/tf.tensor.cs | 8 +- src/TensorFlowNET.Core/APIs/tf.tile.cs | 2 +- .../Attributes/c_api.ops.cs | 15 + .../_InitializeClustersOpFactory.cs | 2 +- .../Contexts/Context.ExecuteOp.cs | 2 +- .../Eager/EagerRunner.TFE_FastPathExecute.cs | 4 +- .../Eager/FastPathOpExecInfo.cs | 3 +- src/TensorFlowNET.Core/Eager/execute.cs | 12 +- .../Functions/EagerDefinedFunction.cs | 2 +- .../Gradients/GradientTape.cs | 9 + .../Gradients/array_grad.cs | 15 +- src/TensorFlowNET.Core/Gradients/math_grad.cs | 19 +- .../Gradients/math_grad_eager.cs | 4 +- src/TensorFlowNET.Core/Gradients/nn_grad.cs | 48 +- .../Operations/NnOps/AveragePoolFunction.cs | 2 +- .../Operations/NnOps/ConvolutionInternal.cs | 38 +- .../Operations/NnOps/gen_nn_ops.cs | 373 - .../Operations/OpDefLibrary.cs | 5 + .../Operations/Operation.cs | 64 +- .../Operations/array_ops.cs | 86 +- .../Operations/dataset_ops.cs | 4 +- .../Operations/gen_array_ops.cs | 10688 +++++++++++++++- .../Operations/gen_functional_ops.cs | 12 +- .../Operations/gen_io_ops.cs | 1378 ++ .../Operations/gen_logging_ops.cs | 2 +- .../Operations/gen_math_ops.cs | 10018 ++++++++++++++- .../Operations/gen_math_ops.eager.cs | 11 - .../Operations/gen_nn_ops.cs | 8084 ++++++++++++ src/TensorFlowNET.Core/Operations/gen_ops.cs | 22 +- .../Operations/gen_resource_variable_ops.cs | 10 +- .../Operations/image_ops_impl.cs | 26 +- src/TensorFlowNET.Core/Operations/io_ops.cs | 6 +- src/TensorFlowNET.Core/Operations/math_ops.cs | 45 +- .../Operations/nn_impl.py.cs | 2 +- src/TensorFlowNET.Core/Operations/nn_ops.cs | 11 +- .../Tensors/Ragged/RowPartition.cs | 2 +- .../Tensors/Tensor.Operators.cs | 176 +- src/TensorFlowNET.Core/Tensors/Tensors.cs | 3 + .../Training/Saving/BaseSaverBuilder.cs | 2 +- .../DataAdapters/TensorLikeDataAdapter.cs | 5 +- src/TensorFlowNET.Keras/Layers/Core/Dense.cs | 2 +- src/TensorFlowNET.Keras/Losses/Huber.cs | 2 +- src/TensorFlowNET.Keras/Losses/LogCosh.cs | 3 +- .../Losses/MeanAbsoluteError.cs | 2 +- .../Losses/MeanAbsolutePercentageError.cs | 2 +- .../Losses/MeanSquaredError.cs | 2 +- .../Losses/MeanSquaredLogarithmicError.cs | 10 +- .../ControlFlowTest/WhileContextTestCase.cs | 4 +- .../GradientTest/GradientTest.cs | 2 +- .../ManagedAPI/ArrayOpsTest.cs | 6 +- 55 files changed, 29617 insertions(+), 1724 deletions(-) delete mode 100644 
src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs create mode 100644 src/TensorFlowNET.Core/Operations/gen_io_ops.cs delete mode 100644 src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs create mode 100644 src/TensorFlowNET.Core/Operations/gen_nn_ops.cs diff --git a/src/TensorFlowNET.Console/MemoryBasicTest.cs b/src/TensorFlowNET.Console/MemoryBasicTest.cs index 3b0deeabb..2bb11a02d 100644 --- a/src/TensorFlowNET.Console/MemoryBasicTest.cs +++ b/src/TensorFlowNET.Console/MemoryBasicTest.cs @@ -112,7 +112,7 @@ public Action Conv2DWithTensor var strides = new[] { 1, 1, 1, 1 }; var dilations = new[] { 1, 1, 1, 1 }; - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Conv2D", null, input, filter) + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "Conv2D", null, input, filter) { attrs = ConvertToDict(new { @@ -134,7 +134,7 @@ public Action Conv2DWithVariable var strides = new[] { 1, 1, 1, 1 }; var dilations = new[] { 1, 1, 1, 1 }; - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Conv2D", null, input, filter) + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "Conv2D", null, input, filter) { attrs = ConvertToDict(new { diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs index a2c91983e..6a646512a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.array.cs +++ b/src/TensorFlowNET.Core/APIs/tf.array.cs @@ -44,7 +44,8 @@ public partial class tensorflow /// /// public Tensor batch_to_space_nd(T input, int[] block_shape, int[,] crops, string name = null) - => gen_array_ops.batch_to_space_nd(input, block_shape, crops, name: name); + => gen_array_ops.batch_to_space_nd(ops.convert_to_tensor(input), ops.convert_to_tensor(block_shape), + ops.convert_to_tensor(crops), name: name); /// /// Apply boolean mask to tensor. @@ -91,7 +92,7 @@ public Tensor concat(IEnumerable values, int axis, string name = "concat }); } - return gen_array_ops.concat_v2(values.ToArray(), axis, name: name); + return gen_array_ops.concat_v2(values.ToArray(), ops.convert_to_tensor(axis), name: name); } /// @@ -115,7 +116,7 @@ public Tensor expand_dims(Tensor input, int axis = -1, string name = null) /// /// public Tensor fill(Tensor dims, T value, string name = null) - => gen_array_ops.fill(dims, value, name: name); + => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name); public Tensor fill(Shape dims, T value, string name = null) => array_ops.fill(dims, value, name: name); @@ -138,7 +139,7 @@ public Tensor identity(Tensor input, string name = null) /// /// public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0) - => array_ops.gather(@params, indices, name: name, axis: axis); + => array_ops.gather(@params, indices, name: name, axis: ops.convert_to_tensor(axis)); /// /// Return the elements, either from `x` or `y`, depending on the `condition`. @@ -166,7 +167,7 @@ public Tensor transpose(T1 a, Axis perm = null, string name = "transpose", b /// /// public Tensor reverse(Tensor tensor, int[] axis, string name = null) - => gen_array_ops.reverse(tensor, axis, name: name); + => gen_array_ops.reverse(tensor, ops.convert_to_tensor(axis), name: name); public Tensor reverse(Tensor tensor, Tensor axis, string name = null) => gen_array_ops.reverse(tensor, axis, name: name); @@ -189,7 +190,8 @@ public Tensor rank(Tensor input, string name = null) /// A name for the operation (optional). /// A `Tensor` the same type as `input`. 
public Tensor slice(Tensor input, Tb[] begin, Ts[] size, string name = null) - => array_ops.slice(input, begin, size, name: name); + => array_ops.slice(input, begin.Select(x => ops.convert_to_tensor(x)).ToArray(), + size.Select(x => ops.convert_to_tensor(x)).ToArray(), name: name); public Tensor squeeze(Tensor input, int axis, string name = null, int squeeze_dims = -1) => array_ops.squeeze(input, new[] { axis }, name); @@ -255,7 +257,7 @@ public Tensor pad(Tensor tensor, Tensor paddings, string mode = "CONSTANT", stri /// A name for the operation (optional). /// A `Tensor`. Has the same type as `input`. public Tensor placeholder_with_default(T input, int[] shape, string name = null) - => gen_array_ops.placeholder_with_default(input, shape, name: name); + => gen_array_ops.placeholder_with_default(ops.convert_to_tensor(input), shape, name: name); /// /// Returns the shape of a tensor. diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs index 83653c8bb..75253700a 100644 --- a/src/TensorFlowNET.Core/APIs/tf.math.cs +++ b/src/TensorFlowNET.Core/APIs/tf.math.cs @@ -130,7 +130,7 @@ public Tensor add(Tensor a, Tensor b, string name = null) => gen_math_ops.add(a, b, name: name); public Tensor add(Tx a, Ty b, string name = null) - => gen_math_ops.add(a, b, name: name); + => gen_math_ops.add(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name); /// /// Adds all input tensors element-wise. @@ -151,10 +151,10 @@ public Tensor atan(Tensor x, string name = null) => gen_math_ops.atan(x, name); public Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => gen_math_ops.arg_max(input, dimension, output_type: output_type, name: name); + => gen_math_ops.arg_max(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name); public Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => gen_math_ops.arg_min(input, dimension, output_type: output_type, name: name); + => gen_math_ops.arg_min(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name); public Tensor is_finite(Tensor input, string name = null) => gen_math_ops.is_finite(input, name); @@ -199,7 +199,7 @@ public Tensor cos(Tensor x, string name = null) => gen_math_ops.cos(x, name); public Tensor cos(float x, string name = null) - => gen_math_ops.cos(x, name); + => gen_math_ops.cos(ops.convert_to_tensor(x), name); /// /// Computes hyperbolic cosine of x element-wise. @@ -235,7 +235,7 @@ public Tensor floor(Tensor x, string name = null) /// /// public Tensor greater(Tx x, Ty y, string name = null) - => gen_math_ops.greater(x, y, name); + => gen_math_ops.greater(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Returns the truth value of (x >= y) element-wise. @@ -247,7 +247,7 @@ public Tensor greater(Tx x, Ty y, string name = null) /// /// public Tensor greater_equal(Tx x, Ty y, string name = null) - => gen_math_ops.greater_equal(x, y, name); + => gen_math_ops.greater_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Returns the truth value of (x < y) element-wise. @@ -259,7 +259,7 @@ public Tensor greater_equal(Tx x, Ty y, string name = null) /// /// public Tensor less(Tx x, Ty y, string name = null) - => gen_math_ops.less(x, y, name); + => gen_math_ops.less(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Computes the log of the absolute value of `Gamma(x)` element-wise. 
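The convert-first pattern in this hunk recurs throughout the refactor: the regenerated gen_math_ops entry points take Tensor arguments only, so the generic public overloads (greater, greater_equal, less, ...) convert their inputs before dispatching. A minimal caller-side sketch of that convention, assuming only the regenerated gen_math_ops.greater(Tensor, Tensor, string) used above; the wrapper name is illustrative:

    using Tensorflow;

    static class ComparisonSketch
    {
        // Convert arbitrary inputs (scalars, arrays, NDArrays) to Tensors once,
        // then call the Tensor-only generated op.
        public static Tensor Greater<Tx, Ty>(Tx x, Ty y, string name = null)
        {
            var xt = ops.convert_to_tensor(x);
            var yt = ops.convert_to_tensor(y);
            return gen_math_ops.greater(xt, yt, name);
        }
    }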
@@ -280,7 +280,7 @@ public Tensor lgamma(Tensor x, string name = null) /// /// public Tensor less_equal(Tx x, Ty y, string name = null) - => gen_math_ops.less_equal(x, y, name); + => gen_math_ops.less_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Computes natural logarithm of (1 + x) element-wise. @@ -292,7 +292,7 @@ public Tensor log1p(Tensor x, string name = null) => gen_math_ops.log1p(x, name); public Tensor logical_and(T x, T y, string name = null) - => gen_math_ops.logical_and(x, y, name); + => gen_math_ops.logical_and(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); public Tensor logical_not(Tensor x, string name = null) => gen_math_ops.logical_not(x, name); @@ -301,7 +301,10 @@ public Tensor logical_or(Tensor x, Tensor y, string name = null) => gen_math_ops.logical_or(x, y, name); public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") - => gen_math_ops.logical_xor(x, y, name); + { + return gen_math_ops.logical_and(gen_math_ops.logical_or(x, y), + gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name); + } /// /// Clips tensor values to a specified min and max. @@ -312,7 +315,7 @@ public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") /// /// public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) - => gen_math_ops._clip_by_value(t, clip_value_min, clip_value_max); + => gen_math_ops.clip_by_value(t, clip_value_min, clip_value_max); /// /// Clips tensor values to a specified min and max. @@ -345,7 +348,7 @@ public Tensor clip_by_value(Tensor t, T1 clip_value_min, T2 clip_value_m => clip_ops.clip_by_value(t, clip_value_min, clip_value_max, name); public Tensor sub(Tx a, Ty b, string name = null) - => gen_math_ops.sub(a, b, name: name); + => gen_math_ops.sub(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name); public Tensor divide(Tensor a, Tensor b) => a / b; @@ -396,7 +399,7 @@ public Tensor atan2(Tensor y, Tensor x, string name = null) /// /// public Tensor max(Tx input, Ty axis, bool keep_dims = false, string name = null) - => gen_math_ops._max(input, axis, keep_dims: keep_dims, name: name); + => gen_math_ops.max(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); /// /// Computes the minimum of elements across dimensions of a tensor. @@ -409,7 +412,7 @@ public Tensor max(Tx input, Ty axis, bool keep_dims = false, string name /// /// public Tensor min(Tx input, Ty axis, bool keep_dims = false, string name = null) - => gen_math_ops._min(input, axis, keep_dims: keep_dims, name: name); + => gen_math_ops.min(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); /// /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. @@ -421,7 +424,7 @@ public Tensor min(Tx input, Ty axis, bool keep_dims = false, string name /// /// public Tensor maximum(T1 x, T2 y, string name = null) - => gen_math_ops.maximum(x, y, name: name); + => gen_math_ops.maximum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); /// /// Returns the min of x and y (i.e. x < y ? x : y) element-wise. 
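logical_xor above is now composed from the generated primitives using the identity x XOR y = (x OR y) AND NOT (x AND y), presumably because there is no dedicated LogicalXor kernel in the op registry to generate a binding for; the Python API composes it the same way. A small sketch of the equivalence on boolean tensors (values are illustrative):

    using Tensorflow;
    using static Tensorflow.Binding;

    var x = tf.constant(new[] { true, true, false, false });
    var y = tf.constant(new[] { true, false, true, false });

    // Same composition as the logical_xor body above.
    var xor = gen_math_ops.logical_and(
        gen_math_ops.logical_or(x, y),
        gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)));
    // Element-wise result: { false, true, true, false }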
@@ -433,7 +436,7 @@ public Tensor maximum(T1 x, T2 y, string name = null) /// /// public Tensor minimum(T1 x, T2 y, string name = null) - => gen_math_ops.minimum(x, y, name: name); + => gen_math_ops.minimum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public Tensor multiply(Tensor x, Tensor y, string name = null) => gen_math_ops.mul(x, y, name: name); @@ -448,7 +451,7 @@ public Tensor multiply(Tensor x, Tensor y, string name = null) /// /// public Tensor multiply(Tx x, Ty y, string name = null) - => gen_math_ops.mul(x, y, name: name); + => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public Tensor negative(Tensor x, string name = null) => gen_math_ops.neg(x, name); @@ -577,7 +580,7 @@ public Tensor sigmoid(T x, string name = null) => math_ops.sigmoid(x, name: name); public Tensor sum(Tensor input, int axis, bool keep_dims = false, string name = null) - => gen_math_ops._sum(input, axis, keep_dims: keep_dims, name: name); + => gen_math_ops.sum(input, ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); public Tensor reduce_mean(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null, int? reduction_indices = null) => math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices); diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs index 1595e52fc..e0c29bfa7 100644 --- a/src/TensorFlowNET.Core/APIs/tf.nn.cs +++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs @@ -29,21 +29,8 @@ public class nn_internal public Tensor conv2d(Tensor input, Tensor filter, int[] strides, string padding, bool use_cudnn_on_gpu = true, string data_format = "NHWC", int[] dilations = null, string name = null) { - var parameters = new Conv2dParams - { - Input = input, - Filter = filter, - Strides = strides, - Padding = padding, - UseCudnnOnGpu = use_cudnn_on_gpu, - DataFormat = data_format, - Name = name - }; - - if (dilations != null) - parameters.Dilations = dilations; - - return gen_nn_ops.conv2d(parameters); + return gen_nn_ops.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, + data_format: data_format, dilations: dilations, name: name); } public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null) @@ -118,7 +105,7 @@ public Tensor embedding_lookup(Tensor @params, public IActivation softmax() => new softmax(); public Tensor tanh(Tensor x, string name = null) - => gen_nn_ops.tanh(x, name); + => gen_math_ops.tanh(x, name); public Tensor relu(Tensor features, string name = null) => gen_nn_ops.relu(features, name); @@ -146,14 +133,14 @@ public Tensor in_top_k(Tensor predictions, Tensor targets, int k, string name = => nn_ops.in_top_k(predictions, targets, k, name); public Tensor[] top_k(Tensor input, int k = 1, bool sorted = true, string name = null) - => gen_nn_ops.top_kv2(input, k: k, sorted: sorted, name: name); + => gen_nn_ops.top_kv2(input, k: ops.convert_to_tensor(k), sorted: sorted, name: name); public Tensor bias_add(Tensor value, IVariableV1 bias, string data_format = null, string name = null) { return tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; - return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name); + return gen_nn_ops.bias_add(value, ops.convert_to_tensor(bias), data_format: data_format, name: name); }); } @@ -172,7 +159,7 @@ public Tensor l2_loss(Tensor t, string name = null) /// public Tensor 
lrn(Tensor input, int depth_radius = 5, int bias = 1, int alpha = 1, float beta = 0.5f, string name = null) - => gen_nn_ops.local_response_normalization(input, depth_radius: depth_radius, bias: bias, + => gen_nn_ops.lrn(input, depth_radius: depth_radius, bias: bias, alpha: alpha, beta: beta, name: name); public Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) diff --git a/src/TensorFlowNET.Core/APIs/tf.reshape.cs b/src/TensorFlowNET.Core/APIs/tf.reshape.cs index cdd5194a2..5da7b795f 100644 --- a/src/TensorFlowNET.Core/APIs/tf.reshape.cs +++ b/src/TensorFlowNET.Core/APIs/tf.reshape.cs @@ -31,6 +31,6 @@ public Tensor reshape(Tensor tensor, public Tensor reshape(Tensor tensor, object[] shape, string name = null) - => gen_array_ops.reshape(tensor, shape, name); + => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name); } } diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs index 35efde06b..be8c2ab24 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tensor.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs @@ -46,10 +46,10 @@ public Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides = n int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, - string name = null) => gen_array_ops.strided_slice(input: input, - begin: begin, - end: end, - strides: strides, + string name = null) => array_ops.strided_slice(input, + begin: ops.convert_to_tensor(begin), + end: ops.convert_to_tensor(end), + strides: ops.convert_to_tensor(strides), begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs index be03e453c..65975ac83 100644 --- a/src/TensorFlowNET.Core/APIs/tf.tile.cs +++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs @@ -23,7 +23,7 @@ public Tensor tile(Tensor input, Tensor multiples, string name = null) => gen_array_ops.tile(input, multiples, name); public Tensor tile(Tensor input, object[] multiples, string name = null) - => gen_array_ops.tile(input, multiples, name); + => gen_array_ops.tile(input, ops.convert_to_tensor(multiples), name); public Tensor tile(Tensor input, Shape multiples, string name = null) { diff --git a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs index 2a22413b0..ba6f653a1 100644 --- a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs +++ b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs @@ -57,6 +57,21 @@ public partial class c_api [DllImport(TensorFlowLibName)] public static extern int TF_OperationGetAttrValueProto(IntPtr oper, string attr_name, SafeBufferHandle output_attr_value, SafeStatusHandle status); + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrType(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrInt(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrFloat(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrBool(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status); + + [DllImport(TensorFlowLibName)] + public static extern void TF_OperationGetAttrShape(IntPtr oper, string attr_name, long[] value, int num_dims, SafeStatusHandle status); + 
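These new getters hand their result back through a raw out-pointer rather than a managed out parameter. A minimal consumption sketch (hypothetical helper; an existing operation handle is assumed), mirroring the wrappers added to Operation.cs later in this patch; note that the C API fills an int64 for TF_OperationGetAttrInt, so a 64-bit local is the natural target:

    using System;
    using Tensorflow;

    static class AttrReadSketch
    {
        // Reads an integer attribute from a graph operation. The native call writes
        // through the pointer, so the address of a stack local is passed as IntPtr.
        public static unsafe long ReadIntAttr(IntPtr opHandle, string attrName)
        {
            var status = new Status();
            long value;
            c_api.TF_OperationGetAttrInt(opHandle, attrName, new IntPtr(&value), status);
            status.Check(true);   // throws if the attribute is missing or of a different type
            return value;
        }
    }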
[DllImport(TensorFlowLibName)] public static extern void TF_SetAttrBool(IntPtr desc, string attr_name, bool value); diff --git a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs index adb26ef29..1b295fcfd 100644 --- a/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs +++ b/src/TensorFlowNET.Core/Clustering/_InitializeClustersOpFactory.cs @@ -88,7 +88,7 @@ private Tensor _initialize() public Tensor op() { - var x = control_flow_ops.cond(gen_math_ops.equal(_num_remaining, 0), + var x = control_flow_ops.cond(gen_math_ops.equal(_num_remaining, ops.convert_to_tensor(0)), () => { return check_ops.assert_equal(_cluster_centers_initialized, true); diff --git a/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs b/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs index ac1cd8660..f6e0911ca 100644 --- a/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs +++ b/src/TensorFlowNET.Core/Contexts/Context.ExecuteOp.cs @@ -49,7 +49,7 @@ Tensors ExecGraphAction(string OpType, string Name, ExecuteOpArgs args) Tensors ExecEagerAction(string OpType, string Name, ExecuteOpArgs args) { - var opExecInfo = new FastPathOpExecInfo(OpType, Name, args.OpInputArgs) + var opExecInfo = new FastPathOpExecInfo(tf.Context, OpType, Name, args.OpInputArgs) { attrs = args.OpAttrs }; diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs index fedc02cb9..f1a09ed7b 100644 --- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs +++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs @@ -68,7 +68,8 @@ public Tensor[] TFE_FastPathExecute(FastPathOpExecInfo op_exec_info) var input_arg = op_def.InputArg[i]; if (!string.IsNullOrEmpty(input_arg.NumberAttr)) { - int len = (input as object[]).Length; + var fast_input_array = input is Tensors tensors ? (object[])tensors : (object[])input; + int len = fast_input_array.Length; c_api.TFE_OpSetAttrInt(op, input_arg.NumberAttr, len); if (op_exec_info.run_callbacks) { @@ -79,7 +80,6 @@ public Tensor[] TFE_FastPathExecute(FastPathOpExecInfo op_exec_info) if (len > 0) { - var fast_input_array = (object[])op_exec_info.args[i]; // First item adds the type attr. 
if (!AddInputToOp(fast_input_array[i], true, input_arg, flattened_attrs, flattened_inputs, op, status)) return null; diff --git a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs index 2cdf025a1..307ca2ce4 100644 --- a/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs +++ b/src/TensorFlowNET.Core/Eager/FastPathOpExecInfo.cs @@ -17,8 +17,9 @@ public class FastPathOpExecInfo public bool run_callbacks { get; set; } public Action callbacks { get; set; } - public FastPathOpExecInfo(string opName, string name, params object[] inputArgs) + public FastPathOpExecInfo(Context ctx, string opName, string name, params object[] inputArgs) { + this.ctx = ctx; this.op_name = opName; this.name = name; this.args = inputArgs; diff --git a/src/TensorFlowNET.Core/Eager/execute.cs b/src/TensorFlowNET.Core/Eager/execute.cs index 1804992ac..e981c6c51 100644 --- a/src/TensorFlowNET.Core/Eager/execute.cs +++ b/src/TensorFlowNET.Core/Eager/execute.cs @@ -7,10 +7,11 @@ using static Tensorflow.ApiDef.Types; using static Tensorflow.CostGraphDef.Types; using static Tensorflow.Binding; +using Tensorflow.Gradients; namespace Tensorflow.Eager { - internal static class execute + internal static class _execute { public static (DataType[], Tensor[]) onvert_to_mixed_eager_tensors(Tensor[] values, Context ctx) { @@ -18,7 +19,7 @@ public static (DataType[], Tensor[]) onvert_to_mixed_eager_tensors(Tensor[] valu var types = v.Select(t => t.dtype.as_datatype_enum()); return (types.ToArray(), v.ToArray()); } - public static Tensor[] executes(string op_name, int num_outputs, Tensor[] inputs, object[] attrs, Context ctx, string name = null) + public static Tensor[] execute(string op_name, int num_outputs, Tensor[] inputs, object[] attrs, Context ctx, string name = null) { return quick_execute(op_name, num_outputs, inputs, attrs, ctx, name); } @@ -33,7 +34,12 @@ public static Tensor[] quick_execute(string op_name, int num_outputs, Tensor[] i } public static bool must_record_gradient() { - return false; + return tf.GetTapeSet().Count != 0; + } + + public static bool record_gradient(string op_name, Tensor[] inputs, object[] attrs, Tensor[] results) + { + return tf.Runner.RecordGradient(op_name, inputs, attrs, results); } } } diff --git a/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs b/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs index cc38683db..d547b6120 100644 --- a/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs +++ b/src/TensorFlowNET.Core/Functions/EagerDefinedFunction.cs @@ -147,7 +147,7 @@ public unsafe Tensors Call(Tensors args) Tensor[] outputs; if (executing_eagerly) { - outputs = execute.executes( + outputs = _execute.execute( Signature.Name, _num_outputs, args, diff --git a/src/TensorFlowNET.Core/Gradients/GradientTape.cs b/src/TensorFlowNET.Core/Gradients/GradientTape.cs index b5fd373e9..a714436a3 100644 --- a/src/TensorFlowNET.Core/Gradients/GradientTape.cs +++ b/src/TensorFlowNET.Core/Gradients/GradientTape.cs @@ -44,6 +44,15 @@ public ITape PushTape(bool persistent = false, return tape; } + public void PushTape(ITape tape) + { + // Enters a context inside which operations are recorded on this tape. 
+ if (tf.Context.executing_eagerly()) + tf.Context.ensure_initialized(); + + _tapeSet.Push(tape); + } + ITape PopTape() { _tape.StopRecord(); diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs index c4cb9fbd1..f939f7b69 100644 --- a/src/TensorFlowNET.Core/Gradients/array_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs @@ -36,8 +36,7 @@ public static Tensor[] _BroadcastToGrad(Operation op, Tensor[] grads) var input_value = op.inputs[0]; var broadcast_shape = op.inputs[1]; var input_value_shape = array_ops.shape(input_value); - var (_, reduction_axes) = gen_array_ops.broadcast_gradient_args(broadcast_shape, - input_value_shape); + var reduction_axes = gen_array_ops.broadcast_gradient_args(broadcast_shape, input_value_shape)[1]; var updates_grad_reshaped = math_ops.reduce_sum(grad, axis: reduction_axes, keepdims: true); @@ -351,16 +350,16 @@ public static Tensor[] _StridedSliceGradGrad(Operation op, Tensor[] grads) null, null, null, - gen_array_ops.strided_slice( + array_ops.strided_slice( grad, begin, end, strides, - begin_mask: op.get_attr("begin_mask"), - end_mask: op.get_attr("end_mask"), - ellipsis_mask: op.get_attr("ellipsis_mask"), - new_axis_mask: op.get_attr("new_axis_mask"), - shrink_axis_mask: op.get_attr("shrink_axis_mask")) + begin_mask: (int)op.get_attr("begin_mask"), + end_mask: (int)op.get_attr("end_mask"), + ellipsis_mask: (int)op.get_attr("ellipsis_mask"), + new_axis_mask: (int)op.get_attr("new_axis_mask"), + shrink_axis_mask: (int)op.get_attr("shrink_axis_mask")) }; } diff --git a/src/TensorFlowNET.Core/Gradients/math_grad.cs b/src/TensorFlowNET.Core/Gradients/math_grad.cs index 89699d6bc..be1fbbba7 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad.cs @@ -53,7 +53,8 @@ public static Tensor[] _AddGrad(Operation op, Tensor[] grads) var sx = array_ops.shape(x); var sy = array_ops.shape(y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); var sum1 = math_ops.reduce_sum(grad, rx); var r1 = gen_array_ops.reshape(sum1, sx); @@ -101,7 +102,8 @@ public static Tensor[] _DivNoNanGrad(Operation op, Tensor[] grads) var y = op.inputs[1]; var sx = array_ops.shape(x); var sy = array_ops.shape(y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); x = math_ops.conj(x); y = math_ops.conj(y); @@ -427,7 +429,8 @@ private static Tensor[] _MaximumMinimumGrad(bool isMaximum, Operation op, Tensor isMaximum ? 
gen_math_ops.greater_equal(x, y) : gen_math_ops.less_equal(x, y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); var xgrad = array_ops.where(xmask, grad, zeros); var gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx); var ygrad = array_ops.where(xmask, zeros, grad); @@ -458,7 +461,7 @@ public static Tensor[] _SelectGrad(Operation op, Tensor[] grads) private static Tensor _safe_shape_div(Tensor x, Tensor y) { - return math_ops.floordiv(x, gen_math_ops.maximum(y, 1)); + return math_ops.floordiv(x, gen_math_ops.maximum(y, ops.convert_to_tensor(1))); } [RegisterGradient("Sub")] @@ -573,7 +576,8 @@ public static Tensor[] _RealDivGrad(Operation op, Tensor[] grads) var sx = array_ops.shape(x); var sy = array_ops.shape(y); - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); x = math_ops.conj(x); y = math_ops.conj(y); @@ -824,7 +828,7 @@ public static Tensor[] _PowGrad(Operation op, Tensor[] grads) mask = x > 0.0f; var ones = array_ops.ones_like(x); var safe_x = array_ops.where(mask, x, ones); - var x1 = gen_array_ops.log(safe_x); + var x1 = math_ops.log(safe_x); var y1 = array_ops.zeros_like(x); var log_x = array_ops.where(mask, x1, y1); var mul1 = grad * z * log_x; @@ -855,7 +859,8 @@ public static (Tensor, Tensor, bool)[] SmartBroadcastGradientArgs(Tensor x, Tens sy = array_ops.shape_internal(y, optimize: false); } - var (rx, ry) = gen_array_ops.broadcast_gradient_args(sx, sy); + var args = gen_array_ops.broadcast_gradient_args(sx, sy); + var (rx, ry) = (args[0], args[1]); return new[] { (sx, rx, !x.shape.Equals(grad.shape)), diff --git a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs index 530bb6c08..f8b16090f 100644 --- a/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs +++ b/src/TensorFlowNET.Core/Gradients/math_grad_eager.cs @@ -47,8 +47,8 @@ public static Tensor[] _MulGrad(EagerOperation op, IntPtr[] grads) { return new Tensor[] { - gen_math_ops.mul(grad, y), - gen_math_ops.mul(grad, x) + math_ops.multiply(grad, y), + math_ops.multiply(grad, x) }; } diff --git a/src/TensorFlowNET.Core/Gradients/nn_grad.cs b/src/TensorFlowNET.Core/Gradients/nn_grad.cs index e95163930..a1ac97a97 100644 --- a/src/TensorFlowNET.Core/Gradients/nn_grad.cs +++ b/src/TensorFlowNET.Core/Gradients/nn_grad.cs @@ -192,17 +192,8 @@ public static Tensor[] _Conv2DBackpropInputGrad(Operation op, Tensor[] grads) explicit_paddings: explicit_paddings, dilations: dilations, data_format: data_format), - gen_nn_ops.conv2d(new Conv2dParams - { - Input = grad, - Filter = op.inputs[1], - Strides = strides, - Padding = padding, - DataFormat = data_format, - Dilations = dilations, - ExplicitPaddings = explicit_paddings, - UseCudnnOnGpu = use_cudnn_on_gpu - }) + gen_nn_ops.conv2d(grad, op.inputs[1], strides, padding, + use_cudnn_on_gpu, explicit_paddings, data_format, dilations) }; } @@ -265,20 +256,27 @@ public static Tensor[] _BaseFusedBatchNormGrad(Operation op, int version, Tensor var epsilon = op.get_attr("epsilon"); var data_format = op.get_attr("data_format"); var is_training = op.get_attr("is_training"); - Func grad_fun = null; - - switch (version) + Func grad_fun = (p) => { - case 2: - grad_fun = gen_nn_ops.fused_batch_norm_grad_v3; - break; - case 1: - // grad_fun = gen_nn_ops.fused_batch_norm_grad_v2; - throw new 
NotImplementedException(""); - default: - grad_fun = gen_nn_ops.fused_batch_norm_grad; - break; - } + if(version == 2) + { + return gen_nn_ops.fused_batch_norm_grad_v3(p.YBackprop, p.X, p.Scale, + p.ReserveSpace1, p.ReserveSpace2, p.ReserveSpace3, p.Epsilon, + p.DataFormat, p.IsTraining, p.Name); + } + else if(version == 1) + { + return gen_nn_ops.fused_batch_norm_grad_v2(p.YBackprop, p.X, p.Scale, + p.ReserveSpace1, p.ReserveSpace2, p.Epsilon, p.DataFormat, + p.IsTraining, p.Name); + } + else + { + return gen_nn_ops.fused_batch_norm_grad(p.YBackprop, p.X, p.Scale, + p.ReserveSpace1, p.ReserveSpace2, p.Epsilon, p.DataFormat, + p.IsTraining, p.Name); + } + }; if (is_training) { @@ -406,7 +404,7 @@ public static Tensor[] _TopKGrad(Operation op, Tensor[] grads) // finally reshaping it to the original input shape. var scatter = gen_array_ops.scatter_nd(array_ops.expand_dims(ind, -1), array_ops.reshape(grad, new int[] { -1 }), - new Tensor[] { math_ops.reduce_prod(in_shape) }); + math_ops.reduce_prod(in_shape)); return new Tensor[] { diff --git a/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs b/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs index d43f8a0c8..84ce56a4b 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/AveragePoolFunction.cs @@ -34,7 +34,7 @@ public Tensor Apply(Tensor value, { name = scope; value = ops.convert_to_tensor(value, name: "input"); - return gen_nn_ops.average_pool( + return gen_nn_ops.avg_pool( value, ksize: ksize, strides: strides, diff --git a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs index 958d79f42..ec70b1858 100644 --- a/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs +++ b/src/TensorFlowNET.Core/Operations/NnOps/ConvolutionInternal.cs @@ -67,16 +67,15 @@ public Tensor Apply(Tensors input, Tensor filters) var dilations = _get_sequence(args.DilationRate, num_spatial_dims, channel_index).ToArray(); var strides = _get_sequence(args.Strides, num_spatial_dims, channel_index).ToArray(); - result = gen_nn_ops.conv2d(new Conv2dParams - { - Input = input, - Filter = filters, - Strides = strides, - Padding = padding, - DataFormat = data_format, - Dilations = dilations, - Name = name - }); + result = gen_nn_ops.conv2d( + input, + filters, + strides, + padding, + data_format: data_format, + dilations: dilations, + name: name + ); } else { @@ -93,16 +92,15 @@ public Tensor Apply(Tensors input, Tensor filters) input = array_ops.expand_dims(input, spatial_start_dim); filters = array_ops.expand_dims(filters, 0); - result = gen_nn_ops.conv2d(new Conv2dParams - { - Input = input, - Filter = filters, - Strides = strides.ToArray(), - Padding = padding, - DataFormat = channel_first ? "NCHW" : "NHWC", - Dilations = dilations.ToArray(), - Name = name - }); + result = gen_nn_ops.conv2d( + input, + filters, + strides.ToArray(), + padding, + data_format: channel_first ? 
"NCHW" : "NHWC", + dilations: dilations.ToArray(), + name: name + ); result = array_ops.squeeze(result, new[] { spatial_start_dim }); } }); diff --git a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs deleted file mode 100644 index 408d06ebf..000000000 --- a/src/TensorFlowNET.Core/Operations/NnOps/gen_nn_ops.cs +++ /dev/null @@ -1,373 +0,0 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System.Linq; -using static Tensorflow.Binding; - -namespace Tensorflow.Operations -{ - public class gen_nn_ops - { - /// - /// Computes a 2-D convolution given 4-D `input` and `filter` tensors. - /// - /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` - /// and a filter / kernel tensor of shape - /// `[filter_height, filter_width, in_channels, out_channels]`, this op - /// performs the following: - /// - /// 1. Flattens the filter to a 2-D matrix with shape - /// `[filter_height * filter_width * in_channels, output_channels]`. - /// 2. Extracts image patches from the input tensor to form a *virtual* - /// tensor of shape `[batch, out_height, out_width, - /// filter_height * filter_width * in_channels]`. - /// 3. For each patch, right-multiplies the filter matrix and the image patch - /// vector. - /// - /// - /// - public static Tensor conv2d(Conv2dParams parameters) - => tf.Context.ExecuteOp("Conv2D", parameters.Name, new ExecuteOpArgs(parameters.Input, parameters.Filter) - .SetAttributes(new - { - strides = parameters.Strides, - padding = parameters.Padding, - use_cudnn_on_gpu = parameters.UseCudnnOnGpu, - explicit_paddings = parameters.ExplicitPaddings, - data_format = parameters.DataFormat, - dilations = parameters.Dilations - })); - - /// - /// Computes the gradients of convolution with respect to the filter. - /// - /// - /// - public static Tensor conv2d_backprop_filter(Tensor input, Tensor filter_sizes, Tensor out_backprop, - int[] strides, string padding, bool use_cudnn_on_gpu = true, - int[] explicit_paddings = null, - string data_format = "NHWC", - int[] dilations = null, - string name = null) - => tf.Context.ExecuteOp("Conv2DBackpropFilter", name, new ExecuteOpArgs(input, filter_sizes, out_backprop) - .SetAttributes(new - { - strides, - padding, - use_cudnn_on_gpu, - explicit_paddings = explicit_paddings ?? new int[0], - data_format, - dilations = dilations ?? new int[] { 1, 1, 1, 1 } - })); - - /// - /// Computes the gradients of convolution with respect to the input. 
- /// - /// - /// - public static Tensor conv2d_backprop_input(Tensor input_sizes, Tensor filter, Tensor out_backprop, - int[] strides, string padding, bool use_cudnn_on_gpu = true, - int[] explicit_paddings = null, - string data_format = "NHWC", - int[] dilations = null, - string name = null) - => tf.Context.ExecuteOp("Conv2DBackpropInput", name, new ExecuteOpArgs(input_sizes, filter, out_backprop) - .SetAttributes(new - { - strides, - padding, - use_cudnn_on_gpu, - explicit_paddings = explicit_paddings ?? new int[0], - data_format, - dilations = dilations ?? new int[] { 1, 1, 1, 1 } - })); - - public static Tensor bias_add(Tensor value, - IVariableV1 bias, - string data_format = null, - string name = null) - => tf.Context.ExecuteOp("BiasAdd", name, new ExecuteOpArgs(value, bias) - .SetAttributes(new { data_format = data_format ?? "NHWC" })); - - public static Tensor bias_add_grad(Tensor out_backprop, - string data_format = "NHWC", - string name = null) - => tf.Context.ExecuteOp("BiasAddGrad", name, new ExecuteOpArgs(out_backprop) - .SetAttributes(new { data_format = data_format ?? "NHWC" })); - - /// - /// Computes exponential linear: exp(features) - 1 if &lt; 0, features otherwise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Elu'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - /// ](http://arxiv.org/abs/1511.07289) - /// - public static Tensor elu(Tensor features, string name = "Elu") - { - var op = tf.OpDefLib._apply_op_helper("Elu", name: name, args: new { features }); - return op.output; - } - - /// - /// Gradient for batch normalization. 
- /// - /// - /// - public static Tensor[] fused_batch_norm_grad(FusedBatchNormParams @params) - { - var op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name: @params.Name, args: new - { - y_backprop = @params.YBackprop, - x = @params.X, - scale = @params.Scale, - reserve_space_1 = @params.ReserveSpace1, - reserve_space_2 = @params.ReserveSpace2, - epsilon = @params.Epsilon, - data_format = @params.DataFormat, - is_training = @params.IsTraining - }); - return op.outputs; - } - - public static Tensor[] fused_batch_norm_grad_v3(FusedBatchNormParams @params) - => tf.Context.ExecuteOp("FusedBatchNormGradV3", @params.Name, - new ExecuteOpArgs(@params.YBackprop, - @params.X, - @params.Scale, - @params.ReserveSpace1, - @params.ReserveSpace2, - @params.ReserveSpace3) - .SetAttributes(new - { - epsilon = @params.Epsilon, - data_format = @params.DataFormat, - is_training = @params.IsTraining - })); - - public static Tensor[] fused_batch_norm(Tensor x, - Tensor scale, - Tensor offset, - Tensor mean, - Tensor variance, - float epsilon = 0.0001f, - string data_format = "NHWC", - bool is_training = true, - string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name: name, args: new - { - x, - scale, - offset, - mean, - variance, - epsilon, - data_format, - is_training - }); - - return _op.outputs; - } - - public static Tensors fused_batch_norm_v3(Tensor x, - Tensor scale, - Tensor offset, - Tensor mean, - Tensor variance, - float epsilon = 0.0001f, - float exponential_avg_factor = 1.0f, - string data_format = "NHWC", - bool is_training = true, - string name = null) - => tf.Context.ExecuteOp("FusedBatchNormV3", name, new ExecuteOpArgs(x, scale, offset, mean, variance) - .SetAttributes(new { epsilon, data_format, is_training })); - - /// - /// Local Response Normalization. - /// - /// - /// - /// - /// - /// - /// - /// - public static Tensor local_response_normalization(Tensor input, int depth_radius = 5, int bias = 1, - int alpha = 1, float beta = 0.5f, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("LRN", name: name, args: new - { - input, - depth_radius, - bias, - alpha, - beta - }); - - return _op.output; - } - - public static Tensor log_softmax(Tensor logits, string name = null) - => tf.Context.ExecuteOp("LogSoftmax", name, new ExecuteOpArgs(logits)); - - /// - /// Says whether the targets are in the top `K` predictions. - /// - /// - /// - /// - /// - /// A `Tensor` of type `bool`. 
- public static Tensor in_top_kv2(Tensor predictions, Tensor targets, int k, string name = null) - => tf.Context.ExecuteOp("InTopKV2", name, - new ExecuteOpArgs(predictions, targets, k)); - - public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null) - => tf.Context.ExecuteOp("LeakyRelu", name, - new ExecuteOpArgs(features).SetAttributes(new { alpha })); - - public static Tensor average_pool(Tensor input, - int[] ksize, - int[] strides, - string padding, - string data_format = "NHWC", - string name = null) - => tf.Context.ExecuteOp("AvgPool", name, new ExecuteOpArgs(input) - .SetAttributes(new - { - ksize, - strides, - padding, - data_format - })); - - public static Tensor max_pool(Tensor input, - int[] ksize, - int[] strides, - string padding, - string data_format = "NHWC", - string name = null) - => tf.Context.ExecuteOp("MaxPool", name, new ExecuteOpArgs(input) - .SetAttributes(new - { - ksize, - strides, - padding, - data_format - })); - - public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, - string data_format = "NHWC", string name = null) - => tf.Context.ExecuteOp("MaxPoolGrad", name, new ExecuteOpArgs(orig_input, orig_output, grad) - .SetAttributes(new - { - ksize, - strides, - padding, - data_format - })); - - public static Tensor[] top_kv2(Tensor input, T k, bool sorted = true, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("TopKV2", name: name, args: new - { - input, - k, - sorted - }); - - return _op.outputs; - } - - public static Tensor relu_grad(Tensor gradients, Tensor features, string name = null) - => tf.Context.ExecuteOp("ReluGrad", name, new ExecuteOpArgs(gradients, features)); - - public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string name = null) - => tf.Context.ExecuteOp("LeakyReluGrad", name, new ExecuteOpArgs(gradients, features) - .SetAttributes(new { alpha })); - - public static Tensor softmax(Tensor logits, string name = null) - => tf.Context.ExecuteOp("Softmax", name, new ExecuteOpArgs(logits)); - - /// - /// Computes softmax cross entropy cost and gradients to backpropagate. - /// - /// - /// - /// - /// - public static (Tensor, Tensor) softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = null) - { - var results = tf.Context.ExecuteOp("SoftmaxCrossEntropyWithLogits", name, new ExecuteOpArgs(features, labels)); - - return (results[0], results[1]); - } - - /// - /// Computes softmax cross entropy cost and gradients to backpropagate. - /// - /// - /// batch_size x num_classes matrix - /// - /// - /// batch_size vector with values in [0, num_classes). - /// This is the label for the given minibatch entry. - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'. - /// - /// - /// Returns a tuple with multiple values, as follows: - /// loss : Per example loss (batch_size vector). - /// backprop : backpropagated gradients (batch_size x num_classes matrix). - /// The Operation can be fetched from any of the Tensorreturned in the tuple values, by fetching the Operation property. - /// - /// - /// Unlike SoftmaxCrossEntropyWithLogits, this operation does not accept - /// a matrix of label probabilities, but rather a single label per row - /// of features. This label is considered to have probability 1.0 for the - /// given row. - /// - /// Inputs are the logits, not probabilities. 
- /// - public static (Tensor loss, Tensor backprop) sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string name = "SparseSoftmaxCrossEntropyWithLogits") - { - var results = tf.Context.ExecuteOp("SparseSoftmaxCrossEntropyWithLogits", name, new ExecuteOpArgs(features, labels)); - - return (results[0], results[1]); - } - - /// - /// Computes rectified linear: `max(features, 0)`. - /// - /// A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `features`. - public static Tensor relu(Tensor features, string name = null) - => tf.Context.ExecuteOp("Relu", name, new ExecuteOpArgs(features)); - - public static Tensor tanh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Tanh", name, new ExecuteOpArgs(x)); - } -} diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs index 3ccf0c190..76a222ba3 100644 --- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs +++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs @@ -103,6 +103,11 @@ public Operation _apply_op_helper(string op_type_name, string name = null, Dicti DataType dtype = DataType.DtInvalid; DataType default_dtype = DataType.DtInvalid; + if (values is Tensors tensors) + { + values = (Tensor[])tensors; + } + if (_IsListParameter(input_arg)) { if (!_IsListValue(values)) diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs index 311f2184f..a789c5f4b 100644 --- a/src/TensorFlowNET.Core/Operations/Operation.cs +++ b/src/TensorFlowNET.Core/Operations/Operation.cs @@ -187,6 +187,33 @@ public void run(FeedItem[] feed_dict = null, Session session = null) public virtual T get_attr(string name) => (T)get_attr(name); + internal unsafe TF_DataType _get_attr_type(string name) + { + Status status = new(); + TF_DataType result; + c_api.TF_OperationGetAttrType(_handle, name, new IntPtr(&result), status); + status.Check(true); + return result; + } + + internal unsafe int _get_attr_int(string name) + { + Status status = new(); + int result; + c_api.TF_OperationGetAttrInt(_handle, name, new IntPtr(&result), status); + status.Check(true); + return result; + } + + internal unsafe bool _get_attr_bool(string name) + { + Status status = new(); + bool result; + c_api.TF_OperationGetAttrBool(_handle, name, new IntPtr(&result), status); + status.Check(true); + return result; + } + public virtual T[] get_attr_list(string name) { if (tf.executing_eagerly()) @@ -229,7 +256,42 @@ public virtual object get_attr(string name) if(oneof_value == AttrValue.ValueOneofCase.List) { - throw new NotImplementedException($"Unsupported field type in {oneof_value}"); + if (x.List.S is not null && x.List.S.Count > 0) + { + return x.List.S.Select(x => x.ToStringUtf8()).ToArray(); + } + else if (x.List.I is not null && x.List.I.Count > 0) + { + return x.List.I.ToArray(); + } + else if (x.List.F is not null && x.List.F.Count > 0) + { + return x.List.F.ToArray(); + } + else if (x.List.B is not null && x.List.B.Count > 0) + { + return x.List.B.ToArray(); + } + else if (x.List.Shape is not null && x.List.Shape.Count > 0) + { + return x.List.Shape.ToArray(); + } + else if (x.List.Tensor is not null && x.List.Tensor.Count > 0) + { + return x.List.Tensor.ToArray(); + } + else if (x.List.Func is not null && x.List.Func.Count > 0) + { + return 
x.List.Func.ToArray(); + } + else if (x.List.Type is not null && x.List.Type.Count > 0) + { + return x.List.Type.Select(x => x.as_tf_dtype()).ToArray(); + } + else + { + return null; + } } if(oneof_value == AttrValue.ValueOneofCase.Type) { diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs index 2767e8219..a0b47aace 100644 --- a/src/TensorFlowNET.Core/Operations/array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/array_ops.cs @@ -22,12 +22,13 @@ limitations under the License. using Tensorflow.Eager; using Tensorflow.Framework; using static Tensorflow.Binding; +using System.Diagnostics; namespace Tensorflow { public class array_ops { - public static Tensor placeholder_with_default(T input, int[] shape, string name = null) + public static Tensor placeholder_with_default(Tensor input, int[] shape, string name = null) => gen_array_ops.placeholder_with_default(input, shape, name); /// @@ -132,7 +133,7 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo if (ndims_mask < 1) throw new ValueError("mask cannot be scalar."); - var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], new[] { 0 }); + var leading_size = gen_math_ops.prod(shape(tensor_tensor)[$"{axis}:{axis + ndims_mask}"], ops.convert_to_tensor(new[] { 0 })); var shape1 = concat(new[] { shape(tensor_tensor)[$":{axis}"], @@ -153,7 +154,7 @@ public static Tensor boolean_mask(T1 tensor, T2 mask, string name = "boo private static Tensor _apply_mask_1d(Tensor reshaped_tensor, Tensor mask, int axis = 0) { var indices = squeeze(where(mask), axis: new[] { 1 }); - return gather(reshaped_tensor, indices, axis: axis); + return gather(reshaped_tensor, indices, axis: ops.convert_to_tensor(axis)); } public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) @@ -293,7 +294,7 @@ public static Tensor _autopacking_helper(IEnumerable list_or_tuple, TF_D } public static Tensor expand_dims(Tensor input, int axis = -1, string name = null) - => gen_array_ops.expand_dims(input, axis, name); + => gen_array_ops.expand_dims(input, ops.convert_to_tensor(axis), name); /// /// Creates a tensor filled with a scalar value. @@ -304,7 +305,7 @@ public static Tensor expand_dims(Tensor input, int axis = -1, string name = null /// Optional string. The name of the output `tf.Tensor`. /// A `tf.Tensor` with shape `dims` and the same dtype as `value`. public static Tensor fill(Shape dims, T value, string name = null) - => gen_array_ops.fill(dims, value, name: name); + => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name); /// /// Returns the rank of a tensor. 
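fill and expand_dims now route their value/axis arguments through ops.convert_to_tensor before reaching the regenerated gen_array_ops, so the public surface is intended to behave as before. A quick usage sketch (values are illustrative):

    using Tensorflow;
    using static Tensorflow.Binding;

    var t = tf.fill(new Shape(2, 3), 9);   // 2x3 tensor with every element equal to 9
    var e = tf.expand_dims(t, axis: 0);    // shape becomes (1, 2, 3)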
@@ -368,7 +369,7 @@ public static Tensor reshape(Tensor tensor, Shape shape, string name = null) => gen_array_ops.reshape(tensor, shape, name: name); public static Tensor reshape(Tensor tensor, object[] shape, string name = null) - => gen_array_ops.reshape(tensor, shape, name: name); + => gen_array_ops.reshape(tensor, ops.convert_to_tensor(shape), name: name); private static Tensor ones_like_impl(T tensor, TF_DataType dtype, string name, bool optimize = true) { @@ -466,7 +467,11 @@ public static Tensor one_hot(Tensor indices, Tensor depth, } public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null) - => gen_array_ops.unique(x, out_idx: out_idx, name: name); + { + var res = gen_array_ops.unique(x, out_idx: out_idx, name: name); + Debug.Assert(res.Length == 2); + return (res[0], res[1]); + } public static Tensor stack(Tensor[] values, int axis = 0, string name = "stack") { @@ -492,12 +497,12 @@ public static Tensor where(Tensor condition, object x = null, object y = null, s { name = scope; condition = ops.convert_to_tensor(condition, preferred_dtype: dtypes.@bool, name: "condition"); - return gen_array_ops.where(condition: condition, name: name); + return gen_array_ops.where(condition, name: name); }); } else if (x != null && y != null) { - return gen_array_ops.select(condition, x, y, name); + return gen_math_ops.select(condition, ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); } else { @@ -505,7 +510,6 @@ public static Tensor where(Tensor condition, object x = null, object y = null, s } } - public static Tensor where_v2(Tensor condition, object x = null, object y = null, string name = null) { if (x == null && y == null) @@ -514,18 +518,19 @@ public static Tensor where_v2(Tensor condition, object x = null, object y = null { name = scope; condition = ops.convert_to_tensor(condition, preferred_dtype: dtypes.@bool, name: "condition"); - return gen_array_ops.where(condition: condition, name: name); + return gen_array_ops.where(condition, name: name); }); } else if (x != null && y != null) { - return gen_array_ops.select_v2(condition, x, y, name); + return gen_math_ops.select_v2(condition, ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); } else { throw new ValueError("x and y must both be non-None or both be None."); } } + /// /// Returns the shape of a tensor. /// @@ -634,7 +639,13 @@ public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.D /// /// public static Tensor stop_gradient(Tensor input, string name = null) - => tf.Context.ExecuteOp("StopGradient", name, new ExecuteOpArgs(input)); + { + var tape = tf.GradientTape().stop_recording(); + var result = gen_array_ops.stop_gradient(input, name); + tape.StartRecord(); + tf.GradientTape().PushTape(tape); + return result; + } /// /// Extracts a strided slice of a tensor (generalized python array indexing). 
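Under the same assumption, multi-output ops in the regenerated gen_array_ops return Tensor[]; wrappers such as array_ops.unique unpack that array into a tuple, so existing call sites keep their shape. A hedged sketch, not part of the diff:

// Illustrative only; mirrors the unique wrapper above.
var (y, idx) = array_ops.unique(tf.constant(new[] { 1, 1, 2, 4, 4, 4, 7 }));
// y   == [1, 2, 4, 7]
// idx == [0, 0, 1, 2, 2, 2, 3]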
@@ -858,7 +869,7 @@ public static Tensor concat(Tensor[] values, int axis, string name = "concat") }); } - return gen_array_ops.concat_v2(values, axis, name: name); + return gen_array_ops.concat_v2(values, ops.convert_to_tensor(axis), name: name); } public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat") @@ -868,7 +879,7 @@ public static Tensor concat(Tensor[] values, Tensor axis, string name = "concat" public static Tensor concat(object[] values, int axis, string name = "concat") { - return gen_array_ops.concat_v2(values, axis, name: name); + return tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); } /// @@ -886,18 +897,33 @@ public static Tensor concat(object[] values, int axis, string name = "concat") /// /// An integer. The number of batch dimensions. Must be less than or equal to rank(indices). /// - public static Tensor gather(T1 @params, T2 indices, string name = null, int axis = 0, int batch_dims = 0) + public static Tensor gather(Tensor @params, Tensor indices, string name = null, Tensor axis = null, int batch_dims = 0) { - if (axis != 0) - return gen_array_ops.gather_v2(@params, indices, axis, name: name); - - if (@params is ResourceVariable variable && - indices is Tensor indices_tensor) - return variable.sparse_read(indices_tensor, name); + if (axis is null) + axis = tf.convert_to_tensor(batch_dims); + if(tensor_util.constant_value(axis) != 0) + { + return gen_array_ops.gather_v2(@params, indices, axis, batch_dims: batch_dims, name: name); + } return gen_array_ops.gather_v2(@params, indices, axis, name: name); } + public static Tensor gather(Tensor @params, Tensor indices, int axis, string name = null, int batch_dims = 0) + => gather(@params, indices, name, ops.convert_to_tensor(axis), batch_dims); + + public static Tensor gather(ResourceVariable @params, Tensor indices, string name = null, Tensor axis = null, int batch_dims = 0) + { + if (axis is null) + axis = tf.convert_to_tensor(batch_dims); + if (tensor_util.constant_value(axis) != 0) + { + throw new NotImplementedException(); + } + + return @params.sparse_read(indices, name); + } + public static Tensor transpose(T1 a, Axis perm, string name = "transpose", bool conjugate = false) { return tf_with(ops.name_scope(name, "transpose", new { a }), scope => @@ -927,7 +953,7 @@ public static Tensor[] split(Tensor value, Tensor size_splits, int axis, int num if (num == -1) num = (int)size_splits.shape[0]; - return gen_array_ops.split_v(value, size_splits, axis, num, name: name); + return gen_array_ops.split_v(value, size_splits, tf.convert_to_tensor(axis), num, name: name); } public static Tensor[] split(Tensor value, int num_split, T axis, @@ -956,20 +982,10 @@ private static Tensor[] split_eager_fallback(Ta axis, Tv value, int num_ } public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null) - => gen_array_ops.slice(input, begin, size, name: name); - - public static Tensor slice(Tensor input, Tb begin, Ts size, string name = null) - => gen_array_ops.slice(input, begin, size, name: name); + => gen_array_ops.slice(input, ops.convert_to_tensor(begin), ops.convert_to_tensor(size), name: name); public static Tensor slice(Tensor input, Tensor begin, Tensor size, string name = null) - => tf.Context.ExecuteOp("Slice", name, new ExecuteOpArgs(input, begin, size) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - Index = op.get_attr("Index") - } - }); + => gen_array_ops.slice(input, begin, size, name: name); public static Tensor stack(object 
values, int axis = 0, string name = "stack") diff --git a/src/TensorFlowNET.Core/Operations/dataset_ops.cs b/src/TensorFlowNET.Core/Operations/dataset_ops.cs index c7e627772..061fb95e3 100644 --- a/src/TensorFlowNET.Core/Operations/dataset_ops.cs +++ b/src/TensorFlowNET.Core/Operations/dataset_ops.cs @@ -233,7 +233,7 @@ public Tensor anonymous_iterator_v3(TF_DataType[] output_types, Shape[] output_s { try { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("AnonymousIteratorV3", name) + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AnonymousIteratorV3", name) { attrs = attrs }); @@ -250,7 +250,7 @@ public Tensor anonymous_iterator_v3(TF_DataType[] output_types, Shape[] output_s public Tensor anonymous_iterator_v3_eager_fallback(TF_DataType[] output_types, Shape[] output_shapes, string name, Context ctx) { object[] attrs = new object[] { output_types, output_shapes }; - var result = execute.quick_execute("AnonymousIteratorV3", 1, new Tensor[] { }, attrs, ctx, name); + var result = _execute.quick_execute("AnonymousIteratorV3", 1, new Tensor[] { }, attrs, ctx, name); return result[0]; } diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs index 1dc6504ab..9810d32f3 100644 --- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs @@ -1,543 +1,10327 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Linq; -using Tensorflow.Contexts; using Tensorflow.Eager; +using Tensorflow.Contexts; using static Tensorflow.Binding; -namespace Tensorflow +namespace Tensorflow; + +public static class gen_array_ops { - public static class gen_array_ops + /// + /// + /// + /// + /// + /// + /// + public static Tensor batch_matrix_band_part(Tensor input, Tensor num_lower, Tensor num_upper, string? 
name = null) { - public static Tensor batch_to_space_nd(T input, int[] block_shape, int[,] crops, string name = null) + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name: name, args: new { input, block_shape, crops }); - - return _op.output; + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_band_part_eager_fallback(input, num_lower, num_upper, name: name, ctx: _ctx); + } + catch (Exception) + { + } } - - public static Tensor check_numerics(Tensor tensor, string message, string name = null) + Dictionary keywords = new(); + keywords["input"] = input; + keywords["num_lower"] = num_lower; + keywords["num_upper"] = num_upper; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixBandPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name: name, args: new { tensor, message }); - - return _op.output; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixBandPart", _op.inputs, _attrs, _result); } + return _result[0]; + } - /// - /// Concatenates tensors along one dimension. - /// - /// - /// - /// - /// - public static Tensor concat_v2(T[] values, Ta axis, string name = null) - => tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); - - public static Tensor concat_v2(Tensor[] values, Tensor axis, string name = null) + public static Tensor batch_matrix_band_part_eager_fallback(Tensor input, Tensor num_lower, Tensor num_upper, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, num_lower, num_upper }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("BatchMatrixBandPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatrixBandPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor batch_matrix_diag(Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.Context.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_diag_eager_fallback(diagonal, name: name, ctx: _ctx); + } + catch (Exception) { - return concat_v2_eager_fallback(values, axis, name, tf.Context); } - - var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name: name, args: new { values, axis }); - return _op.output; } - - public static Tensor concat_v2(Tensor[] values, int axis, string name = null) - => tf.Context.ExecuteOp("ConcatV2", name, new ExecuteOpArgs(values, axis)); - - private static Tensor concat_v2_eager_fallback(T1[] values, T2 axis, string name, Context ctx) + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _attr_N = len(values); - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: values.Select(x => (object)x).ToArray()); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "ConcatV2", 1, _inputs_flat, _attrs, name: name)[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixDiag", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor[] concat_offset(Tensor concat_dim, Tensor[] shape, string name = null) + public static Tensor batch_matrix_diag_eager_fallback(Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("BatchMatrixDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name: name, args: new { concat_dim, shape }); - - return _op.outputs; + _execute.record_gradient("BatchMatrixDiag", _inputs_flat, _attrs, _result); } - - /// - /// Returns a diagonal tensor with a given diagonal values. - /// - /// - /// Rank k tensor where k is at most 1. - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Diag'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Given a diagonal, this operation returns a tensor with the diagonal and - /// everything else padded with zeros. The diagonal is computed as follows: - /// - /// Assume diagonal has dimensions [D1,..., Dk], then the output is a tensor of - /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - /// - /// output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik] and 0 everywhere else. 
- /// - /// For example: - /// - /// - /// # 'diagonal' is [1, 2, 3, 4] - /// tf.diag(diagonal) ==&gt; [[1, 0, 0, 0] - /// [0, 2, 0, 0] - /// [0, 0, 3, 0] - /// [0, 0, 0, 4]] - /// - /// - public static Tensor diag(Tensor diagonal, string name = null) - => tf.Context.ExecuteOp("Diag", name, new ExecuteOpArgs(diagonal)); - - public static Tensor diag_part(Tensor diagonal, string name = null) - => tf.Context.ExecuteOp("DiagPart", name, new ExecuteOpArgs(diagonal)); - - public static Tensor expand_dims(Tensor input, int axis, string name = null) - => tf.Context.ExecuteOp("ExpandDims", name, new ExecuteOpArgs(input, axis) - .SetAttributes(new { dim = axis })); - - public static Tensor gather_v2(T1 @params, T2 indices, int axis, int batch_dims = 0, string name = null) + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor batch_matrix_diag_part(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_diag_part_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixDiagPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var result = tf.Context.ExecuteOp("GatherV2", name, new ExecuteOpArgs( - @params, - indices, - axis).SetAttributes(new { batch_dims })); - return result [0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixDiagPart", _op.inputs, _attrs, _result); } + return _result[0]; + } - private static Tensor gather_v2_eager_fallback(object @params, object indices, int axis, string name, Context ctx) + public static Tensor batch_matrix_diag_part_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("BatchMatrixDiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var (_attr_T, param) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { @params }); - var (_attr_Tindice, indice) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { indices }); - var (_attr_Taxis, axiss) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new object[] { axis }); - var _inputs_flat = param.concat(indice).concat(axiss); - var _attrs = new object[] { "batch_dims", 0, "Tparams", _attr_T, "Tindices", _attr_Tindice, "Taxis", _attr_Taxis }; - - var results = tf.Runner.Execute(ctx, "GatherV2", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) - tf.Runner.RecordGradient("GatherV2", _inputs_flat, _attrs, results); - return results[0]; + _execute.record_gradient("BatchMatrixDiagPart", _inputs_flat, _attrs, _result); } - - - public static Tensor pad(Tensor input, Tensor paddings, string name = null) + return _result[0]; + } + /// + /// + /// + /// + /// + /// + public static Tensor batch_matrix_set_diag(Tensor input, Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.Context.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_matrix_set_diag_eager_fallback(input, diagonal, name: name, ctx: _ctx); + } + catch (Exception) { - /*var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, - "Pad", name, - null, - input, paddings); - return results[0];*/ - return pad_eager_fallback(input, paddings, name: name, ctx: tf.Context); } - - var _op = tf.OpDefLib._apply_op_helper("Pad", name: name, args: new { input, paddings }); - - return _op.output; } - - private static Tensor pad_eager_fallback(Tensor inputs, Tensor padding, string name = null, Context ctx = null) + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("BatchMatrixSetDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs }); - var (_attr_Tpaddings, paddings) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { padding }); - var _inputs_flat = input.concat(paddings); - var _attrs = new object[] { "T", _attr_T, "Tpaddings", _attr_Tpaddings }; - - var results = tf.Runner.Execute(ctx, "Pad", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) - tf.Runner.RecordGradient("Pad", _inputs_flat, _attrs, results); - return results[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BatchMatrixSetDiag", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor pack(Tensor[] values, int axis = 0, string name = null) - => tf.Context.ExecuteOp("Pack", name, new ExecuteOpArgs() + public static Tensor batch_matrix_set_diag_eager_fallback(Tensor input, Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("BatchMatrixSetDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatrixSetDiag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// BatchToSpace for 4-D tensors of type T. + /// + /// + /// + /// This is a legacy version of the more general BatchToSpaceND. + /// + /// Rearranges (permutes) data from batch into blocks of spatial data, followed by + /// cropping. This is the reverse transformation of SpaceToBatch. More specifically, + /// this op outputs a copy of the input tensor where values from the `batch` + /// dimension are moved in spatial blocks to the `height` and `width` dimensions, + /// followed by cropping along the `height` and `width` dimensions. + /// + /// + /// + /// + /// + /// + public static Tensor batch_to_space(Tensor input, Tensor crops, int block_size = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try { - OpInputArgs = new object[] { values } - }.SetAttributes(new { axis })); - - /// - /// Return a tensor with the same shape and contents as the input tensor or value. 
- /// - /// - /// - public static Tensor identity(Tensor input, string name = null) - => tf.Context.ExecuteOp("Identity", name, new ExecuteOpArgs(input)); - - public static Tensor invert_permutation(Tensor x, string name = null) + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpace", name) { args = new object[] { input, crops }, attrs = new Dictionary() { ["block_size"] = block_size } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_to_space_eager_fallback(input, crops, block_size: block_size, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["crops"] = crops; + keywords["block_size"] = block_size; + var _op = tf.OpDefLib._apply_op_helper("BatchToSpace", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, new { x }); - - return _op.outputs[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("BatchToSpace", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor log(Tensor x, string name = null) - => tf.Context.ExecuteOp("Log", name, new ExecuteOpArgs(x)); - - - public static Tensor rank(Tensor input, string name = null) - => tf.Context.ExecuteOp("Rank", name, new ExecuteOpArgs(input)); - - /// - /// Creates a tensor filled with a scalar value. - /// - /// A `Tensor`. - /// A `Tensor`. 0-D (scalar). Value to fill the returned tensor. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `value`. - public static Tensor fill(Tensor dims, T value, string name = null) + public static Tensor batch_to_space_eager_fallback(Tensor input, Tensor crops, int block_size, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, crops }; + object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "Tidx", crops.dtype }; + var _result = _execute.execute("BatchToSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchToSpace", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// BatchToSpace for N-D tensors of type T. + /// + /// + /// + /// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape + /// `block_shape + [batch]`, interleaves these blocks back into the grid defined by + /// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as + /// the input. The spatial dimensions of this intermediate result are then + /// optionally cropped according to `crops` to produce the output. This is the + /// reverse of SpaceToBatch. See below for a precise description. + /// + /// + /// + /// + /// + /// + public static Tensor batch_to_space_nd(Tensor input, Tensor block_shape, Tensor crops, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - var ctx = tf.Context; - if (ctx.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpaceND", name) { args = new object[] { input, block_shape, crops }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) { - try - { - var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Fill", name, dims, value)); - return _result[0]; - } - catch (Exception) - { - - } - try - { - return fill_eager_fallback(dims, value as Tensor, name, ctx); - } - catch (Exception) - { - - } } - Dictionary attrs = new Dictionary(); - attrs["dims"] = dims; - attrs["value"] = value; - var result = tf.OpDefLib._apply_op_helper("Fill", name, attrs); - if (execute.must_record_gradient()) + try + { + return batch_to_space_nd_eager_fallback(input, block_shape, crops, name: name, ctx: _ctx); + } + catch (Exception) { - throw new NotImplementedException(); } - return result.output; } - - public static Tensor fill_eager_fallback(Tensor dims, Tensor value, string name, Context ctx) + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_shape"] = block_shape; + keywords["crops"] = crops; + var _op = tf.OpDefLib._apply_op_helper("BatchToSpaceND", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - object[] attrs = new object[] { "T", dims.dtype.as_datatype_enum(), "index_type", dims.dtype.as_datatype_enum() }; - var _result = execute.executes("Fill", 1, new Tensor[] { dims, value }, attrs, ctx, name); + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tblock_shape", _op._get_attr_type("Tblock_shape"), "Tcrops", _op._get_attr_type("Tcrops") }; + _execute.record_gradient("BatchToSpaceND", _op.inputs, _attrs, _result); + } + return _result[0]; + } - if (execute.must_record_gradient()) + public static Tensor batch_to_space_nd_eager_fallback(Tensor input, Tensor block_shape, Tensor crops, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, block_shape, crops }; + object[] _attrs = new object[] { "T", input.dtype, "Tblock_shape", block_shape.dtype, "Tcrops", crops.dtype }; + var _result = _execute.execute("BatchToSpaceND", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchToSpaceND", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Bitcasts a tensor from one type to another without copying data. + /// + /// + /// + /// Given a tensor `input`, this operation returns a tensor that has the same buffer + /// data as `input` with datatype `type`. + /// + /// If the input datatype `T` is larger than the output datatype `type` then the + /// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + /// + /// If `T` is smaller than `type`, the operator requires that the rightmost + /// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + /// [..., sizeof(`type`)/sizeof(`T`)] to [...]. + /// + /// tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + /// (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + /// gives module error. + /// For example, + /// + /// Example 1: + /// + /// >>> a = [1., 2., 3.] + /// >>> equality_bitcast = tf.bitcast(a, tf.complex128) + /// Traceback (most recent call last): + /// ... 
+ /// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + /// >>> equality_cast = tf.cast(a, tf.complex128) + /// >>> print(equality_cast) + /// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + /// + /// Example 2: + /// + /// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + /// + /// + /// Example 3: + /// + /// >>> x = [1., 2., 3.] + /// >>> y = [0., 2., 3.] + /// >>> equality= tf.equal(x,y) + /// >>> equality_cast = tf.cast(equality,tf.float32) + /// >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + /// >>> print(equality) + /// tf.Tensor([False True True], shape=(3,), dtype=bool) + /// >>> print(equality_cast) + /// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + /// >>> print(equality_bitcast) + /// tf.Tensor( + /// [[ 0 0 0 0] + /// [ 0 0 128 63] + /// [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + /// + /// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different + /// endian orderings will give different results. + /// + /// + /// + /// + /// + public static Tensor bitcast(Tensor input, TF_DataType type, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bitcast", name) { args = new object[] { input }, attrs = new Dictionary() { ["type"] = type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bitcast_eager_fallback(input, type: type, name: name, ctx: _ctx); + } + catch (Exception) { - throw new NotImplementedException(); } - return _result[0]; } - //=> tf.Context.ExecuteOp("Fill", name, new ExecuteOpArgs(dims, value)); - - /// - /// Return the reduction indices for computing gradients of s0 op s1 with broadcast. - /// - /// A `Tensor`. Must be one of the following types: `int32`, `int64`. - /// A `Tensor`. Must have the same type as `s0`. - /// A name for the operation (optional). - /// A tuple of `Tensor` objects (r0, r1). 
- public static (Tensor, Tensor) broadcast_gradient_args(Tensor s0, Tensor s1, string name = "") + Dictionary keywords = new(); + keywords["input"] = input; + keywords["type"] = type; + var _op = tf.OpDefLib._apply_op_helper("Bitcast", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var results = tf.Context.ExecuteOp("BroadcastGradientArgs", name, new ExecuteOpArgs(s0, s1)); - return (results[0], results[1]); + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "type", _op._get_attr_type("type") }; + _execute.record_gradient("Bitcast", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor reverse(Tensor tensor, T axis, string name = null) + public static Tensor bitcast_eager_fallback(Tensor input, TF_DataType type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "type", type }; + var _result = _execute.execute("Bitcast", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("ReverseV2", name, new { tensor, axis }); - return _op.output; + _execute.record_gradient("Bitcast", _inputs_flat, _attrs, _result); } - - public static Tensor reshape(Tensor tensor, T shape, string name = null) - => tf.Context.ExecuteOp("Reshape", name, new ExecuteOpArgs(tensor, shape)); - - public static Tensor reshape(Tensor tensor, object[] shape, string name = null) - => tf.Context.ExecuteOp("Reshape", name, new ExecuteOpArgs(tensor, shape)); - - private static Tensor reshape_eager_fallback(Tensor tensor, object[] shape, string name, Context ctx) + return _result[0]; + } + /// + /// Return the shape of s0 op s1 with broadcast. + /// + /// + /// + /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the + /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. + /// + /// + /// + /// + /// + public static Tensor broadcast_args(Tensor s0, Tensor s1, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - var (_attr_T, _input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { tensor }); - var (_attr_Tshape, _input_shape) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { shape }, default_dtype: TF_DataType.TF_INT32); - var _inputs_flat = new[] { _input[0], _input_shape[0] }; - var _attrs = new object[] { "T", _attr_T, "Tshape", _attr_Tshape }; - - var results = tf.Runner.Execute(ctx, "Reshape", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return broadcast_args_eager_fallback(s0, s1, name: name, ctx: _ctx); + } + catch (Exception) { - tf.Runner.RecordGradient("Reshape", _inputs_flat, _attrs, results); } - return results[0]; } - - /// - /// Finds unique elements in a 1-D tensor. 
- /// - /// - /// - /// - /// - public static (Tensor, Tensor) unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string name = null) + Dictionary keywords = new(); + keywords["s0"] = s0; + keywords["s1"] = s1; + var _op = tf.OpDefLib._apply_op_helper("BroadcastArgs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("Unique", name, new { x, out_idx }); - // TODO - //var _result = _UniqueOutput._make(_op.outputs); - return (_op.outputs[0], _op.outputs[1]); + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BroadcastArgs", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor[] unpack(Tensor value, int num, int axis = 0, string name = null) - => tf.Context.ExecuteOp("Unpack", name, new ExecuteOpArgs(value, num) - .SetAttributes(new { axis, num })); - - public static Tensor where(Tensor condition, string name = null) + public static Tensor broadcast_args_eager_fallback(Tensor s0, Tensor s1, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { s0, s1 }; + object[] _attrs = new object[] { "T", s0.dtype }; + var _result = _execute.execute("BroadcastArgs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("Where", name, new { input = condition }); - return _op.output; + _execute.record_gradient("BroadcastArgs", _inputs_flat, _attrs, _result); } - - public static Tensor one_hot(Tensor indices, Tensor depth, - Tensor on_value = null, - Tensor off_value = null, - TF_DataType dtype = TF_DataType.DtInvalid, - int axis = -1, - string name = null) - => tf.Context.ExecuteOp("OneHot", name, new ExecuteOpArgs(indices, depth, on_value, off_value) - .SetAttributes(new { axis })); - - /// - /// A placeholder op that passes through `input` when its output is not fed. - /// - /// The default value to produce when output is not fed. - /// - /// - /// - public static Tensor placeholder_with_default(T input, int[] shape, string name = null) + return _result[0]; + } + /// + /// Return the reduction indices for computing gradients of s0 op s1 with broadcast. + /// + /// + /// + /// This is typically used by gradient computations for a broadcasting operation. + /// + /// + /// + /// + /// + public static Tensor[] broadcast_gradient_args(Tensor s0, Tensor s1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastGradientArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return broadcast_gradient_args_eager_fallback(s0, s1, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["s0"] = s0; + keywords["s1"] = s1; + var _op = tf.OpDefLib._apply_op_helper("BroadcastGradientArgs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, new { input, shape, name }); - return _op.outputs[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BroadcastGradientArgs", _op.inputs, _attrs, _result); } + return _result; + } - public static Tensor select(Tensor condition, Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Select", name, new ExecuteOpArgs(condition, x, y)); - - public static Tensor select_v2(Tensor condition, Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("SelectV2", name, new ExecuteOpArgs(condition, x, y)); - - public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor[] shape, string name = null) + public static Tensor[] broadcast_gradient_args_eager_fallback(Tensor s0, Tensor s1, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { s0, s1 }; + object[] _attrs = new object[] { "T", s0.dtype }; + var _result = _execute.execute("BroadcastGradientArgs", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("ScatterNd", name, new { indices, updates, shape }); - return _op.outputs[0]; + _execute.record_gradient("BroadcastGradientArgs", _inputs_flat, _attrs, _result); } - - public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) - => tf.Context.ExecuteOp("Shape", name, new ExecuteOpArgs(input) - .SetAttributes(new { out_type })); - - /// - /// Returns shape of tensors. - /// - /// - /// - /// - /// - public static Tensor[] shape_n(Tensor[] input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) - => tf.Context.ExecuteOp("ShapeN", name, new ExecuteOpArgs() + return _result; + } + /// + /// Broadcast an array for a compatible shape. + /// + /// + /// + /// Broadcasting is the process of making arrays to have compatible shapes + /// for arithmetic operations. Two shapes are compatible if for each + /// dimension pair they are either equal or one of them is one. + /// + /// For example: + /// + /// >>> x = tf.constant([[1, 2, 3]]) # Shape (1, 3,) + /// >>> y = tf.broadcast_to(x, [2, 3]) + /// >>> print(y) + /// tf.Tensor( + /// [[1 2 3] + /// [1 2 3]], shape=(2, 3), dtype=int32) + /// + /// In the above example, the input Tensor with the shape of `[1, 3]` + /// is broadcasted to output Tensor with shape of `[2, 3]`. + /// + /// When broadcasting, if a tensor has fewer axes than necessary its shape is + /// padded on the left with ones. 
So this gives the same result as the previous + /// example: + /// + /// >>> x = tf.constant([1, 2, 3]) # Shape (3,) + /// >>> y = tf.broadcast_to(x, [2, 3]) + /// + /// + /// When doing broadcasted operations such as multiplying a tensor + /// by a scalar, broadcasting (usually) confers some time or space + /// benefit, as the broadcasted tensor is never materialized. + /// + /// However, `broadcast_to` does not carry with it any such benefits. + /// The newly-created tensor takes the full memory of the broadcasted + /// shape. (In a graph context, `broadcast_to` might be fused to + /// subsequent operation and then be optimized away, however.) + /// + /// + /// + /// + /// + public static Tensor broadcast_to(Tensor input, Tensor shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try { - OpInputArgs = new object[] { input } - }.SetAttributes(new { out_type })); - - public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string name = null) + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastTo", name) { args = new object[] { input, shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return broadcast_to_eager_fallback(input, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("BroadcastTo", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("Size", name, new { input, out_type }); - return _op.outputs[0]; + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("BroadcastTo", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null) + public static Tensor broadcast_to_eager_fallback(Tensor input, Tensor shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, shape }; + object[] _attrs = new object[] { "T", input.dtype, "Tidx", shape.dtype }; + var _result = _execute.execute("BroadcastTo", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BroadcastTo", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Checks a tensor for NaN and Inf values. + /// + /// + /// + /// When run, reports an `InvalidArgument` error if `tensor` has any values + /// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input + /// tensor. + /// + /// Example usage: + /// + /// ``` python + /// a = tf.Variable(1.0) + /// tf.debugging.check_numerics(a, message='') + /// + /// b = tf.Variable(np.nan) + /// try: + /// tf.debugging.check_numerics(b, message='Checking b') + /// except Exception as e: + /// assert "Checking b : Tensor had NaN values" in e.message + /// + /// c = tf.Variable(np.inf) + /// try: + /// tf.debugging.check_numerics(c, message='Checking c') + /// except Exception as e: + /// assert "Checking c : Tensor had Inf values" in e.message + /// ``` + /// + /// + /// + /// + /// + /// + /// Prefix of the error message. + /// + /// + /// + public static Tensor check_numerics(Tensor tensor, string message, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) { - if (tf.executing_eagerly()) + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumerics", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return check_numerics_eager_fallback(tensor, message: message, name: name, ctx: _ctx); + } + catch (Exception) { - var result = slice_eager_fallback(input, begin, size, name, tf.Context); - return result; } - - var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size }); - return _op.outputs[0]; } - - private static Tensor slice_eager_fallback(Tensor inputs, Tensor[] begin, Tensor[] size, string name, Context ctx) + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["message"] = message; + var _op = tf.OpDefLib._apply_op_helper("CheckNumerics", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs }); - var (_attr_Tidx, _inputs_Index) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { begin, size }); - var _inputs_flat = input.concat(_inputs_Index); - var _attrs = new object[] { "T", _attr_T, "Index", _attr_Tidx }; + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") }; + _execute.record_gradient("CheckNumerics", _op.inputs, _attrs, _result); + } + return _result[0]; + } - var results = tf.Runner.Execute(ctx, "Slice", 1, _inputs_flat, _attrs, name: name); - if (tf.Runner.MustRecordGradient()) + public static Tensor check_numerics_eager_fallback(Tensor tensor, string message, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor }; + object[] _attrs = new object[] { "T", tensor.dtype, "message", message }; + var _result = _execute.execute("CheckNumerics", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("CheckNumerics", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Checks a tensor for NaN, -Inf and +Inf values. + /// + /// + /// + /// When run, reports an `InvalidArgument` error if `tensor` has any values + /// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input + /// tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf + /// in the errors it throws. + /// + /// + /// + /// + /// + /// Prefix of the error message. + /// + /// + /// + public static Tensor check_numerics_v2(Tensor tensor, string message, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumericsV2", name) { args = new object[] { tensor }, attrs = new Dictionary() { ["message"] = message } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return check_numerics_v2_eager_fallback(tensor, message: message, name: name, ctx: _ctx); + } + catch (Exception) { - tf.Runner.RecordGradient("Slice", _inputs_flat, _attrs, results); } - return results[0]; } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["message"] = message; + var _op = tf.OpDefLib._apply_op_helper("CheckNumericsV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") }; + _execute.record_gradient("CheckNumericsV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor slice(Tensor input, Tb begin, Ts size, string name = null) + public static Tensor check_numerics_v2_eager_fallback(Tensor tensor, string message, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor }; + object[] _attrs = new object[] { "T", tensor.dtype, "message", message }; + var _result = _execute.execute("CheckNumericsV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) { - if (tf.executing_eagerly()) + _execute.record_gradient("CheckNumericsV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Concatenates tensors along one dimension. + /// + /// + /// + /// + public static Tensor concat(Tensor concat_dim, Tensors values, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Concat", name) { args = new object[] { concat_dim, values }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return concat_eager_fallback(concat_dim, values, name: name, ctx: _ctx); + } + catch (Exception) { - var outputs = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("Slice", name, input, begin, size)); - return outputs[0]; } - - var _op = tf.OpDefLib._apply_op_helper("Slice", name, new { input, begin, size }); - return _op.outputs[0]; } + Dictionary keywords = new(); + keywords["concat_dim"] = concat_dim; + keywords["values"] = values; + var _op = tf.OpDefLib._apply_op_helper("Concat", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("Concat", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor[] split_v(Tensor value, Tensor size_splits, - int axis, int num_split, string name = null) - => tf.Context.ExecuteOp("SplitV", name, new ExecuteOpArgs(value, size_splits, axis) - .SetAttributes(new { num_split })); - - public static Tensor tile(Tensor input, Tensor multiples, string name = null) - => tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, multiples)); - - public static Tensor tile(Tensor input, object[] multiples, string name = null) - => tf.Context.ExecuteOp("Tile", name, new ExecuteOpArgs(input, multiples)); - - public static Tensor transpose(Tensor x, T1 perm, string name = null) - => tf.Context.ExecuteOp("Transpose", name, new ExecuteOpArgs(x, perm)); - - public static Tensor ones_like(Tensor x, string name = null) - => tf.Context.ExecuteOp("OnesLike", name, new ExecuteOpArgs(x)); - - public static Tensor zeros_like(Tensor x, string name = null) - => tf.Context.ExecuteOp("ZerosLike", name, new ExecuteOpArgs(x)); - - public static Tensor stop_gradient(Tensor x, string name = null) + public static Tensor concat_eager_fallback(Tensor concat_dim, Tensors values, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.Add(concat_dim); + _inputs_flat_list.AddRange(values); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype }; + var _result = _execute.execute("Concat", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Concat", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes offsets of concat inputs within its output. + /// + /// + /// + /// For example: + /// + /// >>> x = [2, 2, 7] + /// >>> y = [2, 3, 7] + /// >>> z = [2, 9, 7] + /// >>> offsets = concat_offset(1, [x, y, z]) + /// >>> [list(off.numpy()) for off in offsets] + /// [[0, 0, 0], [0, 2, 0], [0, 5, 0]] + /// + /// This is typically used by gradient computations for a concat operation. + /// + /// + /// + /// + /// + public static Tensor[] concat_offset(Tensor concat_dim, Tensors shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatOffset", name) { args = new object[] { concat_dim, shape }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return concat_offset_eager_fallback(concat_dim, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["concat_dim"] = concat_dim; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("ConcatOffset", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("StopGradient", name, args: new { input = x, name }); + object[] _attrs = new object[] { "N", _op._get_attr_int("N") }; + _execute.record_gradient("ConcatOffset", _op.inputs, _attrs, _result); + } + return _result; + } - return _op.output; + public static Tensor[] concat_offset_eager_fallback(Tensor concat_dim, Tensors shape, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.Add(concat_dim); + _inputs_flat_list.AddRange(shape); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", shape.Length }; + var _result = _execute.execute("ConcatOffset", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ConcatOffset", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Concatenates tensors along one dimension. + /// + /// + /// + /// + public static Tensor concat_v2(Tensors values, Tensor axis, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatV2", name) { args = new object[] { values, axis }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return concat_v2_eager_fallback(values, axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("ConcatV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("ConcatV2", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides, - long begin_mask = 0, - long end_mask = 0, - long ellipsis_mask = 0, - long new_axis_mask = 0, - long shrink_axis_mask = 0, - string name = null) - => tf.Context.ExecuteOp("StridedSlice", name, new ExecuteOpArgs(input, begin, end, strides) - .SetAttributes(new - { - begin_mask, - end_mask, - ellipsis_mask, - new_axis_mask, - shrink_axis_mask - })); - - public static Tensor resource_strided_slice_assign(Tensor input, Tensor begin, Tensor end, Tensor strides, Tensor value, - int begin_mask = 0, - int end_mask = 0, - int ellipsis_mask = 0, - int new_axis_mask = 0, - int shrink_axis_mask = 0, - string name = null) - => tf.Context.ExecuteOp("ResourceStridedSliceAssign", name, new ExecuteOpArgs(input, begin, end, strides, value) - .SetAttributes(new - { - begin_mask, - end_mask, - ellipsis_mask, - new_axis_mask, - shrink_axis_mask 
- })); - - public static Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides, - int begin_mask = 0, - int end_mask = 0, - int ellipsis_mask = 0, - int new_axis_mask = 0, - int shrink_axis_mask = 0, - string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, new - { - input, - begin, - end, - strides, - begin_mask, - end_mask, - ellipsis_mask, - new_axis_mask, - shrink_axis_mask - }); - - return _op.outputs[0]; - } - - /// - /// Removes dimensions of size 1 from the shape of a tensor. - /// Given a tensor `input`, this operation returns a tensor of the same type with - /// all dimensions of size 1 removed.If you don't want to remove all size 1 - /// dimensions, you can remove specific size 1 dimensions by specifying - /// `axis`. - /// - /// A `Tensor`. The `input` to squeeze. - /// An optional list of `ints`. Defaults to `[]`. If specified, only squeezes the dimensions listed. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `input`. - public static Tensor squeeze(Tensor input, int[] axis = null, string name = null) - => tf.Context.ExecuteOp("Squeeze", name, new ExecuteOpArgs(input) - .SetAttributes(new { squeeze_dims = axis })); - - /// - /// Return the shape of s0 op s1 with broadcast. - /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the - /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. - /// - /// A `Tensor`. Must be one of the following types: `int32`, `int64`. - /// A `Tensor`. Must have the same type as `s0`. - /// A name for the operation (optional). - /// `Tensor`. Has the same type as `s0`. - public static Tensor broadcast_args(Tensor s0, Tensor s1, string name = null) - => tf.Context.ExecuteOp("BroadcastArgs", name, new ExecuteOpArgs(s0, s1)); - - /// - /// Broadcast an array for a compatible shape. - /// - /// - /// - /// - /// - public static Tensor broadcast_to(Tensor input, T shape, string name = null) - => tf.Context.ExecuteOp("BroadcastTo", name, new ExecuteOpArgs(input, shape)); + public static Tensor concat_v2_eager_fallback(Tensors values, Tensor axis, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(values); + _inputs_flat_list.Add(axis); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("ConcatV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ConcatV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Shuffle dimensions of x according to a permutation and conjugate the result. + /// + /// + /// + /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + /// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])` + /// + /// + /// + /// + /// + public static Tensor conjugate_transpose(Tensor x, Tensor perm, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConjugateTranspose", name) { args = new object[] { x, perm }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conjugate_transpose_eager_fallback(x, perm, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["perm"] = perm; + var _op = tf.OpDefLib._apply_op_helper("ConjugateTranspose", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tperm", _op._get_attr_type("Tperm") }; + _execute.record_gradient("ConjugateTranspose", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conjugate_transpose_eager_fallback(Tensor x, Tensor perm, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, perm }; + object[] _attrs = new object[] { "T", x.dtype, "Tperm", perm.dtype }; + var _result = _execute.execute("ConjugateTranspose", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ConjugateTranspose", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a constant tensor. + /// + /// + /// + /// Attr `value` is the tensor to return. + /// + /// + /// + /// + public static Tensor _const(TensorProto value, TF_DataType dtype, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Const", name) { args = new object[] { }, attrs = new Dictionary() { ["value"] = value, ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return const_eager_fallback(value: value, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("Const", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "value", _op.get_attr("value"), "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("Const", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor const_eager_fallback(TensorProto value, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "value", value, "dtype", dtype }; + var _result = _execute.execute("Const", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Const", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Identity op for gradient debugging. + /// + /// + /// + /// This op is hidden from public in Python. It is used by TensorFlow Debugger to + /// register gradient tensors for gradient debugging. + /// This op operates on non-reference-type tensors. + /// + /// + /// + /// + public static Tensor debug_gradient_identity(Tensor input, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DebugGradientIdentity", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return debug_gradient_identity_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("DebugGradientIdentity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DebugGradientIdentity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor debug_gradient_identity_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("DebugGradientIdentity", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DebugGradientIdentity", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Identity op for gradient debugging. + /// + /// + /// + /// This op is hidden from public in Python. It is used by TensorFlow Debugger to + /// register gradient tensors for gradient debugging. + /// This op operates on reference-type tensors. + /// + /// + /// + /// + public static Tensor debug_gradient_ref_identity(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg input is a ref."); + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("DebugGradientRefIdentity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DebugGradientRefIdentity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor debug_gradient_ref_identity_eager_fallback(Tensor input, string name, Context ctx) + { + throw new RuntimeError($"debug_gradient_ref_identity op does not support eager execution. Arg 'input' is a ref."); + } + /// + /// Makes a copy of `x`. + /// + /// + /// + public static Tensor deep_copy(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeepCopy", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return deep_copy_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("DeepCopy", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DeepCopy", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor deep_copy_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("DeepCopy", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DeepCopy", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// DepthToSpace for tensors of type T. + /// + /// + /// + /// Rearranges data from depth into blocks of spatial data. + /// This is the reverse transformation of SpaceToDepth. More specifically, + /// this op outputs a copy of the input tensor where values from the `depth` + /// dimension are moved in spatial blocks to the `height` and `width` dimensions. + /// The attr `block_size` indicates the input block size and how the data is moved. + /// + /// * Chunks of data of size `block_size * block_size` from depth are rearranged + /// into non-overlapping blocks of size `block_size x block_size` + /// * The width of the output tensor is `input_depth * block_size`, whereas the + /// height is `input_height * block_size`. + /// * The Y, X coordinates within each block of the output image are determined + /// by the high order component of the input channel index. + /// * The depth of the input tensor must be divisible by + /// `block_size * block_size`. + /// + /// The `data_format` attr specifies the layout of the input and output tensors + /// with the following options: + /// "NHWC": `[ batch, height, width, channels ]` + /// "NCHW": `[ batch, channels, height, width ]` + /// "NCHW_VECT_C": + /// `qint8 [ batch, channels / 4, height, width, 4 ]` + /// + /// It is useful to consider the operation as transforming a 6-D Tensor. + /// e.g. for data_format = NHWC, + /// Each element in the input tensor can be specified via 6 coordinates, + /// ordered by decreasing memory layout significance as: + /// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + /// within the input image, bX, bY means coordinates + /// within the output block, oC means output channels). + /// The output would be the input transposed to the following layout: + /// n,iY,bY,iX,bX,oC + /// + /// This operation is useful for resizing the activations between convolutions + /// (but keeping all data), e.g. instead of pooling. It is also useful for training + /// purely convolutional models. 
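(Aside, not part of the patch.) A minimal C# counterpart of the first DepthToSpace example that follows in this comment. The static class name `gen_array_ops` and the 4-D `tf.constant` overload are assumptions made for illustration only:

```csharp
using static Tensorflow.Binding;

// x has shape [1, 1, 1, 4]; with block_size = 2 the output has shape [1, 2, 2, 1].
Tensor x = tf.constant(new int[,,,] { { { { 1, 2, 3, 4 } } } }); // overload assumed
Tensor y = gen_array_ops.depth_to_space(x, block_size: 2);       // class name assumed
// expected contents: [[[[1], [2]], [[3], [4]]]]
```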
+ /// + /// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and + /// block_size = 2: + /// + /// ``` + /// x = [[[[1, 2, 3, 4]]]] + /// + /// ``` + /// + /// This operation will output a tensor of shape `[1, 2, 2, 1]`: + /// + /// ``` + /// [[[[1], [2]], + /// [[3], [4]]]] + /// ``` + /// + /// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, + /// the corresponding output will have 2x2 elements and will have a depth of + /// 1 channel (1 = `4 / (block_size * block_size)`). + /// The output element shape is `[2, 2, 1]`. + /// + /// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. + /// + /// ``` + /// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + /// ``` + /// + /// This operation, for block size of 2, will return the following tensor of shape + /// `[1, 2, 2, 3]` + /// + /// ``` + /// [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// + /// ``` + /// + /// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: + /// + /// ``` + /// x = [[[[1, 2, 3, 4], + /// [5, 6, 7, 8]], + /// [[9, 10, 11, 12], + /// [13, 14, 15, 16]]]] + /// ``` + /// + /// the operator will return the following tensor of shape `[1 4 4 1]`: + /// + /// ``` + /// x = [[[ [1], [2], [5], [6]], + /// [ [3], [4], [7], [8]], + /// [ [9], [10], [13], [14]], + /// [ [11], [12], [15], [16]]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// The size of the spatial block, same as in Space2Depth. + /// + /// + /// + /// + public static Tensor depth_to_space(Tensor input, int block_size = 0, string data_format = "NHWC", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthToSpace", name) { args = new object[] { input }, attrs = new Dictionary() { ["block_size"] = block_size, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depth_to_space_eager_fallback(input, block_size: block_size, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_size"] = block_size; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("DepthToSpace", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("DepthToSpace", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depth_to_space_eager_fallback(Tensor input, int block_size, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "data_format", data_format }; + var _result = _execute.execute("DepthToSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthToSpace", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Dequantize the 'input' tensor into a float or bfloat16 Tensor. + /// + /// + /// + /// [min_range, max_range] are scalar floats that specify the range for + /// the output. 
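(Aside, not part of the patch.) The MIN_COMBINED mapping described next can be previewed with plain arithmetic. The helper below is hypothetical, not part of the generated code; it only mirrors the quint8 / QuantizedRelu6 case discussed in this comment, where `range(quint8) = 255` and no qint8 offset is applied:

```csharp
// Hypothetical illustration of MIN_COMBINED dequantization for quint8 inputs.
static float DequantizeMinCombined(byte v, float minRange, float maxRange)
    => minRange + v * (maxRange - minRange) / 255f; // 255 = range of quint8

// DequantizeMinCombined(255, 0f, 6f) == 6f
// DequantizeMinCombined(128, 0f, 6f) ≈ 3.01f
```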
The 'mode' attribute controls exactly which calculations are + /// used to convert the float values to their quantized equivalents. + /// + /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + /// + /// ``` + /// if T == qint8: in[i] += (range(T) + 1)/ 2.0 + /// out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + /// ``` + /// here `range(T) = numeric_limits::max() - numeric_limits::min()` + /// + /// *MIN_COMBINED Mode Example* + /// + /// If the input comes from a QuantizedRelu6, the output type is + /// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + /// 0-6. The min_range and max_range values are therefore 0.0 and 6.0. + /// Dequantize on quint8 will take each value, cast to float, and multiply + /// by 6 / 255. + /// Note that if quantizedtype is qint8, the operation will additionally add + /// each value by 128 prior to casting. + /// + /// If the mode is 'MIN_FIRST', then this approach is used: + /// + /// ```c++ + /// num_discrete_values = 1 << (# of bits in T) + /// range_adjust = num_discrete_values / (num_discrete_values - 1) + /// range = (range_max - range_min) * range_adjust + /// range_scale = range / num_discrete_values + /// const double offset_input = static_cast(input) - lowest_quantized; + /// result = range_min + ((input - numeric_limits::min()) * range_scale) + /// ``` + /// + /// If the mode is `SCALED`, dequantization is performed by multiplying each + /// input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + /// + /// The scaling_factor is determined from `min_range`, `max_range`, and + /// `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}` + /// and `QuantizeV2`, using the following algorithm: + /// + /// ```c++ + /// + /// const int min_expected_T = std::numeric_limits::min() + + /// (narrow_range ? 1 : 0); + /// const int max_expected_T = std::numeric_limits::max(); + /// const float max_expected_T = std::numeric_limits::max(); + /// + /// const float scale_factor = + /// (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + /// : std::max(min_range / min_expected_T, + /// max_range / max_expected_T); + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Type of the output tensor. Currently Dequantize supports float and bfloat16. + /// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. + /// + /// + /// + public static Tensor dequantize(Tensor input, Tensor min_range, Tensor max_range, string mode = "MIN_COMBINED", bool narrow_range = false, int axis = -1, TF_DataType dtype = TF_DataType.TF_FLOAT, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dequantize", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary() { ["mode"] = mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dequantize_eager_fallback(input, min_range, max_range, mode: mode, narrow_range: narrow_range, axis: axis, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (mode is null) + { + mode = "MIN_COMBINED"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_range"] = min_range; + keywords["max_range"] = max_range; + keywords["mode"] = mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("Dequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis"), "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("Dequantize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dequantize_eager_fallback(Tensor input, Tensor min_range, Tensor max_range, string mode, bool narrow_range, int axis, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_range, max_range }; + object[] _attrs = new object[] { "T", input.dtype, "mode", mode, "narrow_range", narrow_range, "axis", axis, "dtype", dtype }; + var _result = _execute.execute("Dequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dequantize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a diagonal tensor with a given diagonal values. + /// + /// + /// + /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and + /// everything else padded with zeros. The diagonal is computed as follows: + /// + /// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of + /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: + /// + /// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. + /// + /// For example: + /// + /// ``` + /// # 'diagonal' is [1, 2, 3, 4] + /// tf.diag(diagonal) ==> [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// ``` + /// + /// + /// + /// + public static Tensor diag(Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Diag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return diag_eager_fallback(diagonal, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("Diag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Diag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor diag_eager_fallback(Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("Diag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Diag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the diagonal part of the tensor. + /// + /// + /// + /// This operation returns a tensor with the `diagonal` part + /// of the `input`. The `diagonal` part is computed as follows: + /// + /// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a + /// tensor of rank `k` with dimensions `[D1,..., Dk]` where: + /// + /// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. + /// + /// For example: + /// + /// ``` + /// # 'input' is [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// + /// tf.diag_part(input) ==> [1, 2, 3, 4] + /// ``` + /// + /// + /// + /// + public static Tensor diag_part(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return diag_part_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("DiagPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DiagPart", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor diag_part_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("DiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DiagPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the (possibly normalized) Levenshtein Edit Distance. + /// + /// + /// + /// The inputs are variable-length sequences provided by SparseTensors + /// (hypothesis_indices, hypothesis_values, hypothesis_shape) + /// and + /// (truth_indices, truth_values, truth_shape). 
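(Aside, not part of the patch.) A sketch of how the two sparse inputs line up for the `edit_distance` wrapper defined below. The `gen_array_ops` class name and the `tf.constant` array overloads are assumptions; here a hypothesis sequence `[1]` is compared against a truth sequence `[1, 2]`, so the normalized distance is one insertion over a truth length of 2:

```csharp
using static Tensorflow.Binding;

// Batch shape [1, 1]: one hypothesis/truth pair.
var hyp_indices   = tf.constant(new long[,] { { 0, 0, 0 } });
var hyp_values    = tf.constant(new[] { 1 });
var hyp_shape     = tf.constant(new long[] { 1, 1, 1 });
var truth_indices = tf.constant(new long[,] { { 0, 0, 0 }, { 0, 0, 1 } });
var truth_values  = tf.constant(new[] { 1, 2 });
var truth_shape   = tf.constant(new long[] { 1, 1, 2 });

var dist = gen_array_ops.edit_distance(hyp_indices, hyp_values, hyp_shape,
                                       truth_indices, truth_values, truth_shape,
                                       normalize: true);
// dist contains 0.5 (1 edit / truth length 2)
```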
+ /// + /// The inputs are: + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// boolean (if true, edit distances are normalized by length of truth). + /// + /// The output is: + /// + /// + /// + public static Tensor edit_distance(Tensor hypothesis_indices, Tensor hypothesis_values, Tensor hypothesis_shape, Tensor truth_indices, Tensor truth_values, Tensor truth_shape, bool normalize = true, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EditDistance", name) { args = new object[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape }, attrs = new Dictionary() { ["normalize"] = normalize } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return edit_distance_eager_fallback(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize: normalize, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["hypothesis_indices"] = hypothesis_indices; + keywords["hypothesis_values"] = hypothesis_values; + keywords["hypothesis_shape"] = hypothesis_shape; + keywords["truth_indices"] = truth_indices; + keywords["truth_values"] = truth_values; + keywords["truth_shape"] = truth_shape; + keywords["normalize"] = normalize; + var _op = tf.OpDefLib._apply_op_helper("EditDistance", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "normalize", _op._get_attr_bool("normalize"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("EditDistance", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor edit_distance_eager_fallback(Tensor hypothesis_indices, Tensor hypothesis_values, Tensor hypothesis_shape, Tensor truth_indices, Tensor truth_values, Tensor truth_shape, bool normalize, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape }; + object[] _attrs = new object[] { "normalize", normalize, "T", hypothesis_values.dtype }; + var _result = _execute.execute("EditDistance", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EditDistance", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor empty(Tensor shape, TF_DataType dtype, bool init = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Empty", name) { args = new object[] { shape }, attrs = new Dictionary() { ["dtype"] = dtype, ["init"] = init } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return empty_eager_fallback(shape, dtype: dtype, init: init, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["shape"] = shape; + keywords["dtype"] = dtype; + keywords["init"] = init; + var _op = tf.OpDefLib._apply_op_helper("Empty", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "init", _op._get_attr_bool("init") }; + _execute.record_gradient("Empty", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor empty_eager_fallback(Tensor shape, TF_DataType dtype, bool init, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { shape }; + object[] _attrs = new object[] { "dtype", dtype, "init", init }; + var _result = _execute.execute("Empty", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Empty", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Ensures that the tensor's shape matches the expected shape. + /// + /// + /// + /// Raises an error if the input tensor's shape does not match the specified shape. + /// Returns the input tensor otherwise. + /// + /// + /// + /// + /// + /// The expected (possibly partially specified) shape of the input tensor. + /// + /// + /// + public static Tensor ensure_shape(Tensor input, Shape shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EnsureShape", name) { args = new object[] { input }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ensure_shape_eager_fallback(input, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("EnsureShape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "shape", _op.get_attr("shape"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("EnsureShape", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ensure_shape_eager_fallback(Tensor input, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "shape", shape, "T", input.dtype }; + var _result = _execute.execute("EnsureShape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EnsureShape", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Inserts a dimension of 1 into a tensor's shape. + /// + /// + /// + /// Given a tensor `input`, this operation inserts a dimension of 1 at the + /// dimension index `dim` of `input`'s shape. 
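(Aside, not part of the patch.) A C# counterpart of the shape examples that follow, with the `gen_array_ops` class name assumed for illustration:

```csharp
using static Tensorflow.Binding;

var t = tf.constant(new[] { 1, 2 });                   // shape [2]
var a = gen_array_ops.expand_dims(t, tf.constant(0));  // shape [1, 2]
var b = gen_array_ops.expand_dims(t, tf.constant(-1)); // shape [2, 1]
```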
The dimension index `dim` starts at + /// zero; if you specify a negative number for `dim` it is counted backward from + /// the end. + /// + /// This operation is useful if you want to add a batch dimension to a single + /// element. For example, if you have a single image of shape `[height, width, + /// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + /// which will make the shape `[1, height, width, channels]`. + /// + /// Other examples: + /// + /// ``` + /// # 't' is a tensor of shape [2] + /// shape(expand_dims(t, 0)) ==> [1, 2] + /// shape(expand_dims(t, 1)) ==> [2, 1] + /// shape(expand_dims(t, -1)) ==> [2, 1] + /// + /// # 't2' is a tensor of shape [2, 3, 5] + /// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] + /// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] + /// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] + /// ``` + /// + /// This operation requires that: + /// + /// `-1-input.dims() <= dim <= input.dims()` + /// + /// This operation is related to `squeeze()`, which removes dimensions of + /// size 1. + /// + /// + /// + /// + /// + public static Tensor expand_dims(Tensor input, Tensor dim, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExpandDims", name) { args = new object[] { input, dim }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return expand_dims_eager_fallback(input, dim, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["dim"] = dim; + var _op = tf.OpDefLib._apply_op_helper("ExpandDims", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tdim", _op._get_attr_type("Tdim") }; + _execute.record_gradient("ExpandDims", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor expand_dims_eager_fallback(Tensor input, Tensor dim, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, dim }; + object[] _attrs = new object[] { "T", input.dtype, "Tdim", dim.dtype }; + var _result = _execute.execute("ExpandDims", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ExpandDims", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Extract `patches` from `images` and put them in the "depth" output dimension. + /// + /// + /// + /// + /// The size of the sliding window for each dimension of `images`. + /// + /// + /// + /// + /// How far the centers of two consecutive patches are in + /// the images. Must be: `[1, stride_rows, stride_cols, 1]`. + /// + /// + /// + /// + /// Must be: `[1, rate_rows, rate_cols, 1]`. This is the + /// input stride, specifying how far two consecutive patch samples are in the + /// input. Equivalent to extracting patches with + /// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by + /// subsampling them spatially by a factor of `rates`. This is equivalent to + /// `rate` in dilated (a.k.a. Atrous) convolutions. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor extract_image_patches(Tensor images, int[] ksizes, int[] strides, int[] rates, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractImagePatches", name) { args = new object[] { images }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return extract_image_patches_eager_fallback(images, ksizes: ksizes, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["images"] = images; + keywords["ksizes"] = ksizes; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("ExtractImagePatches", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("ExtractImagePatches", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor extract_image_patches_eager_fallback(Tensor images, int[] ksizes, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { images }; + object[] _attrs = new object[] { "ksizes", ksizes, "strides", strides, "rates", rates, "T", images.dtype, "padding", padding }; + var _result = _execute.execute("ExtractImagePatches", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ExtractImagePatches", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`. + /// + /// + /// + /// + /// The size of the sliding window for each dimension of `input`. + /// + /// + /// + /// + /// 1-D of length 5. How far the centers of two consecutive patches are in + /// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// The size-related attributes are specified as follows: + /// + /// ```python + /// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + /// strides = [1, stride_planes, strides_rows, strides_cols, 1] + /// ``` + /// + /// + /// + public static Tensor extract_volume_patches(Tensor input, int[] ksizes, int[] strides, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractVolumePatches", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksizes"] = ksizes, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return extract_volume_patches_eager_fallback(input, ksizes: ksizes, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksizes"] = ksizes; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("ExtractVolumePatches", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("ExtractVolumePatches", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor extract_volume_patches_eager_fallback(Tensor input, int[] ksizes, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksizes", ksizes, "strides", strides, "T", input.dtype, "padding", padding }; + var _result = _execute.execute("ExtractVolumePatches", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ExtractVolumePatches", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. + /// + /// + /// + /// Attributes + /// + /// * `[min; max]` define the clamping range for the `inputs` data. + /// * `inputs` values are quantized into the quantization range ( + /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + /// when it is true) and then de-quantized and output as floats in `[min; max]` + /// interval. + /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// + /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// Quantization is called fake since the output is still in floating point. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_args(Tensor inputs, float min = -6f, float max = 6f, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgs", name) { args = new object[] { inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_args_eager_fallback(inputs, min: min, max: max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxArgs", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_args_eager_fallback(Tensor inputs, float min, float max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { inputs }; + object[] _attrs = new object[] { "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxArgs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute gradients for a FakeQuantWithMinMaxArgs operation. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_args_gradient(Tensor gradients, Tensor inputs, float min = -6f, float max = 6f, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgsGradient", name) { args = new object[] { gradients, inputs }, attrs = new Dictionary() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_args_gradient_eager_fallback(gradients, inputs, min: min, max: max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxArgsGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxArgsGradient", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_args_gradient_eager_fallback(Tensor gradients, Tensor inputs, float min, float max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, inputs }; + object[] _attrs = new object[] { "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxArgsGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Fake-quantize the 'inputs' tensor of type float via global float scalars + /// + /// + /// + /// Fake-quantize the `inputs` tensor of type float via global float scalars + /// `min` and `max` to `outputs` tensor of same shape as `inputs`. + /// + /// Attributes + /// + /// * `[min; max]` define the clamping range for the `inputs` data. + /// * `inputs` values are quantized into the quantization range ( + /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + /// when it is true) and then de-quantized and output as floats in `[min; max]` + /// interval. + /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// + /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// This operation has a gradient and thus allows for training `min` and `max` + /// values. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_vars(Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVars", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_eager_fallback(inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVars", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVars", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_vars_eager_fallback(Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVars", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute gradients for a FakeQuantWithMinMaxVars operation. + /// + /// + /// + /// + /// + /// + /// + /// The bitwidth of the quantization; between 2 and 8, inclusive. + /// + /// + /// + /// + /// Whether to quantize into 2^num_bits - 1 distinct values. + /// + /// + /// + public static Tensor[] fake_quant_with_min_max_vars_gradient(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_gradient_eager_fallback(gradients, inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVarsGradient", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fake_quant_with_min_max_vars_gradient_eager_fallback(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVarsGradient", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Fake-quantize the 'inputs' tensor of type float via per-channel floats + /// + /// + /// + /// Fake-quantize the `inputs` tensor of type float per-channel and one of the + /// shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` + /// of shape `[d]` to `outputs` tensor of same shape as `inputs`. + /// + /// Attributes + /// + /// * `[min; max]` define the clamping range for the `inputs` data. + /// * `inputs` values are quantized into the quantization range ( + /// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + /// when it is true) and then de-quantized and output as floats in `[min; max]` + /// interval. + /// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// + /// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// This operation has a gradient and thus allows for training `min` and `max` + /// values. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor fake_quant_with_min_max_vars_per_channel(Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannel", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_per_channel_eager_fallback(inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannel", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fake_quant_with_min_max_vars_per_channel_eager_fallback(Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVarsPerChannel", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. + /// + /// + /// + /// + /// + /// + /// + /// The bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// + /// + /// + /// Whether to quantize into 2^num_bits - 1 distinct values. + /// + /// + /// + public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits = 8, bool narrow_range = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannelGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(gradients, inputs, min, max, num_bits: num_bits, narrow_range: narrow_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["inputs"] = inputs; + keywords["min"] = min; + keywords["max"] = max; + keywords["num_bits"] = num_bits; + keywords["narrow_range"] = narrow_range; + var _op = tf.OpDefLib._apply_op_helper("FakeQuantWithMinMaxVarsPerChannelGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range") }; + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannelGradient", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(Tensor gradients, Tensor inputs, Tensor min, Tensor max, int num_bits, bool narrow_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, inputs, min, max }; + object[] _attrs = new object[] { "num_bits", num_bits, "narrow_range", narrow_range }; + var _result = _execute.execute("FakeQuantWithMinMaxVarsPerChannelGradient", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Creates a tensor filled with a scalar value. + /// + /// + /// + /// This operation creates a tensor of shape `dims` and fills it with `value`. + /// + /// For example: + /// + /// ``` + /// # Output tensor has shape [2, 3]. + /// fill([2, 3], 9) ==> [[9, 9, 9] + /// [9, 9, 9]] + /// ``` + /// + /// `tf.fill` differs from `tf.constant` in a few ways: + /// + /// * `tf.fill` only supports scalar contents, whereas `tf.constant` supports + /// Tensor values. + /// * `tf.fill` creates an Op in the computation graph that constructs the actual + /// Tensor value at runtime. This is in contrast to `tf.constant` which embeds + /// the entire Tensor into the graph with a `Const` node. + /// * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + /// based on other runtime Tensors, unlike `tf.constant`. + /// + /// + /// + /// + /// + public static Tensor fill(Tensor dims, Tensor value, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fill", name) { args = new object[] { dims, value }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fill_eager_fallback(dims, value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dims"] = dims; + keywords["value"] = value; + var _op = tf.OpDefLib._apply_op_helper("Fill", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "index_type", _op._get_attr_type("index_type") }; + _execute.record_gradient("Fill", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fill_eager_fallback(Tensor dims, Tensor value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { dims, value }; + object[] _attrs = new object[] { "T", value.dtype, "index_type", dims.dtype }; + var _result = _execute.execute("Fill", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Fill", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Generates fingerprint values. + /// + /// + /// + /// Generates fingerprint values of `data`. + /// + /// Fingerprint op considers the first dimension of `data` as the batch dimension, + /// and `output[i]` contains the fingerprint value generated from contents in + /// `data[i, ...]` for all `i`. + /// + /// Fingerprint op writes fingerprint values as byte arrays. For example, the + /// default method `farmhash64` generates a 64-bit fingerprint value at a time. + /// This 8-byte value is written out as an `uint8` array of size 8, in little-endian + /// order. + /// + /// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), + /// and that the fingerprint method is `farmhash64`. In this case, the output shape + /// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of + /// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in + /// `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers + /// in `data[1, :, :]`. + /// + /// Note that this op fingerprints the raw underlying buffer, and it does not + /// fingerprint Tensor's metadata such as data type and/or shape. For example, the + /// fingerprint values are invariant under reshapes and bitcasts as long as the + /// batch dimension remain the same: + /// + /// ``` + /// Fingerprint(data) == Fingerprint(Reshape(data, ...)) + /// Fingerprint(data) == Fingerprint(Bitcast(data, ...)) + /// ``` + /// + /// For string data, one should expect `Fingerprint(data) != + /// Fingerprint(ReduceJoin(data))` in general. + /// + /// + /// + /// + /// + public static Tensor fingerprint(Tensor data, Tensor method, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fingerprint", name) { args = new object[] { data, method }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fingerprint_eager_fallback(data, method, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["method"] = method; + var _op = tf.OpDefLib._apply_op_helper("Fingerprint", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Fingerprint", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fingerprint_eager_fallback(Tensor data, Tensor method, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, method }; + object[] _attrs = new object[] { "T", data.dtype }; + var _result = _execute.execute("Fingerprint", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Fingerprint", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gather slices from `params` according to `indices`. + /// + /// + /// + /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + /// + /// ```python + /// # Scalar indices + /// output[:, ..., :] = params[indices, :, ... :] + /// + /// # Vector indices + /// output[i, :, ..., :] = params[indices[i], :, ... :] + /// + /// # Higher rank indices + /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + /// ``` + /// + /// If `indices` is a permutation and `len(indices) == params.shape[0]` then + /// this operation will permute `params` accordingly. + /// + /// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in + /// `indices` are always validated to be within range. If assigned to GPU, + /// out-of-bound indices result in safe but unspecified behavior, which may include + /// raising an error. + /// + ///
+ ///
+ ///
+ ///
+ ///
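+ /// <example>
+ /// A minimal usage sketch, assuming an eager `tf` context and `tf.constant` from the
+ /// TensorFlow.NET API (not shown in this file):
+ /// <code>
+ /// var params_ = tf.constant(new[] { 10, 20, 30, 40 });
+ /// var indices = tf.constant(new[] { 3, 0 });
+ /// var result = gather(params_, indices);  // expected values: [40, 10]
+ /// </code>
+ /// </example>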
+ /// + /// + /// + /// + public static Tensor gather(Tensor params_, Tensor indices, bool validate_indices = true, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Gather", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { ["validate_indices"] = validate_indices } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return gather_eager_fallback(params_, indices, validate_indices: validate_indices, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["params"] = params_; + keywords["indices"] = indices; + keywords["validate_indices"] = validate_indices; + var _op = tf.OpDefLib._apply_op_helper("Gather", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "validate_indices", _op._get_attr_bool("validate_indices"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("Gather", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor gather_eager_fallback(Tensor params_, Tensor indices, bool validate_indices, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { params_, indices }; + object[] _attrs = new object[] { "validate_indices", validate_indices, "Tparams", params_.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("Gather", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Gather", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gather slices from `params` into a Tensor with shape specified by `indices`. + /// + /// + /// + /// `indices` is a K-dimensional integer tensor, best thought of as a + /// (K-1)-dimensional tensor of indices into `params`, where each element defines a + /// slice of `params`: + /// + /// output[\(i_0, ..., i_{K-2}\)] = params[indices[\(i_0, ..., i_{K-2}\)]] + /// + /// Whereas in `tf.gather` `indices` defines slices into the `axis` + /// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + /// first `N` dimensions of `params`, where `N = indices.shape[-1]`. + /// + /// The last dimension of `indices` can be at most the rank of + /// `params`: + /// + /// indices.shape[-1] <= params.rank + /// + /// The last dimension of `indices` corresponds to elements + /// (if `indices.shape[-1] == params.rank`) or slices + /// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + /// of `params`. The output tensor has shape + /// + /// indices.shape[:-1] + params.shape[indices.shape[-1]:] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, a 0 is stored in the + /// corresponding output value. + /// + /// Some examples below. 
+ /// + /// Simple indexing into a matrix: + /// + /// ```python + /// indices = [[0, 0], [1, 1]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = ['a', 'd'] + /// ``` + /// + /// Slice indexing into a matrix: + /// + /// ```python + /// indices = [[1], [0]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['c', 'd'], ['a', 'b']] + /// ``` + /// + /// Indexing into a 3-tensor: + /// + /// ```python + /// indices = [[1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['a1', 'b1'], ['c1', 'd1']]] + /// + /// + /// indices = [[0, 1], [1, 0]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['c0', 'd0'], ['a1', 'b1']] + /// + /// + /// indices = [[0, 0, 1], [1, 0, 1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = ['b0', 'b1'] + /// ``` + /// + /// Batched indexing into a matrix: + /// + /// ```python + /// indices = [[[0, 0]], [[0, 1]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['a'], ['b']] + /// ``` + /// + /// Batched slice indexing into a matrix: + /// + /// ```python + /// indices = [[[1]], [[0]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [[['c', 'd']], [['a', 'b']]] + /// ``` + /// + /// Batched indexing into a 3-tensor: + /// + /// ```python + /// indices = [[[1]], [[0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[[['a1', 'b1'], ['c1', 'd1']]], + /// [[['a0', 'b0'], ['c0', 'd0']]]] + /// + /// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['c0', 'd0'], ['a1', 'b1']], + /// [['a0', 'b0'], ['c1', 'd1']]] + /// + /// + /// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['b0', 'b1'], ['d0', 'c1']] + /// ``` + /// + /// See also `tf.gather` and `tf.batch_gather`. + /// + /// + /// + /// + /// + public static Tensor gather_nd(Tensor params_, Tensor indices, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherNd", name) { args = new object[] { params_, indices }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return gather_nd_eager_fallback(params_, indices, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["params"] = params_; + keywords["indices"] = indices; + var _op = tf.OpDefLib._apply_op_helper("GatherNd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("GatherNd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor gather_nd_eager_fallback(Tensor params_, Tensor indices, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { params_, indices }; + object[] _attrs = new object[] { "Tparams", params_.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("GatherNd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GatherNd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gather slices from `params` axis `axis` according to `indices`. + /// + /// + /// + /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape `params.shape[:axis] + + /// indices.shape[batch_dims:] + params.shape[axis + 1:]` where: + /// + /// ```python + /// # Scalar indices (output is rank(params) - 1). + /// output[a_0, ..., a_n, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices, b_0, ..., b_n] + /// + /// # Vector indices (output is rank(params)). + /// output[a_0, ..., a_n, i, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + /// + /// # Higher rank indices (output is rank(params) + rank(indices) - 1). + /// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + /// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + /// ``` + /// + ///
+ ///
+ ///
+ ///
+ /// Note that on CPU, if an out of bound index is found, an error is returned.
+ /// On GPU, if an out of bound index is found, a 0 is stored in the
+ /// corresponding output value.
+ ///
+ /// See also `tf.batch_gather` and `tf.gather_nd`.
+ ///
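+ /// <example>
+ /// A minimal usage sketch, assuming an eager `tf` context and `tf.constant` from the
+ /// TensorFlow.NET API (not shown in this file):
+ /// <code>
+ /// var params_ = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });
+ /// var indices = tf.constant(new[] { 2, 0 });
+ /// var axis = tf.constant(1);                       // gather along the column axis
+ /// var result = gather_v2(params_, indices, axis);  // expected values: [[3, 1], [6, 4]]
+ /// </code>
+ /// </example>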
+ /// + /// + /// + /// + /// + public static Tensor gather_v2(Tensor params_, Tensor indices, Tensor axis, int batch_dims = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherV2", name) { args = new object[] { params_, indices, axis }, attrs = new Dictionary() { ["batch_dims"] = batch_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return gather_v2_eager_fallback(params_, indices, axis, batch_dims: batch_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["params"] = params_; + keywords["indices"] = indices; + keywords["axis"] = axis; + keywords["batch_dims"] = batch_dims; + var _op = tf.OpDefLib._apply_op_helper("GatherV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "batch_dims", _op._get_attr_int("batch_dims"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices"), "Taxis", _op._get_attr_type("Taxis") }; + _execute.record_gradient("GatherV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor gather_v2_eager_fallback(Tensor params_, Tensor indices, Tensor axis, int batch_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { params_, indices, axis }; + object[] _attrs = new object[] { "batch_dims", batch_dims, "Tparams", params_.dtype, "Tindices", indices.dtype, "Taxis", axis.dtype }; + var _result = _execute.execute("GatherV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GatherV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gives a guarantee to the TF runtime that the input tensor is a constant. + /// + /// + /// + /// The runtime is then free to make optimizations based on this. + /// + /// Only accepts value typed tensors as inputs and rejects resource variable handles + /// as input. + /// + /// Returns the input tensor without modification. + /// + /// + /// + /// + public static Tensor guarantee_const(Tensor input, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GuaranteeConst", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return guarantee_const_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("GuaranteeConst", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("GuaranteeConst", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor guarantee_const_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("GuaranteeConst", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GuaranteeConst", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return a tensor with the same shape and contents as the input tensor or value. + /// + /// + /// + public static Tensor identity(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Identity", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Identity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Identity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Identity", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Identity", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a list of tensors with the same shapes and contents as the input + /// + /// + /// + /// tensors. + /// + /// This op can be used to override the gradient for complicated functions. For + /// example, suppose y = f(x) and we wish to apply a custom function g for backprop + /// such that dx = g(dy). In Python, + /// + /// ```python + /// with tf.get_default_graph().gradient_override_map( + /// {'IdentityN': 'OverrideGradientWithG'}): + /// y, _ = identity_n([f(x), x]) + /// + /// @tf.RegisterGradient('OverrideGradientWithG') + /// def ApplyG(op, dy, _): + /// return [None, g(dy)] # Do not backprop to f(x). + /// ``` + /// + /// + /// + /// + /// + public static Tensor identity_n(Tensor input, TF_DataType[] T, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityN", name) { args = new object[] { input }, attrs = new Dictionary() { ["T"] = T } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_n_eager_fallback(input, T: T, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["T"] = T; + var _op = tf.OpDefLib._apply_op_helper("IdentityN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T") }; + _execute.record_gradient("IdentityN", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_n_eager_fallback(Tensor input, TF_DataType[] T, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", T }; + var _result = _execute.execute("IdentityN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IdentityN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns immutable tensor from memory region. + /// + /// + /// + /// The current implementation memmaps the tensor from a file. + /// + /// + /// + /// + /// Type of the returned tensor. + /// + /// + /// + /// + /// Shape of the returned tensor. + /// + /// + /// + /// + /// Name of readonly memory region used by the tensor, see + /// NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + /// + /// + /// + public static Tensor immutable_const(TF_DataType dtype, Shape shape, string memory_region_name, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ImmutableConst", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape, ["memory_region_name"] = memory_region_name } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return immutable_const_eager_fallback(dtype: dtype, shape: shape, memory_region_name: memory_region_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dtype"] = dtype; + keywords["shape"] = shape; + keywords["memory_region_name"] = memory_region_name; + var _op = tf.OpDefLib._apply_op_helper("ImmutableConst", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape"), "memory_region_name", _op.get_attr("memory_region_name") }; + _execute.record_gradient("ImmutableConst", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor immutable_const_eager_fallback(TF_DataType dtype, Shape shape, string memory_region_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "dtype", dtype, "shape", shape, "memory_region_name", memory_region_name }; + var _result = _execute.execute("ImmutableConst", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ImmutableConst", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor inplace_add(Tensor x, Tensor i, Tensor v, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceAdd", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inplace_add_eager_fallback(x, i, v, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["i"] = i; + keywords["v"] = v; + var _op = tf.OpDefLib._apply_op_helper("InplaceAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InplaceAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inplace_add_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, i, v }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InplaceAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InplaceAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor inplace_sub(Tensor x, Tensor i, Tensor v, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceSub", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inplace_sub_eager_fallback(x, i, v, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["i"] = i; + keywords["v"] = v; + var _op = tf.OpDefLib._apply_op_helper("InplaceSub", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InplaceSub", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inplace_sub_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, i, v }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InplaceSub", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InplaceSub", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor inplace_update(Tensor x, Tensor i, Tensor v, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceUpdate", name) { args = new object[] { x, i, v }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inplace_update_eager_fallback(x, i, v, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["i"] = i; + keywords["v"] = v; + var _op = tf.OpDefLib._apply_op_helper("InplaceUpdate", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InplaceUpdate", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inplace_update_eager_fallback(Tensor x, Tensor i, Tensor v, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, i, v }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InplaceUpdate", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InplaceUpdate", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the inverse permutation of a tensor. + /// + /// + /// + /// This operation computes the inverse of an index permutation. It takes a 1-D + /// integer tensor `x`, which represents the indices of a zero-based array, and + /// swaps each value with its index position. In other words, for an output tensor + /// `y` and an input tensor `x`, this operation computes the following: + /// + /// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` + /// + /// The values must include 0. There can be no duplicate values or negative values. + /// + /// For example: + /// + /// ``` + /// # tensor `x` is [3, 4, 0, 2, 1] + /// invert_permutation(x) ==> [2, 4, 3, 0, 1] + /// ``` + /// + /// + /// + /// + public static Tensor invert_permutation(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvertPermutation", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return invert_permutation_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("InvertPermutation", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InvertPermutation", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor invert_permutation_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("InvertPermutation", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InvertPermutation", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the difference between two lists of numbers or strings. + /// + /// + /// + /// Given a list `x` and a list `y`, this operation returns a list `out` that + /// represents all values that are in `x` but not in `y`. The returned list `out` + /// is sorted in the same order that the numbers appear in `x` (duplicates are + /// preserved). This operation also returns a list `idx` that represents the + /// position of each `out` element in `x`. In other words: + /// + /// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + /// + /// For example, given this input: + /// + /// ``` + /// x = [1, 2, 3, 4, 5, 6] + /// y = [1, 3, 5] + /// ``` + /// + /// This operation would return: + /// + /// ``` + /// out ==> [2, 4, 6] + /// idx ==> [1, 3, 5] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor[] list_diff(Tensor x, Tensor y, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ListDiff", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return list_diff_eager_fallback(x, y, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("ListDiff", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("ListDiff", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] list_diff_eager_fallback(Tensor x, Tensor y, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "out_idx", out_idx }; + var _result = _execute.execute("ListDiff", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ListDiff", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Applies lower_bound(sorted_search_values, values) along each row. + /// + /// + /// + /// Each set of rows with the same index in (sorted_inputs, values) is treated + /// independently. The resulting row is the equivalent of calling + /// `np.searchsorted(sorted_inputs, values, side='left')`. + /// + /// The result is not a global index to the entire + /// `Tensor`, but rather just the index in the last dimension. + /// + /// A 2-D example: + /// sorted_sequence = [[0, 3, 9, 9, 10], + /// [1, 2, 3, 4, 5]] + /// values = [[2, 4, 9], + /// [0, 2, 6]] + /// + /// result = LowerBound(sorted_sequence, values) + /// + /// result == [[1, 2, 2], + /// [0, 1, 5]] + /// + /// + /// + /// + /// + /// + public static Tensor lower_bound(Tensor sorted_inputs, Tensor values, TF_DataType out_type = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LowerBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lower_bound_eager_fallback(sorted_inputs, values, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["sorted_inputs"] = sorted_inputs; + keywords["values"] = values; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("LowerBound", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("LowerBound", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lower_bound_eager_fallback(Tensor sorted_inputs, Tensor values, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { sorted_inputs, values }; + object[] _attrs = new object[] { "T", sorted_inputs.dtype, "out_type", out_type }; + var _result = _execute.execute("LowerBound", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LowerBound", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Copy a tensor setting everything outside a central band in each innermost matrix to zero. + /// + /// + /// + /// The `band` part is computed as follows: + /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + /// tensor with the same shape where + /// + /// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. + /// + /// The indicator function + /// + /// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && + /// (num_upper < 0 || (n-m) <= num_upper)`. + /// + /// For example: + /// + /// ``` + /// # if 'input' is [[ 0, 1, 2, 3] + /// # [-1, 0, 1, 2] + /// # [-2, -1, 0, 1] + /// # [-3, -2, -1, 0]], + /// + /// tf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + /// [-1, 0, 1, 2] + /// [ 0, -1, 0, 1] + /// [ 0, 0, -1, 0]], + /// + /// tf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + /// [-1, 0, 1, 0] + /// [-2, -1, 0, 1] + /// [ 0, -2, -1, 0]] + /// ``` + /// + /// Useful special cases: + /// + /// ``` + /// tf.linalg.band_part(input, 0, -1) ==> Upper triangular part. + /// tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. + /// tf.linalg.band_part(input, 0, 0) ==> Diagonal. + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor matrix_band_part(Tensor input, Tensor num_lower, Tensor num_upper, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_band_part_eager_fallback(input, num_lower, num_upper, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["num_lower"] = num_lower; + keywords["num_upper"] = num_upper; + var _op = tf.OpDefLib._apply_op_helper("MatrixBandPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindex", _op._get_attr_type("Tindex") }; + _execute.record_gradient("MatrixBandPart", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_band_part_eager_fallback(Tensor input, Tensor num_lower, Tensor num_upper, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, num_lower, num_upper }; + object[] _attrs = new object[] { "T", input.dtype, "Tindex", num_lower.dtype }; + var _result = _execute.execute("MatrixBandPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixBandPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched diagonal tensor with a given batched diagonal values. + /// + /// + /// + /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and + /// everything else padded with zeros. The diagonal is computed as follows: + /// + /// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a + /// tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where: + /// + /// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. + /// + /// For example: + /// + /// ``` + /// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// and diagonal.shape = (2, 4) + /// + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// which has shape (2, 4, 4) + /// ``` + /// + /// + /// + /// + public static Tensor matrix_diag(Tensor diagonal, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_eager_fallback(diagonal, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_eager_fallback(Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("MatrixDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the batched diagonal part of a batched tensor. + /// + /// + /// + /// This operation returns a tensor with the `diagonal` part + /// of the batched `input`. The `diagonal` part is computed as follows: + /// + /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + /// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where: + /// + /// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// # 'input' is [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// and input.shape = (2, 4, 4) + /// + /// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// which has shape (2, 4) + /// ``` + /// + /// + /// + /// + public static Tensor matrix_diag_part(Tensor input, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_part_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPart", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiagPart", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_part_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixDiagPart", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagPart", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the batched diagonal part of a batched tensor. + /// + /// + /// + /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + /// `input`. + /// + /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// Let `num_diags` be the number of diagonals to extract, + /// `num_diags = k[1] - k[0] + 1`. + /// + /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + /// `[I, J, ..., L, max_diag_len]` and values: + /// + /// ``` + /// diagonal[i, j, ..., l, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + /// + /// Otherwise, the output tensor has rank `r` with dimensions + /// `[I, J, ..., L, num_diags, max_diag_len]` with values: + /// + /// ``` + /// diagonal[i, j, ..., l, m, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + /// [5, 6, 7, 8], + /// [9, 8, 7, 6]], + /// [[5, 4, 3, 2], + /// [1, 2, 3, 4], + /// [5, 6, 7, 8]]]) + /// + /// # A main diagonal from each batch. + /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + /// [5, 2, 7]] + /// + /// # A superdiagonal from each batch. + /// tf.matrix_diag_part(input, k = 1) + /// ==> [[2, 7, 6], # Output shape: (2, 3) + /// [4, 3, 8]] + /// + /// # A tridiagonal band from each batch. 
+ /// tf.matrix_diag_part(input, k = (-1, 1)) + /// ==> [[[2, 7, 6], # Output shape: (2, 3, 3) + /// [1, 6, 7], + /// [5, 8, 0]], + /// [[4, 3, 8], + /// [5, 2, 7], + /// [1, 6, 0]]] + /// + /// # Padding value = 9 + /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + /// ==> [[[4, 9, 9], # Output shape: (2, 3, 3) + /// [3, 8, 9], + /// [2, 7, 6]], + /// [[2, 9, 9], + /// [3, 4, 9], + /// [4, 3, 8]]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor matrix_diag_part_v2(Tensor input, Tensor k, Tensor padding_value, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV2", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_part_v2_eager_fallback(input, k, padding_value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["padding_value"] = padding_value; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPartV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiagPartV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_part_v2_eager_fallback(Tensor input, Tensor k, Tensor padding_value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, k, padding_value }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixDiagPartV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagPartV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the batched diagonal part of a batched tensor. + /// + /// + /// + /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + /// `input`. + /// + /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// Let `num_diags` be the number of diagonals to extract, + /// `num_diags = k[1] - k[0] + 1`. + /// + /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + /// `[I, J, ..., L, max_diag_len]` and values: + /// + /// ``` + /// diagonal[i, j, ..., l, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + /// + /// Otherwise, the output tensor has rank `r` with dimensions + /// `[I, J, ..., L, num_diags, max_diag_len]` with values: + /// + /// ``` + /// diagonal[i, j, ..., l, m, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. + /// + /// `offset` is zero except when the alignment of the diagonal is to the right. 
+ /// ``` + /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + /// and `d >= 0`) or + /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + /// and `d <= 0`) + /// 0 ; otherwise + /// ``` + /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + /// [5, 6, 7, 8], + /// [9, 8, 7, 6]], + /// [[5, 4, 3, 2], + /// [1, 2, 3, 4], + /// [5, 6, 7, 8]]]) + /// + /// # A main diagonal from each batch. + /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + /// [5, 2, 7]] + /// + /// # A superdiagonal from each batch. + /// tf.matrix_diag_part(input, k = 1) + /// ==> [[2, 7, 6], # Output shape: (2, 3) + /// [4, 3, 8]] + /// + /// # A band from each batch. + /// tf.matrix_diag_part(input, k = (-1, 2)) + /// ==> [[[0, 3, 8], # Output shape: (2, 4, 3) + /// [2, 7, 6], + /// [1, 6, 7], + /// [5, 8, 0]], + /// [[0, 3, 4], + /// [4, 3, 8], + /// [5, 2, 7], + /// [1, 6, 0]]] + /// + /// # LEFT_RIGHT alignment. + /// tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") + /// ==> [[[3, 8, 0], # Output shape: (2, 4, 3) + /// [2, 7, 6], + /// [1, 6, 7], + /// [0, 5, 8]], + /// [[3, 4, 0], + /// [4, 3, 8], + /// [5, 2, 7], + /// [0, 1, 6]]] + /// + /// # max_diag_len can be shorter than the main diagonal. + /// tf.matrix_diag_part(input, k = (-2, -1)) + /// ==> [[[5, 8], + /// [9, 0]], + /// [[1, 6], + /// [5, 0]]] + /// + /// # padding_value = 9 + /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + /// ==> [[[9, 9, 4], # Output shape: (2, 3, 3) + /// [9, 3, 8], + /// [2, 7, 6]], + /// [[9, 9, 2], + /// [9, 3, 4], + /// [4, 3, 8]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + /// a string specifying how superdiagonals and subdiagonals should be aligned, + /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), + /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + /// to the right (left-pads the row) and subdiagonals to the left (right-pads the + /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + /// the opposite alignment. + /// + /// + /// + public static Tensor matrix_diag_part_v3(Tensor input, Tensor k, Tensor padding_value, string align = "RIGHT_LEFT", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV3", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary() { ["align"] = align } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_part_v3_eager_fallback(input, k, padding_value, align: align, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (align is null) + { + align = "RIGHT_LEFT"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["padding_value"] = padding_value; + keywords["align"] = align; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagPartV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") }; + _execute.record_gradient("MatrixDiagPartV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_part_v3_eager_fallback(Tensor input, Tensor k, Tensor padding_value, string align, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, k, padding_value }; + object[] _attrs = new object[] { "T", input.dtype, "align", align }; + var _result = _execute.execute("MatrixDiagPartV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagPartV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched diagonal tensor with given batched diagonal values. + /// + /// + /// + /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + /// diagonals of a matrix, with everything else padded with `padding`. `num_rows` + /// and `num_cols` specify the dimension of the innermost matrix of the output. If + /// both are not specified, the op assumes the innermost matrix is square and infers + /// its size from `k` and the innermost dimension of `diagonal`. If only one of them + /// is specified, the op assumes the unspecified value is the smallest possible + /// based on other criteria. + /// + /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + /// + /// The second innermost dimension of `diagonal` has double meaning. + /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + /// [I, J, ..., M], and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + /// padding_value ; otherwise + /// ``` + /// + /// Otherwise, `M` is treated as the number of diagonals for the matrix in the + /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// padding_value ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. 
+ /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + /// [5, 6, 7, 8]]) + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + /// [0, 2, 0, 0], + /// [0, 0, 3, 0], + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0], + /// [0, 6, 0, 0], + /// [0, 0, 7, 0], + /// [0, 0, 0, 8]]] + /// + /// # A superdiagonal (per batch). + /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_diag(diagonal, k = 1) + /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + /// [0, 0, 2, 0], + /// [0, 0, 0, 3], + /// [0, 0, 0, 0]], + /// [[0, 4, 0, 0], + /// [0, 0, 5, 0], + /// [0, 0, 0, 6], + /// [0, 0, 0, 0]]] + /// + /// # A band of diagonals. + /// diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) + /// [4, 5, 0]], + /// [[6, 7, 9], + /// [9, 1, 0]]]) + /// tf.matrix_diag(diagonals, k = (-1, 0)) + /// ==> [[[1, 0, 0], # Output shape: (2, 3, 3) + /// [4, 2, 0], + /// [0, 5, 3]], + /// [[6, 0, 0], + /// [9, 7, 0], + /// [0, 1, 9]]] + /// + /// # Rectangular matrix. + /// diagonal = np.array([1, 2]) # Input shape: (2) + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) + /// [1, 0, 0, 0], + /// [0, 2, 0, 0]] + /// + /// # Rectangular matrix with inferred num_cols and padding_value = 9. + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + /// ==> [[9, 9], # Output shape: (3, 2) + /// [1, 9], + /// [9, 2]] + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor matrix_diag_v2(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV2", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_v2_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + keywords["k"] = k; + keywords["num_rows"] = num_rows; + keywords["num_cols"] = num_cols; + keywords["padding_value"] = padding_value; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixDiagV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_v2_eager_fallback(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal, k, num_rows, num_cols, padding_value }; + object[] _attrs = new object[] { "T", diagonal.dtype }; + var _result = _execute.execute("MatrixDiagV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched diagonal tensor with given batched diagonal values. + /// + /// + /// + /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + /// diagonals of a matrix, with everything else padded with `padding`. 
`num_rows` + /// and `num_cols` specify the dimension of the innermost matrix of the output. If + /// both are not specified, the op assumes the innermost matrix is square and infers + /// its size from `k` and the innermost dimension of `diagonal`. If only one of them + /// is specified, the op assumes the unspecified value is the smallest possible + /// based on other criteria. + /// + /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + /// + /// The second innermost dimension of `diagonal` has double meaning. + /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + /// [I, J, ..., M], and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + /// padding_value ; otherwise + /// ``` + /// + /// Otherwise, `M` is treated as the number of diagonals for the matrix in the + /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// padding_value ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = [k] - d`, and + /// `index_in_diag = n - max(d, 0) + offset`. + /// + /// `offset` is zero except when the alignment of the diagonal is to the right. + /// ``` + /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + /// and `d >= 0`) or + /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + /// and `d <= 0`) + /// 0 ; otherwise + /// ``` + /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. + /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + /// [5, 6, 7, 8]]) + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + /// [0, 2, 0, 0], + /// [0, 0, 3, 0], + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0], + /// [0, 6, 0, 0], + /// [0, 0, 7, 0], + /// [0, 0, 0, 8]]] + /// + /// # A superdiagonal (per batch). + /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_diag(diagonal, k = 1) + /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + /// [0, 0, 2, 0], + /// [0, 0, 0, 3], + /// [0, 0, 0, 0]], + /// [[0, 4, 0, 0], + /// [0, 0, 5, 0], + /// [0, 0, 0, 6], + /// [0, 0, 0, 0]]] + /// + /// # A tridiagonal band (per batch). + /// diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) + /// [1, 2, 3], + /// [4, 5, 0]], + /// [[0, 2, 3], + /// [6, 7, 9], + /// [9, 1, 0]]]) + /// tf.matrix_diag(diagonals, k = (-1, 1)) + /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + /// [4, 2, 9], + /// [0, 5, 3]], + /// [[6, 2, 0], + /// [9, 7, 3], + /// [0, 1, 9]]] + /// + /// # LEFT_RIGHT alignment. + /// diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) + /// [1, 2, 3], + /// [0, 4, 5]], + /// [[2, 3, 0], + /// [6, 7, 9], + /// [0, 9, 1]]]) + /// tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") + /// ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + /// [4, 2, 9], + /// [0, 5, 3]], + /// [[6, 2, 0], + /// [9, 7, 3], + /// [0, 1, 9]]] + /// + /// # Rectangular matrix. 
+ /// diagonal = np.array([1, 2]) # Input shape: (2) + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) + /// [1, 0, 0, 0], + /// [0, 2, 0, 0]] + /// + /// # Rectangular matrix with inferred num_cols and padding_value = 9. + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + /// ==> [[9, 9], # Output shape: (3, 2) + /// [1, 9], + /// [9, 2]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + /// a string specifying how superdiagonals and subdiagonals should be aligned, + /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), + /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + /// to the right (left-pads the row) and subdiagonals to the left (right-pads the + /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + /// the opposite alignment. + /// + /// + /// + public static Tensor matrix_diag_v3(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string align = "RIGHT_LEFT", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV3", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary() { ["align"] = align } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_diag_v3_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, align: align, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (align is null) + { + align = "RIGHT_LEFT"; + } + Dictionary keywords = new(); + keywords["diagonal"] = diagonal; + keywords["k"] = k; + keywords["num_rows"] = num_rows; + keywords["num_cols"] = num_cols; + keywords["padding_value"] = padding_value; + keywords["align"] = align; + var _op = tf.OpDefLib._apply_op_helper("MatrixDiagV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") }; + _execute.record_gradient("MatrixDiagV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_diag_v3_eager_fallback(Tensor diagonal, Tensor k, Tensor num_rows, Tensor num_cols, Tensor padding_value, string align, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { diagonal, k, num_rows, num_cols, padding_value }; + object[] _attrs = new object[] { "T", diagonal.dtype, "align", align }; + var _result = _execute.execute("MatrixDiagV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixDiagV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched matrix tensor with new batched diagonal values. + /// + /// + /// + /// Given `input` and `diagonal`, this operation returns a tensor with the + /// same shape and values as `input`, except for the main diagonal of the + /// innermost matrices. These will be overwritten by the values in `diagonal`. + /// + /// The output is computed as follows: + /// + /// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has + /// `k` dimensions `[I, J, K, ..., min(M, N)]`. 
Then the output is a + /// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where: + /// + /// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`. + /// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`. + /// + /// + /// + /// + /// + public static Tensor matrix_set_diag(Tensor input, Tensor diagonal, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_set_diag_eager_fallback(input, diagonal, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixSetDiag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_set_diag_eager_fallback(Tensor input, Tensor diagonal, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixSetDiag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixSetDiag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched matrix tensor with new batched diagonal values. + /// + /// + /// + /// Given `input` and `diagonal`, this operation returns a tensor with the + /// same shape and values as `input`, except for the specified diagonals of the + /// innermost matrices. These will be overwritten by the values in `diagonal`. + /// + /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or + /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. + /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. + /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. + /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// + /// The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. + /// If `k` is scalar or `k[0] == k[1]`: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// + /// Otherwise, + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. 
+ /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]], + /// [[7, 7, 7, 7], + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]]]) + /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_set_diag(diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + /// [7, 2, 7, 7], + /// [7, 7, 3, 7]], + /// [[4, 7, 7, 7], + /// [7, 5, 7, 7], + /// [7, 7, 6, 7]]] + /// + /// # A superdiagonal (per batch). + /// tf.matrix_set_diag(diagonal, k = 1) + /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + /// [7, 7, 2, 7], + /// [7, 7, 7, 3]], + /// [[7, 4, 7, 7], + /// [7, 7, 5, 7], + /// [7, 7, 7, 6]]] + /// + /// # A band of diagonals. + /// diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3) + /// [4, 5, 0]], + /// [[6, 1, 2], + /// [3, 4, 0]]]) + /// tf.matrix_set_diag(diagonals, k = (-1, 0)) + /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + /// [4, 2, 7, 7], + /// [0, 5, 3, 7]], + /// [[6, 7, 7, 7], + /// [3, 1, 7, 7], + /// [7, 4, 2, 7]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor matrix_set_diag_v2(Tensor input, Tensor diagonal, Tensor k, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV2", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_set_diag_v2_eager_fallback(input, diagonal, k, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + keywords["k"] = k; + var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiagV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatrixSetDiagV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_set_diag_v2_eager_fallback(Tensor input, Tensor diagonal, Tensor k, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal, k }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("MatrixSetDiagV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixSetDiagV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a batched matrix tensor with new batched diagonal values. + /// + /// + /// + /// Given `input` and `diagonal`, this operation returns a tensor with the + /// same shape and values as `input`, except for the specified diagonals of the + /// innermost matrices. These will be overwritten by the values in `diagonal`. + /// + /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or + /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. + /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. + /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. + /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// + /// The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. 
+ /// If `k` is scalar or `k[0] == k[1]`: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// + /// Otherwise, + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// input[i, j, ..., l, m, n] ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = k[1] - d`, and + /// `index_in_diag = n - max(d, 0) + offset`. + /// + /// `offset` is zero except when the alignment of the diagonal is to the right. + /// ``` + /// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + /// and `d >= 0`) or + /// (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + /// and `d <= 0`) + /// 0 ; otherwise + /// ``` + /// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. + /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]], + /// [[7, 7, 7, 7], + /// [7, 7, 7, 7], + /// [7, 7, 7, 7]]]) + /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_set_diag(input, diagonal) + /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + /// [7, 2, 7, 7], + /// [7, 7, 3, 7]], + /// [[4, 7, 7, 7], + /// [7, 5, 7, 7], + /// [7, 7, 6, 7]]] + /// + /// # A superdiagonal (per batch). + /// tf.matrix_set_diag(input, diagonal, k = 1) + /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + /// [7, 7, 2, 7], + /// [7, 7, 7, 3]], + /// [[7, 4, 7, 7], + /// [7, 7, 5, 7], + /// [7, 7, 7, 6]]] + /// + /// # A band of diagonals. + /// diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) + /// [6, 5, 8], + /// [1, 2, 3], + /// [4, 5, 0]], + /// [[0, 1, 2], + /// [5, 6, 4], + /// [6, 1, 2], + /// [3, 4, 0]]]) + /// tf.matrix_set_diag(input, diagonals, k = (-1, 2)) + /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + /// [4, 2, 5, 1], + /// [7, 5, 3, 8]], + /// [[6, 5, 1, 7], + /// [3, 1, 6, 2], + /// [7, 4, 2, 4]]] + /// + /// # LEFT_RIGHT alignment. + /// diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) + /// [6, 5, 8], + /// [1, 2, 3], + /// [0, 4, 5]], + /// [[1, 2, 0], + /// [5, 6, 4], + /// [6, 1, 2], + /// [0, 3, 4]]]) + /// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") + /// ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + /// [4, 2, 5, 1], + /// [7, 5, 3, 8]], + /// [[6, 5, 1, 7], + /// [3, 1, 6, 2], + /// [7, 4, 2, 4]]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + /// a string specifying how superdiagonals and subdiagonals should be aligned, + /// respectively. There are four possible alignments: "RIGHT_LEFT" (default), + /// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + /// to the right (left-pads the row) and subdiagonals to the left (right-pads the + /// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + /// the opposite alignment. + /// + /// + /// + public static Tensor matrix_set_diag_v3(Tensor input, Tensor diagonal, Tensor k, string align = "RIGHT_LEFT", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV3", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary() { ["align"] = align } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matrix_set_diag_v3_eager_fallback(input, diagonal, k, align: align, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (align is null) + { + align = "RIGHT_LEFT"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["diagonal"] = diagonal; + keywords["k"] = k; + keywords["align"] = align; + var _op = tf.OpDefLib._apply_op_helper("MatrixSetDiagV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "align", _op.get_attr("align") }; + _execute.record_gradient("MatrixSetDiagV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matrix_set_diag_v3_eager_fallback(Tensor input, Tensor diagonal, Tensor k, string align, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, diagonal, k }; + object[] _attrs = new object[] { "T", input.dtype, "align", align }; + var _result = _execute.execute("MatrixSetDiagV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatrixSetDiagV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Pads a tensor with mirrored values. + /// + /// + /// + /// This operation pads a `input` with mirrored values according to the `paddings` + /// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is + /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many values to add before the contents of `input` in that dimension, and + /// `paddings[D, 1]` indicates how many values to add after the contents of `input` + /// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater + /// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true + /// (if false, respectively). + /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 2, 3], [4, 5, 6]]. + /// # 'paddings' is [[1, 1]], [2, 2]]. + /// # 'mode' is SYMMETRIC. + /// # rank of 't' is 2. + /// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] + /// [2, 1, 1, 2, 3, 3, 2] + /// [5, 4, 4, 5, 6, 6, 5] + /// [5, 4, 4, 5, 6, 6, 5]] + /// ``` + /// + /// + /// + /// + /// + /// + /// Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + /// do not include the borders, while in symmetric mode the padded regions + /// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` + /// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and + /// it is `[1, 2, 3, 3, 2]` in symmetric mode. + /// + /// + /// + public static Tensor mirror_pad(Tensor input, Tensor paddings, string mode, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["mode"] = mode } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mirror_pad_eager_fallback(input, paddings, mode: mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["mode"] = mode; + var _op = tf.OpDefLib._apply_op_helper("MirrorPad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode") }; + _execute.record_gradient("MirrorPad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mirror_pad_eager_fallback(Tensor input, Tensor paddings, string mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "mode", mode }; + var _result = _execute.execute("MirrorPad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MirrorPad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor. + /// + /// + /// + /// This operation folds the padded areas of `input` by `MirrorPad` according to the + /// `paddings` you specify. `paddings` must be the same as `paddings` argument + /// given to the corresponding `MirrorPad` op. + /// + /// The folded size of each dimension D of the output is: + /// + /// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. + /// # 'paddings' is [[0, 1]], [0, 1]]. + /// # 'mode' is SYMMETRIC. + /// # rank of 't' is 2. + /// pad(t, paddings) ==> [[ 1, 5] + /// [11, 28]] + /// ``` + /// + /// + /// + /// + /// + /// + /// The mode used in the `MirrorPad` op. + /// + /// + /// + public static Tensor mirror_pad_grad(Tensor input, Tensor paddings, string mode, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPadGrad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["mode"] = mode } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mirror_pad_grad_eager_fallback(input, paddings, mode: mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["mode"] = mode; + var _op = tf.OpDefLib._apply_op_helper("MirrorPadGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode") }; + _execute.record_gradient("MirrorPadGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mirror_pad_grad_eager_fallback(Tensor input, Tensor paddings, string mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "mode", mode }; + var _result = _execute.execute("MirrorPadGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MirrorPadGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a one-hot tensor. + /// + /// + /// + /// The locations represented by indices in `indices` take value `on_value`, + /// while all other locations take value `off_value`. + /// + /// If the input `indices` is rank `N`, the output will have rank `N+1`, + /// The new axis is created at dimension `axis` (default: the new axis is + /// appended at the end). + /// + /// If `indices` is a scalar the output shape will be a vector of length `depth`. 
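+        /// 
+        /// A minimal C# sketch of the scalar case (illustrative only; it assumes these
+        /// bindings are emitted into the generated `gen_array_ops` class and that
+        /// `tf.constant` is in scope):
+        /// ```
+        /// // indices = 2, depth = 3, on_value = 5.0, off_value = 0.0
+        /// // ==> a length-3 vector [0.0, 0.0, 5.0]
+        /// var result = gen_array_ops.one_hot(
+        ///     indices: tf.constant(2),
+        ///     depth: tf.constant(3),
+        ///     on_value: tf.constant(5.0f),
+        ///     off_value: tf.constant(0.0f));
+        /// ```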
+ /// + /// If `indices` is a vector of length `features`, the output shape will be: + /// ``` + /// features x depth if axis == -1 + /// depth x features if axis == 0 + /// ``` + /// + /// If `indices` is a matrix (batch) with shape `[batch, features]`, + /// the output shape will be: + /// ``` + /// batch x features x depth if axis == -1 + /// batch x depth x features if axis == 1 + /// depth x batch x features if axis == 0 + /// ``` + /// + /// + /// Examples + /// ========= + /// + /// Suppose that + /// ``` + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 5.0 + /// off_value = 0.0 + /// axis = -1 + /// ``` + /// + /// Then output is `[4 x 3]`: + /// ``` + /// output = + /// [5.0 0.0 0.0] // one_hot(0) + /// [0.0 0.0 5.0] // one_hot(2) + /// [0.0 0.0 0.0] // one_hot(-1) + /// [0.0 5.0 0.0] // one_hot(1) + /// ``` + /// + /// Suppose that + /// ``` + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 0.0 + /// off_value = 3.0 + /// axis = 0 + /// ``` + /// + /// Then output is `[3 x 4]`: + /// ``` + /// output = + /// [0.0 3.0 3.0 3.0] + /// [3.0 3.0 3.0 0.0] + /// [3.0 3.0 3.0 3.0] + /// [3.0 0.0 3.0 3.0] + /// // ^ one_hot(0) + /// // ^ one_hot(2) + /// // ^ one_hot(-1) + /// // ^ one_hot(1) + /// ``` + /// + /// Suppose that + /// ``` + /// indices = [[0, 2], [1, -1]] + /// depth = 3 + /// on_value = 1.0 + /// off_value = 0.0 + /// axis = -1 + /// ``` + /// + /// Then output is `[2 x 2 x 3]`: + /// ``` + /// output = + /// [ + /// [1.0, 0.0, 0.0] // one_hot(0) + /// [0.0, 0.0, 1.0] // one_hot(2) + /// ][ + /// [0.0, 1.0, 0.0] // one_hot(1) + /// [0.0, 0.0, 0.0] // one_hot(-1) + /// ] + /// ``` + /// + /// + /// + /// + /// + /// + /// + /// + /// The axis to fill (default: -1, a new inner-most axis). + /// + /// + /// + public static Tensor one_hot(Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int axis = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OneHot", name) { args = new object[] { indices, depth, on_value, off_value }, attrs = new Dictionary() { ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return one_hot_eager_fallback(indices, depth, on_value, off_value, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["depth"] = depth; + keywords["on_value"] = on_value; + keywords["off_value"] = off_value; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("OneHot", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "axis", _op._get_attr_int("axis"), "T", _op._get_attr_type("T"), "TI", _op._get_attr_type("TI") }; + _execute.record_gradient("OneHot", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor one_hot_eager_fallback(Tensor indices, Tensor depth, Tensor on_value, Tensor off_value, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, depth, on_value, off_value }; + object[] _attrs = new object[] { "axis", axis, "T", on_value.dtype, "TI", indices.dtype }; + var _result = _execute.execute("OneHot", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("OneHot", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a tensor of ones with the same shape and type as x. + /// + /// + /// + public static Tensor ones_like(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OnesLike", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ones_like_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("OnesLike", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("OnesLike", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ones_like_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("OnesLike", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("OnesLike", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. + /// + /// + /// + /// Packs the `N` tensors in `values` into a tensor with rank one higher than each + /// tensor in `values`, by packing them along the `axis` dimension. + /// Given a list of tensors of shape `(A, B, C)`; + /// + /// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + /// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + /// Etc. 
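+        /// 
+        /// A minimal C# sketch (illustrative only; it assumes the generated
+        /// `gen_array_ops` class and that a `Tensors` collection can be built from
+        /// individual tensors):
+        /// ```
+        /// var x = tf.constant(new[] { 1, 4 });
+        /// var y = tf.constant(new[] { 2, 5 });
+        /// var z = tf.constant(new[] { 3, 6 });
+        /// // Packs along a new first dimension, giving shape (3, 2).
+        /// var packed = gen_array_ops.pack(new Tensors(x, y, z), axis: 0);
+        /// ```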
+ /// + /// For example: + /// + /// ``` + /// # 'x' is [1, 4] + /// # 'y' is [2, 5] + /// # 'z' is [3, 6] + /// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + /// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + /// ``` + /// + /// This is the opposite of `unpack`. + /// + /// + /// + /// + /// + /// Dimension along which to pack. Negative values wrap around, so the + /// valid range is `[-(R+1), R+1)`. + /// + /// + /// + public static Tensor pack(Tensors values, int axis = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pack", name) { args = new object[] { values }, attrs = new Dictionary() { ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pack_eager_fallback(values, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("Pack", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("Pack", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pack_eager_fallback(Tensors values, int axis, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(values); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "axis", axis }; + var _result = _execute.execute("Pack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Pack", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Pads a tensor with zeros. + /// + /// + /// + /// This operation pads a `input` with zeros according to the `paddings` you + /// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the + /// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many zeros to add before the contents of `input` in that dimension, and + /// `paddings[D, 1]` indicates how many zeros to add after the contents of `input` + /// in that dimension. + /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor pad(Tensor input, Tensor paddings, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pad", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pad_eager_fallback(input, paddings, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + var _op = tf.OpDefLib._apply_op_helper("Pad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings") }; + _execute.record_gradient("Pad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pad_eager_fallback(Tensor input, Tensor paddings, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype }; + var _result = _execute.execute("Pad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Pad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Pads a tensor. + /// + /// + /// + /// This operation pads `input` according to the `paddings` and `constant_values` + /// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many padding values to add before the contents of `input` in that dimension, + /// and `paddings[D, 1]` indicates how many padding values to add after the contents + /// of `input` in that dimension. `constant_values` is a scalar tensor of the same + /// type as `input` that indicates the value to use for padding `input`. + /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # 'constant_values' is 0 + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor pad_v2(Tensor input, Tensor paddings, Tensor constant_values, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PadV2", name) { args = new object[] { input, paddings, constant_values }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pad_v2_eager_fallback(input, paddings, constant_values, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["constant_values"] = constant_values; + var _op = tf.OpDefLib._apply_op_helper("PadV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings") }; + _execute.record_gradient("PadV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pad_v2_eager_fallback(Tensor input, Tensor paddings, Tensor constant_values, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings, constant_values }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype }; + var _result = _execute.execute("PadV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PadV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Concatenates a list of `N` tensors along the first dimension. + /// + /// + /// + /// The input tensors are all required to have size 1 in the first dimension. + /// + /// For example: + /// + /// ``` + /// # 'x' is [[1, 4]] + /// # 'y' is [[2, 5]] + /// # 'z' is [[3, 6]] + /// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + /// ``` + /// + /// The difference between concat and parallel_concat is that concat requires all + /// of the inputs be computed before the operation will begin but doesn't require + /// that the input shapes be known during graph construction. Parallel concat + /// will copy pieces of the input into the output as they become available, in + /// some situations this can provide a performance benefit. + /// + /// + /// + /// + /// + /// the final shape of the result; should be equal to the shapes of any input + /// but with the number of input values in the first dimension. + /// + /// + /// + public static Tensor parallel_concat(Tensors values, Shape shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ParallelConcat", name) { args = new object[] { values }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return parallel_concat_eager_fallback(values, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("ParallelConcat", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("ParallelConcat", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor parallel_concat_eager_fallback(Tensors values, Shape shape, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(values); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype, "shape", shape }; + var _result = _execute.execute("ParallelConcat", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ParallelConcat", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// A placeholder op for a value that will be fed into the computation. + /// + /// + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. + /// + /// + /// + /// + /// The type of elements in the tensor. + /// + /// + /// + /// + /// (Optional) The shape of the tensor. If the shape has 0 dimensions, the + /// shape is unconstrained. + /// + /// + /// + public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Placeholder", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return placeholder_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dtype"] = dtype; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("Placeholder", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("Placeholder", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor placeholder_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "dtype", dtype, "shape", shape }; + var _result = _execute.execute("Placeholder", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Placeholder", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// A placeholder op for a value that will be fed into the computation. + /// + /// + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. + /// + /// + /// + /// + /// The type of elements in the tensor. + /// + /// + /// + /// + /// The shape of the tensor. The shape can be any partially-specified + /// shape. To be unconstrained, pass in a shape with unknown rank. + /// + /// + /// + public static Tensor placeholder_v2(TF_DataType dtype, Shape shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderV2", name) { args = new object[] { }, attrs = new Dictionary() { ["dtype"] = dtype, ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return placeholder_v2_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dtype"] = dtype; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("PlaceholderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("PlaceholderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor placeholder_v2_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "dtype", dtype, "shape", shape }; + var _result = _execute.execute("PlaceholderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PlaceholderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// A placeholder op that passes through `input` when its output is not fed. + /// + /// + /// + /// + /// The (possibly partial) shape of the tensor. + /// + /// + /// + public static Tensor placeholder_with_default(Tensor input, Shape shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderWithDefault", name) { args = new object[] { input }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return placeholder_with_default_eager_fallback(input, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("PlaceholderWithDefault", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("PlaceholderWithDefault", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor placeholder_with_default_eager_fallback(Tensor input, Shape shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "dtype", input.dtype, "shape", shape }; + var _result = _execute.execute("PlaceholderWithDefault", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PlaceholderWithDefault", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// An identity op that triggers an error if a gradient is requested. + /// + /// + /// + /// When executed in a graph, this op outputs its input tensor as-is. 
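+        /// 
+        /// A minimal C# sketch (illustrative only; it assumes the generated
+        /// `gen_array_ops` class):
+        /// ```
+        /// var x = tf.constant(new[] { 1.0f, 2.0f, 3.0f });
+        /// // The forward pass behaves like identity; any attempt to differentiate
+        /// // through `guarded` is expected to fail with the given message.
+        /// var guarded = gen_array_ops.prevent_gradient(x, message: "do not differentiate");
+        /// ```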
+ /// + /// When building ops to compute gradients, the TensorFlow gradient system + /// will return an error when trying to lookup the gradient of this op, + /// because no gradient must ever be registered for this function. This + /// op exists to prevent subtle bugs from silently returning unimplemented + /// gradients in some corner cases. + /// + /// + /// + /// + /// + /// Will be printed in the error when anyone tries to differentiate + /// this operation. + /// + /// + /// + public static Tensor prevent_gradient(Tensor input, string message = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PreventGradient", name) { args = new object[] { input }, attrs = new Dictionary() { ["message"] = message } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return prevent_gradient_eager_fallback(input, message: message, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (message is null) + { + message = ""; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["message"] = message; + var _op = tf.OpDefLib._apply_op_helper("PreventGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "message", _op.get_attr("message") }; + _execute.record_gradient("PreventGradient", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor prevent_gradient_eager_fallback(Tensor input, string message, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "message", message }; + var _result = _execute.execute("PreventGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("PreventGradient", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Use QuantizeAndDequantizeV2 instead. + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor quantize_and_dequantize(Tensor input, bool signed_input = true, int num_bits = 8, bool range_given = false, float input_min = 0f, float input_max = 0f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantize", name) { args = new object[] { input }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["input_min"] = input_min, ["input_max"] = input_max } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_eager_fallback(input, signed_input: signed_input, num_bits: num_bits, range_given: range_given, input_min: input_min, input_max: input_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["signed_input"] = signed_input; + keywords["num_bits"] = num_bits; + keywords["range_given"] = range_given; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "input_min", _op.get_attr("input_min"), "input_max", _op.get_attr("input_max"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("QuantizeAndDequantize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_eager_fallback(Tensor input, bool signed_input, int num_bits, bool range_given, float input_min, float input_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "input_min", input_min, "input_max", input_max, "T", input.dtype }; + var _result = _execute.execute("QuantizeAndDequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// + /// This op simulates the precision loss from the quantized forward pass by: + /// + /// 1. Quantizing the tensor to fixed point numbers, which should match the target + /// quantization method when it is used in inference. + /// 2. Dequantizing it back to floating point numbers for the following ops, most + /// likely matmul. + /// + /// There are different ways to quantize. This version uses only scaling, so 0.0 + /// maps to 0. + /// + /// From the specified 'num_bits' in the quantized output type, it determines + /// minimum and maximum representable quantized values. + /// + /// e.g. + /// + /// * [-128, 127] for signed, num_bits = 8, or + /// * [0, 255] for unsigned, num_bits = 8. + /// + /// If range_given == False, the initial input_min, input_max will be determined + /// automatically as the minimum and maximum values in the input tensor, otherwise + /// the specified values of input_min, input_max are used. + /// + /// Note: If the input_min, input_max are specified, they do not need to equal the + /// actual minimum and maximum values in the tensor. e.g. in some cases it may be + /// beneficial to specify these values such that the low probability extremes of the + /// input distribution are clipped. 
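+        /// 
+        /// A minimal C# call sketch with an explicitly given range (illustrative only;
+        /// it assumes the generated `gen_array_ops` class):
+        /// ```
+        /// var inputs = tf.constant(new[] { -12.3f, 0.5f, 7.8f });
+        /// var fake_quant = gen_array_ops.quantize_and_dequantize_v2(
+        ///     input: inputs,
+        ///     input_min: tf.constant(-10.0f),
+        ///     input_max: tf.constant(10.0f),
+        ///     signed_input: true,
+        ///     num_bits: 8,
+        ///     range_given: true);
+        /// ```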
+ /// + /// This op determines the maximum scale_factor that would map the initial + /// [input_min, input_max] range to a range that lies within the representable + /// quantized range. + /// + /// It determines the scale from one of input_min and input_max, then updates the + /// other one to maximize the representable range. + /// + /// e.g. + /// + /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, + /// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it + /// would update input_max to be 127 / 12.8 = 9.921875 + /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0, + /// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it + /// would update input_min to be 128.0 / 12.7 = -10.07874 + /// * if the output is unsigned, input_min is forced to be 0, and only the + /// specified input_max is used. + /// + /// After determining the scale_factor and updating the input range, it applies the + /// following to each value in the 'input' tensor. + /// + /// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor. + /// + /// The above round function rounds the value based on the given round_mode. + /// + /// + /// + /// + /// + /// + /// + /// + /// Whether the quantization is signed or unsigned. (actually this parameter should + /// have been called `signed_output`) + /// + /// + /// + /// + /// The bitwidth of the quantization. + /// + /// + /// + /// + /// Whether the range is given or should be determined from the `input` tensor. + /// + /// + /// + /// + /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is + /// used when rounding float values to their quantized equivalents. The following + /// rounding modes are currently supported: + /// + /// * HALF_TO_EVEN: this is the default round_mode. + /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 + /// rounds up to -7. + /// + /// + /// + /// + /// + /// If True, then the absolute value of the quantized minimum value is the same as + /// the quantized maximum value, instead of 1 greater. + /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128. + /// + /// + /// + /// + /// If specified, this axis is treated as a channel or slice axis, and a separate + /// quantization range is used for each channel or slice along this axis. + /// + /// + /// + public static Tensor quantize_and_dequantize_v2(Tensor input, Tensor input_min, Tensor input_max, bool signed_input = true, int num_bits = 8, bool range_given = false, string round_mode = "HALF_TO_EVEN", bool narrow_range = false, int axis = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV2", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_v2_eager_fallback(input, input_min, input_max, signed_input: signed_input, num_bits: num_bits, range_given: range_given, round_mode: round_mode, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (round_mode is null) + { + round_mode = "HALF_TO_EVEN"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["signed_input"] = signed_input; + keywords["num_bits"] = num_bits; + keywords["range_given"] = range_given; + keywords["round_mode"] = round_mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("QuantizeAndDequantizeV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_v2_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, bool signed_input, int num_bits, bool range_given, string round_mode, bool narrow_range, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "T", input.dtype, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis }; + var _result = _execute.execute("QuantizeAndDequantizeV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// + /// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + /// tensor, so its value can change during training. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor quantize_and_dequantize_v3(Tensor input, Tensor input_min, Tensor input_max, Tensor num_bits, bool signed_input = true, bool range_given = true, bool narrow_range = false, int axis = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV3", name) { args = new object[] { input, input_min, input_max, num_bits }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["range_given"] = range_given, ["narrow_range"] = narrow_range, ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_v3_eager_fallback(input, input_min, input_max, num_bits, signed_input: signed_input, range_given: range_given, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["num_bits"] = num_bits; + keywords["signed_input"] = signed_input; + keywords["range_given"] = range_given; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("QuantizeAndDequantizeV3", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_v3_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, Tensor num_bits, bool signed_input, bool range_given, bool narrow_range, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max, num_bits }; + object[] _attrs = new object[] { "signed_input", signed_input, "range_given", range_given, "T", input.dtype, "narrow_range", narrow_range, "axis", axis }; + var _result = _execute.execute("QuantizeAndDequantizeV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantizes then dequantizes a tensor. + /// + /// + /// + /// This is almost identical to QuantizeAndDequantizeV2, except that it returns a + /// gradient of 1 for inputs that are within the quantization range, or 0 otherwise. + /// + /// + /// + /// + /// + /// + /// + /// Whether the quantization is signed or unsigned. (actually this parameter should + /// have been called `signed_output`) + /// + /// + /// + /// + /// The bitwidth of the quantization. + /// + /// + /// + /// + /// Whether the range is given or should be determined from the `input` tensor. + /// + /// + /// + /// + /// The 'round_mode' attribute controls which rounding tie-breaking algorithm is + /// used when rounding float values to their quantized equivalents. The following + /// rounding modes are currently supported: + /// + /// * HALF_TO_EVEN: this is the default round_mode. + /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 + /// rounds up to -7. + /// + /// + /// + /// + /// + /// If True, then the absolute value of the quantized minimum value is the same as + /// the quantized maximum value, instead of 1 greater. + /// i.e. 
for 8 bit quantization, the minimum value is -127 instead of -128. + /// + /// + /// + /// + /// If specified, this axis is treated as a channel or slice axis, and a separate + /// quantization range is used for each channel or slice along this axis. + /// + /// + /// + public static Tensor quantize_and_dequantize_v4(Tensor input, Tensor input_min, Tensor input_max, bool signed_input = true, int num_bits = 8, bool range_given = false, string round_mode = "HALF_TO_EVEN", bool narrow_range = false, int axis = -1, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV4", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantize_and_dequantize_v4_eager_fallback(input, input_min, input_max, signed_input: signed_input, num_bits: num_bits, range_given: range_given, round_mode: round_mode, narrow_range: narrow_range, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (round_mode is null) + { + round_mode = "HALF_TO_EVEN"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["signed_input"] = signed_input; + keywords["num_bits"] = num_bits; + keywords["range_given"] = range_given; + keywords["round_mode"] = round_mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("QuantizeAndDequantizeV4", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "T", _op._get_attr_type("T"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("QuantizeAndDequantizeV4", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantize_and_dequantize_v4_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, bool signed_input, int num_bits, bool range_given, string round_mode, bool narrow_range, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "T", input.dtype, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis }; + var _result = _execute.execute("QuantizeAndDequantizeV4", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeAndDequantizeV4", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. + /// + /// + /// + /// [min_range, max_range] are scalar floats that specify the range for + /// the 'input' data. The 'mode' attribute controls exactly which calculations are + /// used to convert the float values to their quantized equivalents. 
The + /// 'round_mode' attribute controls which rounding tie-breaking algorithm is used + /// when rounding float values to their quantized equivalents. + /// + /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + /// + /// ``` + /// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + /// if T == qint8: out[i] -= (range(T) + 1) / 2.0 + /// ``` + /// + /// here `range(T) = numeric_limits::max() - numeric_limits::min()` + /// + /// *MIN_COMBINED Mode Example* + /// + /// Assume the input is type float and has a possible range of [0.0, 6.0] and the + /// output type is quint8 ([0, 255]). The min_range and max_range values should be + /// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each + /// value of the input by 255/6 and cast to quint8. + /// + /// If the output type was qint8 ([-128, 127]), the operation will additionally + /// subtract each value by 128 prior to casting, so that the range of values aligns + /// with the range of qint8. + /// + /// If the mode is 'MIN_FIRST', then this approach is used: + /// + /// ``` + /// num_discrete_values = 1 << (# of bits in T) + /// range_adjust = num_discrete_values / (num_discrete_values - 1) + /// range = (range_max - range_min) * range_adjust + /// range_scale = num_discrete_values / range + /// quantized = round(input * range_scale) - round(range_min * range_scale) + + /// numeric_limits::min() + /// quantized = max(quantized, numeric_limits::min()) + /// quantized = min(quantized, numeric_limits::max()) + /// ``` + /// + /// The biggest difference between this and MIN_COMBINED is that the minimum range + /// is rounded first, before it's subtracted from the rounded value. With + /// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing + /// and dequantizing will introduce a larger and larger error. + /// + /// *SCALED mode Example* + /// + /// `SCALED` mode matches the quantization approach used in + /// `QuantizeAndDequantize{V2|V3}`. + /// + /// If the mode is `SCALED`, the quantization is performed by multiplying each + /// input value by a scaling_factor. + /// The scaling_factor is determined from `min_range` and `max_range` to be as large + /// as possible such that the range from `min_range` to `max_range` is representable + /// within values of type T. + /// + /// ```c++ + /// + /// const int min_T = std::numeric_limits::min(); + /// const int max_T = std::numeric_limits::max(); + /// const float max_float = std::numeric_limits::max(); + /// + /// const float scale_factor_from_min_side = + /// (min_T * min_range > 0) ? min_T / min_range : max_float; + /// const float scale_factor_from_max_side = + /// (max_T * max_range > 0) ? max_T / max_range : max_float; + /// + /// const float scale_factor = std::min(scale_factor_from_min_side, + /// scale_factor_from_max_side); + /// ``` + /// + /// We next use the scale_factor to adjust min_range and max_range as follows: + /// + /// ```c++ + /// min_range = min_T / scale_factor; + /// max_range = max_T / scale_factor; + /// ``` + /// + /// + /// e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would + /// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 + /// In this case, min_range would remain -10, but max_range would be adjusted to + /// 127 / 12.8 = 9.921875 + /// + /// So we will quantize input values in the range (-10, 9.921875) to (-128, 127). 
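The scale-factor arithmetic in the worked qint8 example above can be spot-checked outside of TensorFlow. A minimal, self-contained C# sketch of the same computation (purely illustrative; it calls no TensorFlow code):

```csharp
// Spot-check of the SCALED-mode range adjustment described above,
// using the worked example: T = qint8, min_range = -10, max_range = 9.
using System;

class ScaledRangeCheck
{
    static void Main()
    {
        const float minT = -128f;  // numeric_limits<qint8>::min()
        const float maxT = 127f;   // numeric_limits<qint8>::max()
        float minRange = -10f, maxRange = 9f;

        float fromMin = (minT * minRange > 0) ? minT / minRange : float.MaxValue; // -128 / -10 = 12.8
        float fromMax = (maxT * maxRange > 0) ? maxT / maxRange : float.MaxValue; //  127 /   9 ~ 14.11
        float scale = Math.Min(fromMin, fromMax);                                  // 12.8

        // Adjusted range that QuantizeV2 returns as its 2nd and 3rd outputs.
        Console.WriteLine($"scale={scale}, min_range={minT / scale}, max_range={maxT / scale}");
        // prints: scale=12.8, min_range=-10, max_range=9.921875
    }
}
```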
+ /// + /// The input tensor can now be quantized by clipping values to the range + /// `min_range` to `max_range`, then multiplying by scale_factor as follows: + /// + /// ```c++ + /// result = round(min(max_range, max(min_range, input)) * scale_factor) + /// ``` + /// + /// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + /// this operation. These outputs should be used as the range for any further + /// calculations. + /// + /// + /// *narrow_range (bool) attribute* + /// + /// If true, we do not use the minimum quantized value. + /// i.e. for int8 the quantized output, it would be restricted to the range + /// -127..127 instead of the full -128..127 range. + /// This is provided for compatibility with certain inference backends. + /// (Only applies to SCALED mode) + /// + /// + /// *axis (int) attribute* + /// + /// An optional `axis` attribute can specify a dimension index of the input tensor, + /// such that quantization ranges will be calculated and applied separately for each + /// slice of the tensor along that dimension. This is useful for per-channel + /// quantization. + /// + /// If axis is specified, min_range and max_range + /// + /// if `axis`=None, per-tensor quantization is performed as normal. + /// + /// + /// *ensure_minimum_range (float) attribute* + /// + /// Ensures the minimum quantization range is at least this value. + /// The legacy default value for this is 0.01, but it is strongly suggested to + /// set it to 0 for new uses. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantize_v2(Tensor input, Tensor min_range, Tensor max_range, TF_DataType T, string mode = "MIN_COMBINED", string round_mode = "HALF_AWAY_FROM_ZERO", bool narrow_range = false, int axis = -1, float ensure_minimum_range = 0.01f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeV2", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary() { ["T"] = T, ["mode"] = mode, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["ensure_minimum_range"] = ensure_minimum_range } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantize_v2_eager_fallback(input, min_range, max_range, T: T, mode: mode, round_mode: round_mode, narrow_range: narrow_range, axis: axis, ensure_minimum_range: ensure_minimum_range, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (mode is null) + { + mode = "MIN_COMBINED"; + } + if (round_mode is null) + { + round_mode = "HALF_AWAY_FROM_ZERO"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_range"] = min_range; + keywords["max_range"] = max_range; + keywords["T"] = T; + keywords["mode"] = mode; + keywords["round_mode"] = round_mode; + keywords["narrow_range"] = narrow_range; + keywords["axis"] = axis; + keywords["ensure_minimum_range"] = ensure_minimum_range; + var _op = tf.OpDefLib._apply_op_helper("QuantizeV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "round_mode", _op.get_attr("round_mode"), "narrow_range", _op._get_attr_bool("narrow_range"), "axis", _op._get_attr_int("axis"), "ensure_minimum_range", _op.get_attr("ensure_minimum_range") }; + _execute.record_gradient("QuantizeV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantize_v2_eager_fallback(Tensor input, Tensor min_range, Tensor max_range, TF_DataType T, string mode, string round_mode, bool narrow_range, int axis, float ensure_minimum_range, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_range, max_range }; + object[] _attrs = new object[] { "T", T, "mode", mode, "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis, "ensure_minimum_range", ensure_minimum_range }; + var _result = _execute.execute("QuantizeV2", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Concatenates quantized tensors along one dimension. + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_concat(Tensor concat_dim, Tensors values, Tensors input_mins, Tensors input_maxes, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConcat", name) { args = new object[] { concat_dim, values, input_mins, input_maxes }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_concat_eager_fallback(concat_dim, values, input_mins, input_maxes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["concat_dim"] = concat_dim; + keywords["values"] = values; + keywords["input_mins"] = input_mins; + keywords["input_maxes"] = input_maxes; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConcat", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("QuantizedConcat", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_concat_eager_fallback(Tensor concat_dim, Tensors values, Tensors input_mins, Tensors input_maxes, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.Add(concat_dim); + _inputs_flat_list.AddRange(values); + _inputs_flat_list.AddRange(input_mins); + _inputs_flat_list.AddRange(input_maxes); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", values.Length, "T", values.dtype }; + var _result = _execute.execute("QuantizedConcat", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConcat", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Quantized Instance normalization. + /// + /// + /// + /// + /// + /// + /// If True, `given_y_min` and `given_y_min` + /// and `given_y_max` are used as the output range. Otherwise, + /// the implementation computes the output range. + /// + /// + /// + /// + /// Output in `y_min` if `output_range_given` is True. + /// + /// + /// + /// + /// Output in `y_max` if `output_range_given` is True. + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// Minimum value of `y_max - y_min` + /// + /// + /// + public static Tensor[] quantized_instance_norm(Tensor x, Tensor x_min, Tensor x_max, bool output_range_given = false, float given_y_min = 0f, float given_y_max = 0f, float variance_epsilon = 1E-05f, float min_separation = 0.001f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedInstanceNorm", name) { args = new object[] { x, x_min, x_max }, attrs = new Dictionary() { ["output_range_given"] = output_range_given, ["given_y_min"] = given_y_min, ["given_y_max"] = given_y_max, ["variance_epsilon"] = variance_epsilon, ["min_separation"] = min_separation } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_instance_norm_eager_fallback(x, x_min, x_max, output_range_given: output_range_given, given_y_min: given_y_min, given_y_max: given_y_max, variance_epsilon: variance_epsilon, min_separation: min_separation, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["x_min"] = x_min; + keywords["x_max"] = x_max; + keywords["output_range_given"] = output_range_given; + keywords["given_y_min"] = given_y_min; + keywords["given_y_max"] = given_y_max; + keywords["variance_epsilon"] = variance_epsilon; + keywords["min_separation"] = min_separation; + var _op = tf.OpDefLib._apply_op_helper("QuantizedInstanceNorm", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "output_range_given", _op._get_attr_bool("output_range_given"), "given_y_min", _op.get_attr("given_y_min"), "given_y_max", _op.get_attr("given_y_max"), "variance_epsilon", _op.get_attr("variance_epsilon"), "min_separation", _op.get_attr("min_separation") }; + _execute.record_gradient("QuantizedInstanceNorm", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_instance_norm_eager_fallback(Tensor x, Tensor x_min, Tensor x_max, bool output_range_given, float given_y_min, float given_y_max, float variance_epsilon, float min_separation, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, x_min, x_max }; + object[] _attrs = new object[] { "T", x.dtype, "output_range_given", output_range_given, "given_y_min", given_y_min, "given_y_max", given_y_max, "variance_epsilon", variance_epsilon, "min_separation", min_separation }; + var _result = _execute.execute("QuantizedInstanceNorm", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedInstanceNorm", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Reshapes a quantized tensor as per the Reshape op. + /// + /// + /// + /// ``` + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_reshape(Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReshape", name) { args = new object[] { tensor, shape, input_min, input_max }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_reshape_eager_fallback(tensor, shape, input_min, input_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["shape"] = shape; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + var _op = tf.OpDefLib._apply_op_helper("QuantizedReshape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tshape", _op._get_attr_type("Tshape") }; + _execute.record_gradient("QuantizedReshape", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_reshape_eager_fallback(Tensor tensor, Tensor shape, Tensor input_min, Tensor input_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, shape, input_min, input_max }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tshape", shape.dtype }; + var _result = _execute.execute("QuantizedReshape", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedReshape", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns the rank of a tensor. + /// + /// + /// + /// This operation returns an integer representing the rank of `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// # shape of tensor 't' is [2, 2, 3] + /// rank(t) ==> 3 + /// ``` + /// + /// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank + /// of a tensor is the number of indices required to uniquely select each element + /// of the tensor. Rank is also known as "order", "degree", or "ndims." + /// + /// + /// + /// + public static Tensor rank(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rank", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rank_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Rank", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Rank", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rank_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Rank", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Rank", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return the same ref tensor as the input ref tensor. 
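Calling the generated `rank` wrapper above from user code might look like the sketch below. `tf.constant` and `using static Tensorflow.Binding;` are TensorFlow.NET's standard binding surface; the class name `gen_array_ops` holding these statics is an assumption, not something this patch pins down:

```csharp
// Hedged usage sketch for the generated rank() wrapper shown above.
// gen_array_ops as the containing class and the tf.constant overload
// for multidimensional arrays are assumptions.
using System;
using Tensorflow;
using static Tensorflow.Binding;

var t = tf.constant(new[,,] { { { 1, 1, 1 }, { 2, 2, 2 } },
                              { { 3, 3, 3 }, { 4, 4, 4 } } });  // shape [2, 2, 3]
var r = gen_array_ops.rank(t);   // scalar Tensor containing 3, per the doc example
Console.WriteLine(r);
```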
+ /// + /// + /// + public static Tensor ref_identity(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("ref_identity op does not support eager execution. Arg input is a ref."); + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("RefIdentity", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("RefIdentity", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ref_identity_eager_fallback(Tensor input, string name, Context ctx) + { + throw new RuntimeError($"ref_identity op does not support eager execution. Arg 'input' is a ref."); + } + /// + /// Reshapes a tensor. + /// + /// + /// + /// Given `tensor`, this operation returns a tensor that has the same values + /// as `tensor` with shape `shape`. + /// + /// If one component of 1-D tensor `shape` is the special value -1, the size of that + /// dimension is computed so that the total size remains constant. In particular, a + /// `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + /// unknown. + /// + /// The `shape` must be 1-D and the operation returns a tensor with shape + /// `shape` filled with the values of `tensor`. In this case, the number of elements + /// implied by `shape` must be the same as the number of elements in `tensor`. + /// + /// It is an error if `shape` is not 1-D. + /// + /// For example: + /// + /// ``` + /// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + /// # tensor 't' has shape [9] + /// reshape(t, [3, 3]) ==> [[1, 2, 3], + /// [4, 5, 6], + /// [7, 8, 9]] + /// + /// # tensor 't' is [[[1, 1], [2, 2]], + /// # [[3, 3], [4, 4]]] + /// # tensor 't' has shape [2, 2, 2] + /// reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + /// [3, 3, 4, 4]] + /// + /// # tensor 't' is [[[1, 1, 1], + /// # [2, 2, 2]], + /// # [[3, 3, 3], + /// # [4, 4, 4]], + /// # [[5, 5, 5], + /// # [6, 6, 6]]] + /// # tensor 't' has shape [3, 2, 3] + /// # pass '[-1]' to flatten 't' + /// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + /// + /// # -1 can also be used to infer the shape + /// + /// # -1 is inferred to be 9: + /// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 2: + /// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 3: + /// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], + /// [2, 2, 2], + /// [3, 3, 3]], + /// [[4, 4, 4], + /// [5, 5, 5], + /// [6, 6, 6]]] + /// + /// # tensor 't' is [7] + /// # shape `[]` reshapes to a scalar + /// reshape(t, []) ==> 7 + /// ``` + /// + /// + /// + /// + /// + public static Tensor reshape(Tensor tensor, Tensor shape, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reshape", name) { args = new object[] { tensor, shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reshape_eager_fallback(tensor, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("Reshape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tshape", _op._get_attr_type("Tshape") }; + _execute.record_gradient("Reshape", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reshape_eager_fallback(Tensor tensor, Tensor shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, shape }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tshape", shape.dtype }; + var _result = _execute.execute("Reshape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Reshape", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Assign `value` to the sliced l-value reference of `ref`. + /// + /// + /// + /// The values of `value` are assigned to the positions in the variable + /// `ref` that are selected by the slice parameters. The slice parameters + /// `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + /// + /// NOTE this op currently does not support broadcasting and so `value`'s + /// shape must be exactly the shape produced by the slice of `ref`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Operation resource_strided_slice_assign(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceStridedSliceAssign", name) { args = new object[] { ref_, begin, end, strides, value }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return null; + } + catch (Exception) + { + } + try + { + return resource_strided_slice_assign_eager_fallback(ref_, begin, end, strides, value, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["ref"] = ref_; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["value"] = value; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("ResourceStridedSliceAssign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("ResourceStridedSliceAssign", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Operation resource_strided_slice_assign_eager_fallback(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { ref_, begin, end, strides, value }; + object[] _attrs = new object[] { "T", value.dtype, "Index", begin.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("ResourceStridedSliceAssign", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ResourceStridedSliceAssign", _inputs_flat, _attrs, _result); + } + return null; + } + /// + /// Reverses specific dimensions of a tensor. + /// + /// + /// + /// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions + /// of `tensor`, this operation reverses each dimension i of `tensor` where + /// `dims[i]` is `True`. + /// + /// `tensor` can have up to 8 dimensions. The number of dimensions + /// of `tensor` must equal the number of elements in `dims`. 
In other words: + /// + /// `rank(tensor) = size(dims)` + /// + /// For example: + /// + /// ``` + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [False, False, False, True] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is [False, True, False, False] + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is [False, False, True, False] + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// ``` + /// + /// + /// + /// + /// + public static Tensor reverse(Tensor tensor, Tensor dims, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reverse", name) { args = new object[] { tensor, dims }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reverse_eager_fallback(tensor, dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["dims"] = dims; + var _op = tf.OpDefLib._apply_op_helper("Reverse", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Reverse", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reverse_eager_fallback(Tensor tensor, Tensor dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, dims }; + object[] _attrs = new object[] { "T", tensor.dtype }; + var _result = _execute.execute("Reverse", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Reverse", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Reverses variable length slices. + /// + /// + /// + /// This op first slices `input` along the dimension `batch_dim`, and for each + /// slice `i`, reverses the first `seq_lengths[i]` elements along + /// the dimension `seq_dim`. + /// + /// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + /// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + /// + /// The output slice `i` along dimension `batch_dim` is then given by input + /// slice `i`, with the first `seq_lengths[i]` slices along dimension + /// `seq_dim` reversed. + /// + /// For example: + /// + /// ``` + /// # Given this: + /// batch_dim = 0 + /// seq_dim = 1 + /// input.dims = (4, 8, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + /// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + /// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + /// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[0, 7:, :, ...] = input[0, 7:, :, ...] 
+ /// output[1, 2:, :, ...] = input[1, 2:, :, ...] + /// output[2, 3:, :, ...] = input[2, 3:, :, ...] + /// output[3, 2:, :, ...] = input[3, 2:, :, ...] + /// ``` + /// + /// In contrast, if: + /// + /// ``` + /// # Given this: + /// batch_dim = 2 + /// seq_dim = 0 + /// input.dims = (8, ?, 4, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + /// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + /// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] + /// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + /// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] + /// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + /// output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] + /// ``` + /// + /// + /// + /// + /// + /// + /// The dimension which is partially reversed. + /// + /// + /// + /// + /// The dimension along which reversal is performed. + /// + /// + /// + public static Tensor reverse_sequence(Tensor input, Tensor seq_lengths, int seq_dim = 0, int batch_dim = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseSequence", name) { args = new object[] { input, seq_lengths }, attrs = new Dictionary() { ["seq_dim"] = seq_dim, ["batch_dim"] = batch_dim } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reverse_sequence_eager_fallback(input, seq_lengths, seq_dim: seq_dim, batch_dim: batch_dim, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["seq_lengths"] = seq_lengths; + keywords["seq_dim"] = seq_dim; + keywords["batch_dim"] = batch_dim; + var _op = tf.OpDefLib._apply_op_helper("ReverseSequence", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "seq_dim", _op._get_attr_int("seq_dim"), "batch_dim", _op._get_attr_int("batch_dim"), "T", _op._get_attr_type("T"), "Tlen", _op._get_attr_type("Tlen") }; + _execute.record_gradient("ReverseSequence", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reverse_sequence_eager_fallback(Tensor input, Tensor seq_lengths, int seq_dim, int batch_dim, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, seq_lengths }; + object[] _attrs = new object[] { "seq_dim", seq_dim, "batch_dim", batch_dim, "T", input.dtype, "Tlen", seq_lengths.dtype }; + var _result = _execute.execute("ReverseSequence", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReverseSequence", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Reverses specific dimensions of a tensor. + /// + /// + /// + /// Given a `tensor`, and a `int32` tensor `axis` representing the set of + /// dimensions of `tensor` to reverse. This operation reverses each dimension + /// `i` for which there exists `j` s.t. `axis[j] == i`. + /// + /// `tensor` can have up to 8 dimensions. The number of dimensions specified + /// in `axis` may be 0 or more entries. If an index is specified more than + /// once, a InvalidArgument error is raised. 
+ /// + /// For example: + /// + /// ``` + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [3] or 'dims' is [-1] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is '[1]' (or 'dims' is '[-3]') + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is '[2]' (or 'dims' is '[-2]') + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// ``` + /// + /// + /// + /// + /// + public static Tensor reverse_v2(Tensor tensor, Tensor axis, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseV2", name) { args = new object[] { tensor, axis }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reverse_v2_eager_fallback(tensor, axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("ReverseV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("ReverseV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reverse_v2_eager_fallback(Tensor tensor, Tensor axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, axis }; + object[] _attrs = new object[] { "Tidx", axis.dtype, "T", tensor.dtype }; + var _result = _execute.execute("ReverseV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReverseV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Scatters `updates` into a tensor of shape `shape` according to `indices`. + /// + /// + /// + /// Scatter sparse `updates` according to individual values at the specified + /// `indices`. This op returns an output tensor with the `shape` you specify. This + /// op is the inverse of the `tf.gather_nd` operator which extracts values or slices + /// from a given tensor. + /// + /// This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor + /// is zero-initialized. Calling `tf.scatter_nd(indices, updates, shape)` + /// is identical to calling + /// `tf.tensor_scatter_nd_add(tf.zeros(shape, updates.dtype), indices, updates)` + /// + /// If `indices` contains duplicates, the associated `updates` are accumulated + /// (summed) into the output tensor. + /// + /// **WARNING**: For floating-point data types, the output may be nondeterministic. + /// This is because the order in which the updates are applied is nondeterministic + /// and when floating-point numbers are added in different orders the resulting + /// numerical approximation error can be slightly different. 
However, the output + /// will be deterministic if op determinism is enabled via + /// `tf.config.experimental.enable_op_determinism`. + /// + /// `indices` is an integer tensor containing indices into the output tensor. The + /// last dimension of `indices` can be at most the rank of `shape`: + /// + /// indices.shape[-1] <= shape.rank + /// + /// The last dimension of `indices` corresponds to indices of elements + /// (if `indices.shape[-1] = shape.rank`) or slices + /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + /// `shape`. + /// + /// `updates` is a tensor with shape: + /// + /// indices.shape[:-1] + shape[indices.shape[-1]:] + /// + /// The simplest form of the scatter op is to insert individual elements in + /// a tensor by index. Consider an example where you want to insert 4 scattered + /// elements in a rank-1 tensor with 8 elements. + /// + ///
+ /// + ///
+ /// + /// In Python, this scatter operation would look like this: + /// + /// ```python + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// shape = tf.constant([8]) + /// scatter = tf.scatter_nd(indices, updates, shape) + /// print(scatter) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [0, 11, 0, 10, 9, 0, 0, 12] + /// + /// You can also insert entire slices of a higher rank tensor all at once. For + /// example, you can insert two slices in the first dimension of a rank-3 tensor + /// with two matrices of new values. + /// + ///
+ /// + ///
+ /// + /// In Python, this scatter operation would look like this: + /// + /// ```python + /// indices = tf.constant([[1], [3]]) + /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]]]) + /// shape = tf.constant([4, 4, 4]) + /// scatter = tf.scatter_nd(indices, updates, shape) + /// print(scatter) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, the index is ignored. + /// + ///
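The first Python snippet above maps one-to-one onto the generated `scatter_nd` wrapper defined just below. A hedged C# sketch of the same call (the `tf.constant` overloads and the `gen_array_ops` class name are assumptions):

```csharp
// C# version of the rank-1 scatter example from the doc comment, calling the
// generated scatter_nd(indices, updates, shape) wrapper defined below.
// gen_array_ops as the containing class and the tf.constant overloads are assumptions.
using Tensorflow;
using static Tensorflow.Binding;

var indices = tf.constant(new[,] { { 4 }, { 3 }, { 1 }, { 7 } });  // int32, shape [4, 1]
var updates = tf.constant(new[] { 9, 10, 11, 12 });
var shape   = tf.constant(new[] { 8 });
var scatter = gen_array_ops.scatter_nd(indices, updates, shape);
// Expected value per the doc comment: [0, 11, 0, 10, 9, 0, 0, 12]
```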
+ /// + /// + /// + /// + public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNd", name) { args = new object[] { indices, updates, shape }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return scatter_nd_eager_fallback(indices, updates, shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["updates"] = updates; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("ScatterNd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ScatterNd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor scatter_nd_eager_fallback(Tensor indices, Tensor updates, Tensor shape, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, updates, shape }; + object[] _attrs = new object[] { "T", updates.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ScatterNd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ScatterNd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Applies sparse addition to `input` using individual values or slices + /// + /// + /// + /// from `updates` according to indices `indices`. The updates are non-aliasing: + /// `input` is only modified in-place if no other operations will use it. + /// Otherwise, a copy of `input` is made. This operation has a gradient with + /// respect to both `input` and `updates`. + /// + /// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + /// + /// `indices` must be integer tensor, containing indices into `input`. + /// It must be shape \([d_0, ..., d_{Q-2}, K]\) where `0 < K <= P`. + /// + /// The innermost dimension of `indices` (with length `K`) corresponds to + /// indices into elements (if `K = P`) or `(P-K)`-dimensional slices + /// (if `K < P`) along the `K`th dimension of `input`. + /// + /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: + /// + /// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + /// + /// For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + /// elements. In Python, that addition would look like this: + /// + /// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + /// with tf.Session() as sess: + /// print(sess.run(output)) + /// + /// The resulting value `output` would look like this: + /// + /// [1, 13, 3, 14, 14, 6, 7, 20] + /// + /// See `tf.scatter_nd` for more details about how to make updates to slices. + /// + /// + /// + /// + /// + /// + public static Tensor scatter_nd_non_aliasing_add(Tensor input, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNdNonAliasingAdd", name) { args = new object[] { input, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return scatter_nd_non_aliasing_add_eager_fallback(input, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("ScatterNdNonAliasingAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("ScatterNdNonAliasingAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor scatter_nd_non_aliasing_add_eager_fallback(Tensor input, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, indices, updates }; + object[] _attrs = new object[] { "T", input.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("ScatterNdNonAliasingAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ScatterNdNonAliasingAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the shape of a tensor. + /// + /// + /// + /// This operation returns a 1-D integer tensor representing the shape of `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// shape(t) ==> [2, 2, 3] + /// ``` + /// + /// + /// + /// + /// + public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Shape", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return shape_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("Shape", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("Shape", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor shape_eager_fallback(Tensor input, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("Shape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Shape", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns shape of tensors. + /// + /// + /// + /// This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
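A short sketch contrasting the `shape` wrapper above with the `shape_n` wrapper defined next; the `gen_array_ops` class name, the `tf.constant` overloads, and the `Tensors(params Tensor[])` constructor are assumptions:

```csharp
// shape returns a single 1-D tensor; shape_n returns one shape tensor per input.
// Inputs to shape_n must share a dtype (attr T), so both constants are int32 here.
using Tensorflow;
using static Tensorflow.Binding;

var a = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });   // shape [2, 3]
var b = tf.constant(new[] { 1, 2, 3, 4 });                  // shape [4]

var sa     = gen_array_ops.shape(a);                        // -> [2, 3]
var shapes = gen_array_ops.shape_n(new Tensors(a, b));      // -> [2, 3] and [4]
```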
+ /// + /// + /// + /// + /// + public static Tensor[] shape_n(Tensors input, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShapeN", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return shape_n_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("ShapeN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("ShapeN", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] shape_n_eager_fallback(Tensors input, TF_DataType out_type, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(input); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", input.Length, "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("ShapeN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ShapeN", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns the size of a tensor. + /// + /// + /// + /// This operation returns an integer representing the number of elements in + /// `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + /// size(t) ==> 12 + /// ``` + /// + /// + /// + /// + /// + public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Size", name) { args = new object[] { input }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return size_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("Size", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("Size", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor size_eager_fallback(Tensor input, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("Size", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Size", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return a slice from 'input'. 
+ /// + /// + /// + /// The output tensor is a tensor with dimensions described by 'size' + /// whose values are extracted from 'input' starting at the offsets in + /// 'begin'. + /// + /// *Requirements*: + /// 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + /// + /// + /// + /// + /// + /// + public static Tensor slice(Tensor input, Tensor begin, Tensor size, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Slice", name) { args = new object[] { input, begin, size }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return slice_eager_fallback(input, begin, size, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["begin"] = begin; + keywords["size"] = size; + var _op = tf.OpDefLib._apply_op_helper("Slice", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index") }; + _execute.record_gradient("Slice", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor slice_eager_fallback(Tensor input, Tensor begin, Tensor size, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, begin, size }; + object[] _attrs = new object[] { "T", input.dtype, "Index", begin.dtype }; + var _result = _execute.execute("Slice", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Slice", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a copy of the input tensor. + /// + /// + /// + public static Tensor snapshot(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Snapshot", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return snapshot_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Snapshot", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Snapshot", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor snapshot_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Snapshot", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Snapshot", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// SpaceToBatch for 4-D tensors of type T. + /// + /// + /// + /// This is a legacy version of the more general SpaceToBatchND. + /// + /// Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + /// More specifically, this op outputs a copy of the input tensor where values from + /// the `height` and `width` dimensions are moved to the `batch` dimension. 
After + /// the zero-padding, both `height` and `width` of the input must be divisible by the + /// block size. + /// + /// The attr `block_size` must be greater than one. It indicates the block size. + /// + /// * Non-overlapping blocks of size `block_size x block size` in the height and + /// width dimensions are rearranged into the batch dimension at each location. + /// * The batch of the output tensor is `batch * block_size * block_size`. + /// * Both height_pad and width_pad must be divisible by block_size. + /// + /// The shape of the output will be: + /// + /// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + /// depth] + /// + /// Some examples: + /// + /// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2]], [[3], [4]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 1]` and value: + /// + /// ``` + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// ``` + /// + /// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 3]` and value: + /// + /// ``` + /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + /// ``` + /// + /// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[4, 2, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// ``` + /// + /// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[8, 1, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + /// ``` + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + /// + /// + /// + /// + /// + /// + public static Tensor space_to_batch(Tensor input, Tensor paddings, int block_size = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatch", name) { args = new object[] { input, paddings }, attrs = new Dictionary() { ["block_size"] = block_size } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return space_to_batch_eager_fallback(input, paddings, block_size: block_size, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["block_size"] = block_size; + var _op = tf.OpDefLib._apply_op_helper("SpaceToBatch", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "block_size", _op._get_attr_int("block_size") }; + _execute.record_gradient("SpaceToBatch", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor space_to_batch_eager_fallback(Tensor input, Tensor paddings, int block_size, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tpaddings", paddings.dtype, "block_size", block_size }; + var _result = _execute.execute("SpaceToBatch", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SpaceToBatch", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// SpaceToBatch for N-D tensors of type T. + /// + /// + /// + /// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a + /// grid of blocks of shape `block_shape`, and interleaves these blocks with the + /// "batch" dimension (0) such that in the output, the spatial dimensions + /// `[1, ..., M]` correspond to the position within the grid, and the batch + /// dimension combines both the position within a spatial block and the original + /// batch position. Prior to division into blocks, the spatial dimensions of the + /// input are optionally zero padded according to `paddings`. See below for a + /// precise description. + /// + /// This operation is equivalent to the following steps: + /// + /// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the + /// input according to `paddings` to produce `padded` of shape `padded_shape`. + /// + /// 2. Reshape `padded` to `reshaped_padded` of shape: + /// + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1], + /// block_shape[M-1]] + + /// remaining_shape + /// + /// 3. Permute dimensions of `reshaped_padded` to produce + /// `permuted_reshaped_padded` of shape: + /// + /// block_shape + + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch + /// dimension, producing an output tensor of shape: + /// + /// [batch * prod(block_shape)] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// Some examples: + /// + /// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1], [2]], [[3], [4]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 1]` and value: + /// + /// ``` + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// ``` + /// + /// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 3]` and value: + /// + /// ``` + /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + /// ``` + /// + /// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[4, 2, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// ``` + /// + /// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and + /// paddings = `[[0, 0], [2, 0]]`: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[8, 1, 3, 1]` and value: + /// + /// ``` + /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + /// [[[0], [2], [4]]], [[[0], [10], [12]]], + /// [[[0], [5], [7]]], [[[0], [13], [15]]], + /// [[[0], [6], [8]]], [[[0], [14], [16]]]] + /// ``` + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + /// + /// + /// + /// + /// + /// + public static Tensor space_to_batch_nd(Tensor input, Tensor block_shape, Tensor paddings, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatchND", name) { args = new object[] { input, block_shape, paddings }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return space_to_batch_nd_eager_fallback(input, block_shape, paddings, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_shape"] = block_shape; + keywords["paddings"] = paddings; + var _op = tf.OpDefLib._apply_op_helper("SpaceToBatchND", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tblock_shape", _op._get_attr_type("Tblock_shape"), "Tpaddings", _op._get_attr_type("Tpaddings") }; + _execute.record_gradient("SpaceToBatchND", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor space_to_batch_nd_eager_fallback(Tensor input, Tensor block_shape, Tensor paddings, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, block_shape, paddings }; + object[] _attrs = new object[] { "T", input.dtype, "Tblock_shape", block_shape.dtype, "Tpaddings", paddings.dtype }; + var _result = _execute.execute("SpaceToBatchND", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SpaceToBatchND", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// SpaceToDepth for tensors of type T. + /// + /// + /// + /// Rearranges blocks of spatial data, into depth. More specifically, + /// this op outputs a copy of the input tensor where values from the `height` + /// and `width` dimensions are moved to the `depth` dimension. + /// The attr `block_size` indicates the input block size. + /// + /// * Non-overlapping blocks of size `block_size x block size` are rearranged + /// into depth at each location. + /// * The depth of the output tensor is `block_size * block_size * input_depth`. + /// * The Y, X coordinates within each block of the input become the high order + /// component of the output channel index. + /// * The input tensor's height and width must be divisible by block_size. + /// + /// The `data_format` attr specifies the layout of the input and output tensors + /// with the following options: + /// "NHWC": `[ batch, height, width, channels ]` + /// "NCHW": `[ batch, channels, height, width ]` + /// "NCHW_VECT_C": + /// `qint8 [ batch, channels / 4, height, width, 4 ]` + /// + /// It is useful to consider the operation as transforming a 6-D Tensor. + /// e.g. for data_format = NHWC, + /// Each element in the input tensor can be specified via 6 coordinates, + /// ordered by decreasing memory layout significance as: + /// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + /// within the output image, bX, bY means coordinates + /// within the input block, iC means input channels). + /// The output would be a transpose to the following layout: + /// n,oY,oX,bY,bX,iC + /// + /// This operation is useful for resizing the activations between convolutions + /// (but keeping all data), e.g. instead of pooling. It is also useful for training + /// purely convolutional models. 
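+ /// 
+ /// A minimal C# usage sketch of this generated wrapper (the input values below and the
+ /// use of `tf.constant`/`tf.reshape` from the TensorFlow.NET API are illustrative assumptions):
+ /// 
+ /// ```
+ /// // A [1, 2, 2, 1] NHWC tensor; the single 2x2 spatial block is moved into depth.
+ /// var x = tf.reshape(tf.constant(new float[] { 1, 2, 3, 4 }), new Shape(1, 2, 2, 1));
+ /// var y = space_to_depth(x, block_size: 2);   // y has shape [1, 1, 1, 4]
+ /// ```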
+ /// + /// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and + /// block_size = 2: + /// + /// ``` + /// x = [[[[1], [2]], + /// [[3], [4]]]] + /// ``` + /// + /// This operation will output a tensor of shape `[1, 1, 1, 4]`: + /// + /// ``` + /// [[[[1, 2, 3, 4]]]] + /// ``` + /// + /// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + /// the corresponding output will have a single element (i.e. width and height are + /// both 1) and will have a depth of 4 channels (1 * block_size * block_size). + /// The output element shape is `[1, 1, 4]`. + /// + /// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// This operation, for block_size of 2, will return the following tensor of shape + /// `[1, 1, 1, 12]` + /// + /// ``` + /// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + /// ``` + /// + /// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: + /// + /// ``` + /// x = [[[[1], [2], [5], [6]], + /// [[3], [4], [7], [8]], + /// [[9], [10], [13], [14]], + /// [[11], [12], [15], [16]]]] + /// ``` + /// + /// the operator will return the following tensor of shape `[1 2 2 4]`: + /// + /// ``` + /// x = [[[[1, 2, 3, 4], + /// [5, 6, 7, 8]], + /// [[9, 10, 11, 12], + /// [13, 14, 15, 16]]]] + /// ``` + /// + /// + /// + /// + /// + /// The size of the spatial block. + /// + /// + /// + /// + public static Tensor space_to_depth(Tensor input, int block_size = 0, string data_format = "NHWC", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToDepth", name) { args = new object[] { input }, attrs = new Dictionary() { ["block_size"] = block_size, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return space_to_depth_eager_fallback(input, block_size: block_size, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["block_size"] = block_size; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("SpaceToDepth", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "block_size", _op._get_attr_int("block_size"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("SpaceToDepth", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor space_to_depth_eager_fallback(Tensor input, int block_size, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "block_size", block_size, "data_format", data_format }; + var _result = _execute.execute("SpaceToDepth", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SpaceToDepth", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Splits a tensor into `num_split` tensors along one dimension. + /// + /// + /// + /// + /// + /// The number of ways to split. Must evenly divide + /// `value.shape[split_dim]`. 
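+ /// 
+ /// As an illustrative C# sketch (the `tf.ones`/`tf.constant` helpers from the TensorFlow.NET
+ /// API and the concrete shapes below are assumptions, not part of the op definition):
+ /// 
+ /// ```
+ /// // Split a [4, 6] tensor into 3 pieces of shape [4, 2] along dimension 1.
+ /// var value = tf.ones(new Shape(4, 6));
+ /// var parts = split(tf.constant(1), value, num_split: 3);   // parts.Length == 3
+ /// ```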
+ /// + /// + /// + public static Tensor[] split(Tensor split_dim, Tensor value, int num_split = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Split", name) { args = new object[] { split_dim, value }, attrs = new Dictionary() { ["num_split"] = num_split } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return split_eager_fallback(split_dim, value, num_split: num_split, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["split_dim"] = split_dim; + keywords["value"] = value; + keywords["num_split"] = num_split; + var _op = tf.OpDefLib._apply_op_helper("Split", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_split", _op._get_attr_int("num_split"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("Split", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] split_eager_fallback(Tensor split_dim, Tensor value, int num_split, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { split_dim, value }; + object[] _attrs = new object[] { "num_split", num_split, "T", value.dtype }; + var _result = _execute.execute("Split", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Split", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Splits a tensor into `num_split` tensors along one dimension. + /// + /// + /// + /// + /// + /// + public static Tensor[] split_v(Tensor value, Tensor size_splits, Tensor split_dim, int num_split = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SplitV", name) { args = new object[] { value, size_splits, split_dim }, attrs = new Dictionary() { ["num_split"] = num_split } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return split_v_eager_fallback(value, size_splits, split_dim, num_split: num_split, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["size_splits"] = size_splits; + keywords["split_dim"] = split_dim; + keywords["num_split"] = num_split; + var _op = tf.OpDefLib._apply_op_helper("SplitV", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num_split", _op._get_attr_int("num_split"), "T", _op._get_attr_type("T"), "Tlen", _op._get_attr_type("Tlen") }; + _execute.record_gradient("SplitV", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] split_v_eager_fallback(Tensor value, Tensor size_splits, Tensor split_dim, int num_split, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value, size_splits, split_dim }; + object[] _attrs = new object[] { "num_split", num_split, "T", value.dtype, "Tlen", size_splits.dtype }; + var _result = _execute.execute("SplitV", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SplitV", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Removes dimensions of size 1 from the shape of a tensor. 
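+ /// 
+ /// A short C# usage sketch (the example tensor and the `tf.ones` helper from the
+ /// TensorFlow.NET API are illustrative assumptions):
+ /// 
+ /// ```
+ /// // 't' has shape [1, 2, 1, 3]; squeezing dimensions 0 and 2 yields shape [2, 3].
+ /// var t = tf.ones(new Shape(1, 2, 1, 3));
+ /// var s = squeeze(t, new[] { 0, 2 });
+ /// ```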
+ /// + /// + /// + /// Given a tensor `input`, this operation returns a tensor of the same type with + /// all dimensions of size 1 removed. If you don't want to remove all size 1 + /// dimensions, you can remove specific size 1 dimensions by specifying + /// `squeeze_dims`. + /// + /// For example: + /// + /// ``` + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t)) ==> [2, 3] + /// ``` + /// + /// Or, to remove specific size 1 dimensions: + /// + /// ``` + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] + /// ``` + /// + /// + /// + /// + /// + /// If specified, only squeezes the dimensions listed. The dimension + /// index starts at 0. It is an error to squeeze a dimension that is not 1. Must + /// be in the range `[-rank(input), rank(input))`. + /// + /// + /// + public static Tensor squeeze(Tensor input, int[] squeeze_dims = null, string? name = null) + { + var _ctx = tf.Context; + if (squeeze_dims is null) + { + squeeze_dims = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Squeeze", name) { args = new object[] { input }, attrs = new Dictionary() { ["squeeze_dims"] = squeeze_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return squeeze_eager_fallback(input, squeeze_dims: squeeze_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["squeeze_dims"] = squeeze_dims; + var _op = tf.OpDefLib._apply_op_helper("Squeeze", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "squeeze_dims", _op.get_attr("squeeze_dims") }; + _execute.record_gradient("Squeeze", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor squeeze_eager_fallback(Tensor input, int[] squeeze_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "squeeze_dims", squeeze_dims }; + var _result = _execute.execute("Squeeze", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Squeeze", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Stops gradient computation. + /// + /// + /// + /// When executed in a graph, this op outputs its input tensor as-is. + /// + /// When building ops to compute gradients, this op prevents the contribution of + /// its inputs to be taken into account. Normally, the gradient generator adds ops + /// to a graph to compute the derivatives of a specified 'loss' by recursively + /// finding out inputs that contributed to its computation. If you insert this op + /// in the graph it inputs are masked from the gradient generator. They are not + /// taken into account for computing gradients. + /// + /// This is useful any time you want to compute a value with TensorFlow but need + /// to pretend that the value was a constant. For example, the softmax function + /// for a vector x can be written as + /// + /// ```python + /// + /// def softmax(x): + /// numerator = tf.exp(x) + /// denominator = tf.reduce_sum(numerator) + /// return numerator / denominator + /// ``` + /// + /// This however is susceptible to overflow if the values in x are large. 
An + /// alternative more stable way is to subtract the maximum of x from each of the + /// values. + /// + /// ```python + /// + /// def stable_softmax(x): + /// z = x - tf.reduce_max(x) + /// numerator = tf.exp(z) + /// denominator = tf.reduce_sum(numerator) + /// return numerator / denominator + /// ``` + /// + /// However, when we backprop through the softmax to x, we dont want to backprop + /// through the `tf.reduce_max(x)` (if the max values are not unique then the + /// gradient could flow to the wrong input) calculation and treat that as a + /// constant. Therefore, we should write this out as + /// + /// ```python + /// + /// def stable_softmax(x): + /// z = x - tf.stop_gradient(tf.reduce_max(x)) + /// numerator = tf.exp(z) + /// denominator = tf.reduce_sum(numerator) + /// return numerator / denominator + /// ``` + /// + /// Some other examples include: + /// + /// * The *EM* algorithm where the *M-step* should not involve backpropagation + /// through the output of the *E-step*. + /// * Contrastive divergence training of Boltzmann machines where, when + /// differentiating the energy function, the training must not backpropagate + /// through the graph that generated the samples from the model. + /// * Adversarial training, where no backprop should happen through the adversarial + /// example generation process. + /// + /// + /// + /// + public static Tensor stop_gradient(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StopGradient", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return stop_gradient_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("StopGradient", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("StopGradient", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor stop_gradient_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("StopGradient", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StopGradient", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return a strided slice from `input`. + /// + /// + /// + /// Note, most python users will want to use the Python `Tensor.__getitem__` + /// or `Variable.__getitem__` rather than this op directly. + /// + /// The goal of this op is to produce a new tensor with a subset of + /// the elements from the `n` dimensional `input` tensor. The subset is chosen using + /// a sequence of `m` sparse range specifications encoded into the arguments + /// of this function. Note, in some cases + /// `m` could be equal to `n`, but this need not be the case. Each + /// range specification entry can be one of the following: + /// + /// - An ellipsis (...). Ellipses are used to imply zero or more + /// dimensions of full-dimension selection and are produced using + /// `ellipsis_mask`. For example, `foo[...]` is the identity slice. 
+ /// + /// - A new axis. This is used to insert a new shape=1 dimension and is + /// produced using `new_axis_mask`. For example, `foo[:, ...]` where + /// `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. + /// + /// + /// - A range `begin:end:stride`. This is used to specify how much to choose from + /// a given dimension. `stride` can be any integer but 0. `begin` is an integer + /// which represents the index of the first value to select while `end` represents + /// the index of the last value to select. The number of values selected in each + /// dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. + /// `begin` and `end` can be negative where `-1` is the last element, `-2` is + /// the second to last. `begin_mask` controls whether to replace the explicitly + /// given `begin` with an implicit effective value of `0` if `stride > 0` and + /// `-1` if `stride < 0`. `end_mask` is analogous but produces the number + /// required to create the largest open interval. For example, given a shape + /// `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do + /// not assume this is equivalent to `foo[0:-1]` which has an effective `begin` + /// and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the + /// first dimension of a tensor while dropping the last two (in the original + /// order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. + /// + /// - A single index. This is used to keep only elements that have a given + /// index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a + /// shape `(6,)` tensor. This is encoded in `begin` and `end` and + /// `shrink_axis_mask`. + /// + /// Each conceptual range specification is encoded in the op's argument. This + /// encoding is best understand by considering a non-trivial example. In + /// particular, + /// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as + /// + /// ``` + /// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) + /// end = [2, 4, x, x, -3, x] + /// strides = [1, 1, x, x, -1, 1] + /// begin_mask = 1<<4 | 1<<5 = 48 + /// end_mask = 1<<5 = 32 + /// ellipsis_mask = 1<<3 = 8 + /// new_axis_mask = 1<<2 = 4 + /// shrink_axis_mask = 1<<0 = 1 + /// ``` + /// + /// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of + /// the slice becomes (2, 1, 5, 5, 2, 5). + /// Let us walk step by step through each argument specification. + /// + /// 1. The first argument in the example slice is turned into `begin = 1` and + /// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we + /// also set the appropriate bit in `shrink_axis_mask`. + /// + /// 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have + /// zero bits contributed. + /// + /// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 + /// dimension in the final shape. Dummy values are contributed to begin, + /// end and stride, while the new_axis_mask bit is set. + /// + /// 4. `...` grab the full ranges from as many dimensions as needed to + /// fully specify a slice for every dimension of the input shape. + /// + /// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated + /// with a dimension that has shape `s` is converted to a positive index + /// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion + /// is done internally so begin, end and strides receive x, -3, and -1. 
+ /// The appropriate begin_mask bit is set to indicate the start range is the + /// full range (ignoring the x). + /// + /// 6. `:` indicates that the entire contents of the corresponding dimension + /// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides + /// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and + /// `end_mask` are also set. + /// + /// *Requirements*: + /// `0 != strides[i] for i in [0, m)` + /// `ellipsis_mask must be a power of two (only one ellipsis)` + /// + /// + /// + /// + /// + /// + /// + /// + /// a bitmask where a bit i being 1 means to ignore the begin + /// value and instead use the largest interval possible. At runtime + /// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or + /// `[-1, n-1]` if `stride[i] < 0` + /// + /// + /// + /// + /// analogous to `begin_mask` + /// + /// + /// + /// + /// a bitmask where bit `i` being 1 means the `i`th + /// position is actually an ellipsis. One bit at most can be 1. + /// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` + /// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis + /// implicitly creates as many range specifications as necessary to fully + /// specify the sliced range for every dimension. For example for a 4-dimensional + /// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. + /// + /// + /// + /// + /// a bitmask where bit `i` being 1 means the `i`th + /// specification creates a new shape 1 dimension. For example + /// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. + /// + /// + /// + /// + /// a bitmask where bit `i` implies that the `i`th + /// specification should shrink the dimensionality. begin and end + /// must imply a slice of size 1 in the dimension. For example in + /// python one might do `foo[:, 3, :]` which would result in + /// `shrink_axis_mask` being 2. + /// + /// + /// + public static Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSlice", name) { args = new object[] { input, begin, end, strides }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return strided_slice_eager_fallback(input, begin, end, strides, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("StridedSlice", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("StridedSlice", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor strided_slice_eager_fallback(Tensor input, Tensor begin, Tensor end, Tensor strides, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, begin, end, strides }; + object[] _attrs = new object[] { "T", input.dtype, "Index", begin.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("StridedSlice", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StridedSlice", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Assign `value` to the sliced l-value reference of `ref`. + /// + /// + /// + /// The values of `value` are assigned to the positions in the variable + /// `ref` that are selected by the slice parameters. The slice parameters + /// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. + /// + /// NOTE this op currently does not support broadcasting and so `value`'s + /// shape must be exactly the shape produced by the slice of `ref`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor strided_slice_assign(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("strided_slice_assign op does not support eager execution. 
Arg ref is a ref."); + } + Dictionary keywords = new(); + keywords["ref"] = ref_; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["value"] = value; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("StridedSliceAssign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("StridedSliceAssign", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor strided_slice_assign_eager_fallback(Tensor ref_, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + throw new RuntimeError($"strided_slice_assign op does not support eager execution. Arg 'ref' is a ref."); + } + /// + /// Returns the gradient of `StridedSlice`. + /// + /// + /// + /// Since `StridedSlice` cuts out pieces of its `input` which is size + /// `shape`, its gradient will have the same shape (which is passed here + /// as `shape`). The gradient will be zero in any element that the slice + /// does not select. + /// + /// Arguments are the same as StridedSliceGrad with the exception that + /// `dy` is the input gradient to be propagated and `shape` is the + /// shape of `StridedSlice`'s `input`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end, Tensor strides, Tensor dy, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSliceGrad", name) { args = new object[] { shape, begin, end, strides, dy }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return strided_slice_grad_eager_fallback(shape, begin, end, strides, dy, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["shape"] = shape; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["dy"] = dy; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("StridedSliceGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("StridedSliceGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor strided_slice_grad_eager_fallback(Tensor shape, Tensor begin, Tensor end, Tensor strides, Tensor dy, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { shape, begin, end, strides, dy }; + object[] _attrs = new object[] { "T", dy.dtype, "Index", shape.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("StridedSliceGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("StridedSliceGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Adds sparse `updates` to an existing tensor according to `indices`. + /// + /// + /// + /// This operation creates a new tensor by adding sparse `updates` to the passed + /// in `tensor`. + /// This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the + /// updates are added onto an existing tensor (as opposed to a variable). If the + /// memory for the existing tensor cannot be re-used, a copy is made and updated. + /// + /// `indices` is an integer tensor containing indices into a new tensor of shape + /// `tensor.shape`. 
The last dimension of `indices` can be at most the rank of + /// `tensor.shape`: + /// + /// ``` + /// indices.shape[-1] <= tensor.shape.rank + /// ``` + /// + /// The last dimension of `indices` corresponds to indices into elements + /// (if `indices.shape[-1] = tensor.shape.rank`) or slices + /// (if `indices.shape[-1] < tensor.shape.rank`) along dimension + /// `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape + /// + /// ``` + /// indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + /// ``` + /// + /// The simplest form of `tensor_scatter_nd_add` is to add individual elements to a + /// tensor by index. For example, say we want to add 4 elements in a rank-1 + /// tensor with 8 elements. + /// + /// In Python, this scatter add operation would look like this: + /// + /// >>> indices = tf.constant([[4], [3], [1], [7]]) + /// >>> updates = tf.constant([9, 10, 11, 12]) + /// >>> tensor = tf.ones([8], dtype=tf.int32) + /// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + /// >>> updated + /// + /// + /// We can also, insert entire slices of a higher rank tensor all at once. For + /// example, if we wanted to insert two slices in the first dimension of a + /// rank-3 tensor with two matrices of new values. + /// + /// In Python, this scatter add operation would look like this: + /// + /// >>> indices = tf.constant([[0], [2]]) + /// >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + /// ... [7, 7, 7, 7], [8, 8, 8, 8]], + /// ... [[5, 5, 5, 5], [6, 6, 6, 6], + /// ... [7, 7, 7, 7], [8, 8, 8, 8]]]) + /// >>> tensor = tf.ones([4, 4, 4],dtype=tf.int32) + /// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + /// >>> updated + /// + /// + /// Note: on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, the index is ignored. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_add(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterAdd", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_add_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_add_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Apply a sparse update to a tensor taking the element-wise maximum. + /// + /// + /// + /// Returns a new tensor copied from `tensor` whose values are element-wise maximum between + /// tensor and updates according to the indices. + /// + /// >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0] + /// >>> indices = [[1], [4], [5]] + /// >>> updates = [1, -1, 1] + /// >>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy() + /// array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32) + /// + /// Refer to `tf.tensor_scatter_nd_update` for more details. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_max(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMax", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_max_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_max_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_min(Tensor tensor, Tensor indices, Tensor updates, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMin", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_min_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_min_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Subtracts sparse `updates` from an existing tensor according to `indices`. + /// + /// + /// + /// This operation creates a new tensor by subtracting sparse `updates` from the + /// passed in `tensor`. 
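+ /// 
+ /// A brief C# usage sketch of this wrapper (the concrete values and the `tf.ones`/`tf.constant`
+ /// helpers from the TensorFlow.NET API are illustrative assumptions; the expected result matches
+ /// the example worked through further below):
+ /// 
+ /// ```
+ /// var tensor = tf.ones(new Shape(8), dtype: tf.int32);
+ /// var indices = tf.constant(new[,] { { 4 }, { 3 }, { 1 }, { 7 } });
+ /// var updates = tf.constant(new[] { 9, 10, 11, 12 });
+ /// var result = tensor_scatter_sub(tensor, indices, updates);   // [1, -10, 1, -9, -8, 1, 1, -11]
+ /// ```
+ /// 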
+ /// This operation is very similar to `tf.scatter_nd_sub`, except that the updates + /// are subtracted from an existing tensor (as opposed to a variable). If the memory + /// for the existing tensor cannot be re-used, a copy is made and updated. + /// + /// `indices` is an integer tensor containing indices into a new tensor of shape + /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: + /// + /// indices.shape[-1] <= shape.rank + /// + /// The last dimension of `indices` corresponds to indices into elements + /// (if `indices.shape[-1] = shape.rank`) or slices + /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + /// `shape`. `updates` is a tensor with shape + /// + /// indices.shape[:-1] + shape[indices.shape[-1]:] + /// + /// The simplest form of tensor_scatter_sub is to subtract individual elements + /// from a tensor by index. For example, say we want to insert 4 scattered elements + /// in a rank-1 tensor with 8 elements. + /// + /// In Python, this scatter subtract operation would look like this: + /// + /// ```python + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// tensor = tf.ones([8], dtype=tf.int32) + /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + /// print(updated) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [1, -10, 1, -9, -8, 1, 1, -11] + /// + /// We can also, insert entire slices of a higher rank tensor all at once. For + /// example, if we wanted to insert two slices in the first dimension of a + /// rank-3 tensor with two matrices of new values. + /// + /// In Python, this scatter add operation would look like this: + /// + /// ```python + /// indices = tf.constant([[0], [2]]) + /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]]]) + /// tensor = tf.ones([4, 4, 4],dtype=tf.int32) + /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + /// print(updated) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + /// [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, the index is ignored. + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_sub(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterSub", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_sub_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterSub", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterSub", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_sub_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterSub", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterSub", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Scatter `updates` into an existing tensor according to `indices`. + /// + /// + /// + /// This operation creates a new tensor by applying sparse `updates` to the passed + /// in `tensor`. + /// This operation is very similar to `tf.scatter_nd`, except that the updates are + /// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory + /// for the existing tensor cannot be re-used, a copy is made and updated. + /// + /// If `indices` contains duplicates, then we pick the last update for the index. + /// + /// If an out of bound index is found on CPU, an error is returned. + /// + /// **WARNING**: There are some GPU specific semantics for this operation. + /// - If an out of bound index is found, the index is ignored. + /// - The order in which updates are applied is nondeterministic, so the output + /// will be nondeterministic if `indices` contains duplicates. + /// + /// `indices` is an integer tensor containing indices into a new tensor of shape + /// `shape`. + /// + /// * `indices` must have at least 2 axes: `(num_updates, index_depth)`. + /// * The last axis of `indices` is how deep to index into `tensor` so this index + /// depth must be less than the rank of `tensor`: `indices.shape[-1] <= tensor.ndim` + /// + /// if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements. + /// if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input + /// `tensor`. + /// + /// Each `update` has a rank of `tensor.rank - indices.shape[-1]`. + /// The overall shape of `updates` is: + /// + /// ``` + /// indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + /// ``` + /// + /// For usage examples see the python [tf.tensor_scatter_nd_update]( + /// https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_scatter_update(Tensor tensor, Tensor indices, Tensor updates, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterUpdate", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_scatter_update_eager_fallback(tensor, indices, updates, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["tensor"] = tensor; + keywords["indices"] = indices; + keywords["updates"] = updates; + var _op = tf.OpDefLib._apply_op_helper("TensorScatterUpdate", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("TensorScatterUpdate", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_scatter_update_eager_fallback(Tensor tensor, Tensor indices, Tensor updates, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { tensor, indices, updates }; + object[] _attrs = new object[] { "T", tensor.dtype, "Tindices", indices.dtype }; + var _result = _execute.execute("TensorScatterUpdate", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorScatterUpdate", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Assign `value` to the sliced l-value reference of `input`. + /// + /// + /// + /// The values of `value` are assigned to the positions in the tensor `input` that + /// are selected by the slice parameters. The slice parameters `begin` `end` + /// `strides` etc. work exactly as in `StridedSlice`. + /// + /// NOTE this op currently does not support broadcasting and so `value`'s shape + /// must be exactly the shape produced by the slice of `input`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor tensor_strided_slice_update(Tensor input, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask = 0, int end_mask = 0, int ellipsis_mask = 0, int new_axis_mask = 0, int shrink_axis_mask = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorStridedSliceUpdate", name) { args = new object[] { input, begin, end, strides, value }, attrs = new Dictionary() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tensor_strided_slice_update_eager_fallback(input, begin, end, strides, value, begin_mask: begin_mask, end_mask: end_mask, ellipsis_mask: ellipsis_mask, new_axis_mask: new_axis_mask, shrink_axis_mask: shrink_axis_mask, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["begin"] = begin; + keywords["end"] = end; + keywords["strides"] = strides; + keywords["value"] = value; + keywords["begin_mask"] = begin_mask; + keywords["end_mask"] = end_mask; + keywords["ellipsis_mask"] = ellipsis_mask; + keywords["new_axis_mask"] = new_axis_mask; + keywords["shrink_axis_mask"] = shrink_axis_mask; + var _op = tf.OpDefLib._apply_op_helper("TensorStridedSliceUpdate", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Index", _op._get_attr_type("Index"), "begin_mask", _op._get_attr_int("begin_mask"), "end_mask", _op._get_attr_int("end_mask"), "ellipsis_mask", _op._get_attr_int("ellipsis_mask"), "new_axis_mask", _op._get_attr_int("new_axis_mask"), "shrink_axis_mask", _op._get_attr_int("shrink_axis_mask") }; + _execute.record_gradient("TensorStridedSliceUpdate", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tensor_strided_slice_update_eager_fallback(Tensor input, Tensor begin, Tensor end, Tensor strides, Tensor value, int begin_mask, int end_mask, int ellipsis_mask, int new_axis_mask, int shrink_axis_mask, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, begin, end, strides, value }; + object[] _attrs = new object[] { "T", input.dtype, "Index", begin.dtype, "begin_mask", begin_mask, "end_mask", end_mask, "ellipsis_mask", ellipsis_mask, "new_axis_mask", new_axis_mask, "shrink_axis_mask", shrink_axis_mask }; + var _result = _execute.execute("TensorStridedSliceUpdate", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TensorStridedSliceUpdate", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Constructs a tensor by tiling a given tensor. + /// + /// + /// + /// This operation creates a new tensor by replicating `input` `multiples` times. + /// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, + /// and the values of `input` are replicated `multiples[i]` times along the 'i'th + /// dimension. For example, tiling `[a b c d]` by `[2]` produces + /// `[a b c d a b c d]`. + /// + /// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + /// >>> b = tf.constant([1,2], tf.int32) + /// >>> tf.tile(a, b) + /// + /// >>> c = tf.constant([2,1], tf.int32) + /// >>> tf.tile(a, c) + /// + /// >>> d = tf.constant([2,2], tf.int32) + /// >>> tf.tile(a, d) + /// + /// + /// + /// + /// + /// + public static Tensor tile(Tensor input, Tensor multiples, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tile", name) { args = new object[] { input, multiples }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tile_eager_fallback(input, multiples, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["multiples"] = multiples; + var _op = tf.OpDefLib._apply_op_helper("Tile", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tmultiples", _op._get_attr_type("Tmultiples") }; + _execute.record_gradient("Tile", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tile_eager_fallback(Tensor input, Tensor multiples, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, multiples }; + object[] _attrs = new object[] { "T", input.dtype, "Tmultiples", multiples.dtype }; + var _result = _execute.execute("Tile", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Tile", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the gradient of `Tile`. + /// + /// + /// + /// Since `Tile` takes an input and repeats the input `multiples` times + /// along each dimension, `TileGrad` takes in `multiples` and aggregates + /// each repeated tile of `input` into `output`. + /// + /// + /// + /// + /// + public static Tensor tile_grad(Tensor input, Tensor multiples, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TileGrad", name) { args = new object[] { input, multiples }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tile_grad_eager_fallback(input, multiples, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["multiples"] = multiples; + var _op = tf.OpDefLib._apply_op_helper("TileGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TileGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tile_grad_eager_fallback(Tensor input, Tensor multiples, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, multiples }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("TileGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TileGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Shuffle dimensions of x according to a permutation. + /// + /// + /// + /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + /// + /// + /// + /// + /// + public static Tensor transpose(Tensor x, Tensor perm, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Transpose", name) { args = new object[] { x, perm }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return transpose_eager_fallback(x, perm, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["perm"] = perm; + var _op = tf.OpDefLib._apply_op_helper("Transpose", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tperm", _op._get_attr_type("Tperm") }; + _execute.record_gradient("Transpose", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor transpose_eager_fallback(Tensor x, Tensor perm, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, perm }; + object[] _attrs = new object[] { "T", x.dtype, "Tperm", perm.dtype }; + var _result = _execute.execute("Transpose", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Transpose", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Finds unique elements in a 1-D tensor. + /// + /// + /// + /// This operation returns a tensor `y` containing all of the unique elements of `x` + /// sorted in the same order that they occur in `x`; `x` does not need to be sorted. + /// This operation also returns a tensor `idx` the same size as `x` that contains + /// the index of each value of `x` in the unique output `y`. In other words: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// Examples: + /// + /// ``` + /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + /// y, idx = unique(x) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// ``` + /// + /// ``` + /// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5] + /// y, idx = unique(x) + /// y ==> [4, 5, 1, 2, 3] + /// idx ==> [0, 1, 2, 3, 4, 4, 0, 1] + /// ``` + /// + /// + /// + /// + /// + public static Tensor[] unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unique", name) { args = new object[] { x }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_eager_fallback(x, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("Unique", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("Unique", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_eager_fallback(Tensor x, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "out_idx", out_idx }; + var _result = _execute.execute("Unique", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Unique", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds unique elements along an axis of a tensor. + /// + /// + /// + /// This operation either returns a tensor `y` containing unique elements + /// along the `axis` of a tensor. The returned unique elements is sorted + /// in the same order as they occur along `axis` in `x`. + /// This operation also returns a tensor `idx` that is the same size as + /// the number of the elements in `x` along the `axis` dimension. It + /// contains the index in the unique output `y`. + /// In other words, for an `1-D` tensor `x` with `axis = None: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// For example: + /// + /// ``` + /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + /// y, idx = unique(x) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// ``` + /// + /// For an `2-D` tensor `x` with `axis = 0`: + /// + /// ``` + /// # tensor 'x' is [[1, 0, 0], + /// # [1, 0, 0], + /// # [2, 0, 0]] + /// y, idx = unique(x, axis=0) + /// y ==> [[1, 0, 0], + /// [2, 0, 0]] + /// idx ==> [0, 0, 1] + /// ``` + /// + /// For an `2-D` tensor `x` with `axis = 1`: + /// + /// ``` + /// # tensor 'x' is [[1, 0, 0], + /// # [1, 0, 0], + /// # [2, 0, 0]] + /// y, idx = unique(x, axis=1) + /// y ==> [[1, 0], + /// [1, 0], + /// [2, 0]] + /// idx ==> [0, 1, 1] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor[] unique_v2(Tensor x, Tensor axis, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueV2", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_v2_eager_fallback(x, axis, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("UniqueV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Taxis", _op._get_attr_type("Taxis"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("UniqueV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_v2_eager_fallback(Tensor x, Tensor axis, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "T", x.dtype, "Taxis", axis.dtype, "out_idx", out_idx }; + var _result = _execute.execute("UniqueV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UniqueV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds unique elements in a 1-D tensor. + /// + /// + /// + /// This operation returns a tensor `y` containing all of the unique elements of `x` + /// sorted in the same order that they occur in `x`. This operation also returns a + /// tensor `idx` the same size as `x` that contains the index of each value of `x` + /// in the unique output `y`. Finally, it returns a third tensor `count` that + /// contains the count of each element of `y` in `x`. In other words: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// For example: + /// + /// ``` + /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + /// y, idx, count = unique_with_counts(x) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// count ==> [2, 1, 3, 1, 2] + /// ``` + /// + /// + /// + /// + /// + public static Tensor[] unique_with_counts(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCounts", name) { args = new object[] { x }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_with_counts_eager_fallback(x, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("UniqueWithCounts", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("UniqueWithCounts", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_with_counts_eager_fallback(Tensor x, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "out_idx", out_idx }; + var _result = _execute.execute("UniqueWithCounts", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UniqueWithCounts", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds unique elements along an axis of a tensor. + /// + /// + /// + /// This operation either returns a tensor `y` containing unique elements + /// along the `axis` of a tensor. The returned unique elements is sorted + /// in the same order as they occur along `axis` in `x`. + /// This operation also returns a tensor `idx` and a tensor `count` + /// that are the same size as the number of the elements in `x` along the + /// `axis` dimension. The `idx` contains the index in the unique output `y` + /// and the `count` contains the count in the unique output `y`. + /// In other words, for an `1-D` tensor `x` with `axis = None: + /// + /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + /// + /// For example: + /// + /// ``` + /// x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + /// y, idx, count = UniqueWithCountsV2(x, axis = [0]) + /// y ==> [1, 2, 4, 7, 8] + /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + /// count ==> [2, 1, 3, 1, 2] + /// ``` + /// + /// For a `2-D` tensor `x` with `axis = 0`: + /// + /// ``` + /// x = tf.constant([[1, 0, 0], + /// [1, 0, 0], + /// [2, 0, 0]]) + /// y, idx, count = UniqueWithCountsV2(x, axis=[0]) + /// y ==> [[1, 0, 0], + /// [2, 0, 0]] + /// idx ==> [0, 0, 1] + /// count ==> [2, 1] + /// ``` + /// + /// For a `2-D` tensor `x` with `axis = 1`: + /// + /// ``` + /// x = tf.constant([[1, 0, 0], + /// [1, 0, 0], + /// [2, 0, 0]]) + /// y, idx, count = UniqueWithCountsV2(x, axis=[1]) + /// y ==> [[1, 0], + /// [1, 0], + /// [2, 0]] + /// idx ==> [0, 1, 1] + /// count ==> [1, 2] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor[] unique_with_counts_v2(Tensor x, Tensor axis, TF_DataType out_idx = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCountsV2", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["out_idx"] = out_idx } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unique_with_counts_v2_eager_fallback(x, axis, out_idx: out_idx, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["out_idx"] = out_idx; + var _op = tf.OpDefLib._apply_op_helper("UniqueWithCountsV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Taxis", _op._get_attr_type("Taxis"), "out_idx", _op._get_attr_type("out_idx") }; + _execute.record_gradient("UniqueWithCountsV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unique_with_counts_v2_eager_fallback(Tensor x, Tensor axis, TF_DataType out_idx, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "T", x.dtype, "Taxis", axis.dtype, "out_idx", out_idx }; + var _result = _execute.execute("UniqueWithCountsV2", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UniqueWithCountsV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. + /// + /// + /// + /// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. + /// For example, given a tensor of shape `(A, B, C, D)`; + /// + /// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` + /// and each tensor in `output` will have shape `(B, C, D)`. (Note that the + /// dimension unpacked along is gone, unlike `split`). + /// + /// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` + /// and each tensor in `output` will have shape `(A, C, D)`. + /// Etc. + /// + /// This is the opposite of `pack`. + /// + /// + /// + /// + /// + /// + /// Dimension along which to unpack. Negative values wrap around, so the + /// valid range is `[-R, R)`. + /// + /// + /// + public static Tensor[] unpack(Tensor value, int num = 0, int axis = 0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unpack", name) { args = new object[] { value }, attrs = new Dictionary() { ["num"] = num, ["axis"] = axis } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return unpack_eager_fallback(value, num: num, axis: axis, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["num"] = num; + keywords["axis"] = axis; + var _op = tf.OpDefLib._apply_op_helper("Unpack", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "num", _op._get_attr_int("num"), "T", _op._get_attr_type("T"), "axis", _op._get_attr_int("axis") }; + _execute.record_gradient("Unpack", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] unpack_eager_fallback(Tensor value, int num, int axis, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "num", num, "T", value.dtype, "axis", axis }; + var _result = _execute.execute("Unpack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Unpack", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Converts an array of flat indices into a tuple of coordinate arrays. + /// + /// + /// + /// + /// Example: + /// + /// ``` + /// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) + /// # 'dims' represent a hypothetical (3, 3) tensor of indices: + /// # [[0, 1, *2*], + /// # [3, 4, *5*], + /// # [6, *7*, 8]] + /// # For each entry from 'indices', this operation returns + /// # its coordinates (marked with '*'), such as + /// # 2 ==> (0, 2) + /// # 5 ==> (1, 2) + /// # 7 ==> (2, 1) + /// y ==> [[0, 1, 2], [2, 2, 1]] + /// ``` + /// + /// @compatibility(numpy) + /// Equivalent to np.unravel_index + /// @end_compatibility + /// + /// + /// + /// + /// + public static Tensor unravel_index(Tensor indices, Tensor dims, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnravelIndex", name) { args = new object[] { indices, dims }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unravel_index_eager_fallback(indices, dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["dims"] = dims; + var _op = tf.OpDefLib._apply_op_helper("UnravelIndex", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("UnravelIndex", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unravel_index_eager_fallback(Tensor indices, Tensor dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, dims }; + object[] _attrs = new object[] { "Tidx", indices.dtype }; + var _result = _execute.execute("UnravelIndex", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnravelIndex", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Applies upper_bound(sorted_search_values, values) along each row. + /// + /// + /// + /// Each set of rows with the same index in (sorted_inputs, values) is treated + /// independently. The resulting row is the equivalent of calling + /// `np.searchsorted(sorted_inputs, values, side='right')`. + /// + /// The result is not a global index to the entire + /// `Tensor`, but rather just the index in the last dimension. + /// + /// A 2-D example: + /// sorted_sequence = [[0, 3, 9, 9, 10], + /// [1, 2, 3, 4, 5]] + /// values = [[2, 4, 9], + /// [0, 2, 6]] + /// + /// result = UpperBound(sorted_sequence, values) + /// + /// result == [[1, 2, 4], + /// [0, 2, 5]] + /// + /// + /// + /// + /// + /// + public static Tensor upper_bound(Tensor sorted_inputs, Tensor values, TF_DataType out_type = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UpperBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return upper_bound_eager_fallback(sorted_inputs, values, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["sorted_inputs"] = sorted_inputs; + keywords["values"] = values; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("UpperBound", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("UpperBound", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor upper_bound_eager_fallback(Tensor sorted_inputs, Tensor values, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { sorted_inputs, values }; + object[] _attrs = new object[] { "T", sorted_inputs.dtype, "out_type", out_type }; + var _result = _execute.execute("UpperBound", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UpperBound", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns locations of nonzero / true values in a tensor. + /// + /// + /// + /// This operation returns the coordinates of true elements in `input`. The + /// coordinates are returned in a 2-D tensor where the first dimension (rows) + /// represents the number of true elements, and the second dimension (columns) + /// represents the coordinates of the true elements. Keep in mind, the shape of + /// the output tensor can vary depending on how many true values there are in + /// `input`. Indices are output in row-major order. + /// + /// For example: + /// + /// ``` + /// # 'input' tensor is [[True, False] + /// # [True, False]] + /// # 'input' has two true values, so output has two coordinates. + /// # 'input' has rank of 2, so coordinates have two indices. + /// where(input) ==> [[0, 0], + /// [1, 0]] + /// + /// # `input` tensor is [[[True, False] + /// # [True, False]] + /// # [[False, True] + /// # [False, True]] + /// # [[False, False] + /// # [False, True]]] + /// # 'input' has 5 true values, so output has 5 coordinates. + /// # 'input' has rank of 3, so coordinates have three indices. + /// where(input) ==> [[0, 0, 0], + /// [0, 1, 0], + /// [1, 0, 1], + /// [1, 1, 1], + /// [2, 1, 1]] + /// + /// # `input` tensor is [[[1.5, 0.0] + /// # [-0.5, 0.0]] + /// # [[0.0, 0.25] + /// # [0.0, 0.75]] + /// # [[0.0, 0.0] + /// # [0.0, 0.01]]] + /// # 'input' has 5 nonzero values, so output has 5 coordinates. + /// # 'input' has rank of 3, so coordinates have three indices. + /// where(input) ==> [[0, 0, 0], + /// [0, 1, 0], + /// [1, 0, 1], + /// [1, 1, 1], + /// [2, 1, 1]] + /// + /// # `input` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] + /// # [0.0 + 0.5j, 0.0 + 0.0j]] + /// # [[0.0 + 0.0j, 0.25 + 1.5j] + /// # [0.0 + 0.0j, 0.75 + 0.0j]] + /// # [[0.0 + 0.0j, 0.0 + 0.0j] + /// # [0.0 + 0.0j, 0.01 + 0.0j]]] + /// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. + /// # 'input' has rank of 3, so coordinates have three indices. 
+ /// where(input) ==> [[0, 0, 0], + /// [0, 1, 0], + /// [1, 0, 1], + /// [1, 1, 1], + /// [2, 1, 1]] + /// ``` + /// + /// + /// + /// + public static Tensor where(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Where", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return where_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Where", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Where", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor where_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Where", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Where", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns a tensor of zeros with the same shape and type as x. + /// + /// + /// + public static Tensor zeros_like(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ZerosLike", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return zeros_like_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("ZerosLike", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ZerosLike", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor zeros_like_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("ZerosLike", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ZerosLike", _inputs_flat, _attrs, _result); + } + return _result[0]; } } diff --git a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs index bb84ac390..5663f9c97 100644 --- a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs @@ -19,7 +19,7 @@ public static Tensor[] partitioned_call(Tensors args, TF_DataType[] tout, EagerD { try { - return tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("PartitionedCall", name, + return tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "PartitionedCall", name, args, tout, f, config, config_proto, executor_type)); } catch (Exception) @@ -50,7 +50,7 @@ public static Tensor[] partitioned_call(Tensors args, TF_DataType[] tout, EagerD var output = 
tf.OpDefLib._apply_op_helper("PartitionedCall", name, kwargs); var result = output.outputs; - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -88,7 +88,7 @@ public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, Nam try { var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "SymbolicGradient", name, input, Tout, f)); + tf.Context, "SymbolicGradient", name, input, Tout, f)); return _result; } catch (Exception) @@ -107,7 +107,7 @@ public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, Nam } var op = tf.OpDefLib._apply_op_helper("SymbolicGradient", name, new object[] { input, Tout, f }); var result = op.outputs; - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -117,8 +117,8 @@ public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, Nam public static Tensor[] symbolic_gradient_eager_fallback(Tensor[] input, TF_DataType[] Tout, NameAttrList f, string name, Context ctx) { object[] attrs = new object[] { "Tin", input, "Tout", Tout, "f", f }; - var result = execute.executes("SymbolicGradient", Tout.Length, input, attrs, ctx, name); - if (execute.must_record_gradient()) + var result = _execute.execute("SymbolicGradient", Tout.Length, input, attrs, ctx, name); + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } diff --git a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs new file mode 100644 index 000000000..490cb1880 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs @@ -0,0 +1,1378 @@ +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ + +using Tensorflow.Eager; +using Tensorflow.Contexts; +using static Tensorflow.Binding; + +namespace Tensorflow; + +internal static class gen_io_ops +{ + public static Tensor fixed_length_record_reader(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReader", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fixed_length_record_reader_eager_fallback(header_bytes: header_bytes, record_bytes: record_bytes, footer_bytes: footer_bytes, hop_bytes: hop_bytes, container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "header_bytes", _op._get_attr_int("header_bytes"), "record_bytes", _op._get_attr_int("record_bytes"), "footer_bytes", _op._get_attr_int("footer_bytes"), "hop_bytes", _op._get_attr_int("hop_bytes"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("FixedLengthRecordReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fixed_length_record_reader_eager_fallback(int header_bytes, int record_bytes, int footer_bytes, int hop_bytes, string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name }; + var _result = _execute.execute("FixedLengthRecordReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FixedLengthRecordReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string encoding = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReaderV2", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name, "encoding", encoding)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fixed_length_record_reader_v2_eager_fallback(header_bytes: header_bytes, record_bytes: record_bytes, footer_bytes: footer_bytes, hop_bytes: hop_bytes, container: container, shared_name: shared_name, encoding: encoding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; keywords["encoding"] = encoding; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "header_bytes", _op._get_attr_int("header_bytes"), "record_bytes", _op._get_attr_int("record_bytes"), "footer_bytes", _op._get_attr_int("footer_bytes"), "hop_bytes", _op._get_attr_int("hop_bytes"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name"), "encoding", _op.get_attr("encoding") }; + _execute.record_gradient("FixedLengthRecordReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fixed_length_record_reader_v2_eager_fallback(int header_bytes, int record_bytes, int footer_bytes, int hop_bytes, string container, string shared_name, string encoding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name, "encoding", encoding }; + var _result = _execute.execute("FixedLengthRecordReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FixedLengthRecordReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor identity_reader(string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReader", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_reader_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("IdentityReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_reader_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("IdentityReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IdentityReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor identity_reader_v2(string container = "", string shared_name = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReaderV2", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return identity_reader_v2_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("IdentityReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor identity_reader_v2_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("IdentityReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IdentityReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor matching_files(Tensor pattern, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatchingFiles", name, pattern)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return matching_files_eager_fallback(pattern, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["pattern"] = pattern; + var _op = tf.OpDefLib._apply_op_helper("MatchingFiles", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("MatchingFiles", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor matching_files_eager_fallback(Tensor pattern, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { pattern }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("MatchingFiles", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatchingFiles", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Operation merge_v2_checkpoints(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs = true, bool allow_missing_files = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MergeV2Checkpoints", name, checkpoint_prefixes, destination_prefix, "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files)); + return null; + } + catch (Exception) + { + } + try + { + return merge_v2_checkpoints_eager_fallback(checkpoint_prefixes, destination_prefix, delete_old_dirs: delete_old_dirs, allow_missing_files: allow_missing_files, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["checkpoint_prefixes"] = checkpoint_prefixes; + keywords["destination_prefix"] = destination_prefix; + keywords["delete_old_dirs"] = delete_old_dirs; keywords["allow_missing_files"] = allow_missing_files; var _op = tf.OpDefLib._apply_op_helper("MergeV2Checkpoints", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "delete_old_dirs", _op._get_attr_bool("delete_old_dirs"), "allow_missing_files", _op._get_attr_bool("allow_missing_files") }; + _execute.record_gradient("MergeV2Checkpoints", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor merge_v2_checkpoints_eager_fallback(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs, bool allow_missing_files, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { checkpoint_prefixes, destination_prefix }; + object[] _attrs = new object[] { "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files }; + var _result = _execute.execute("MergeV2Checkpoints", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MergeV2Checkpoints", _inputs_flat, _attrs, _result); + } + return null; + } + public static Tensor read_file(Tensor filename, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadFile", name, filename)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return read_file_eager_fallback(filename, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + var _op = tf.OpDefLib._apply_op_helper("ReadFile", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReadFile", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor read_file_eager_fallback(Tensor filename, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReadFile", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReadFile", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor reader_num_records_produced(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_num_records_produced op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumRecordsProduced", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumRecordsProduced", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_records_produced_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_num_records_produced op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor reader_num_records_produced_v2(Tensor reader_handle, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumRecordsProducedV2", name, reader_handle)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reader_num_records_produced_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumRecordsProducedV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumRecordsProducedV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_records_produced_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderNumRecordsProducedV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderNumRecordsProducedV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor reader_num_work_units_completed(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_num_work_units_completed op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumWorkUnitsCompleted", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumWorkUnitsCompleted", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_work_units_completed_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_num_work_units_completed op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor reader_num_work_units_completed_v2(Tensor reader_handle, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumWorkUnitsCompletedV2", name, reader_handle)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reader_num_work_units_completed_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderNumWorkUnitsCompletedV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderNumWorkUnitsCompletedV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_num_work_units_completed_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderNumWorkUnitsCompletedV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderNumWorkUnitsCompletedV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor[] reader_read(Tensor reader_handle, Tensor queue_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_read op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderRead", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderRead", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_eager_fallback(Tensor reader_handle, Tensor queue_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_read op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor[] reader_read_up_to(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_read_up_to op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + keywords["num_records"] = num_records; + var _op = tf.OpDefLib._apply_op_helper("ReaderReadUpTo", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReadUpTo", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_up_to_eager_fallback(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string name, Context ctx) + { + throw new RuntimeError($"reader_read_up_to op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor[] reader_read_up_to_v2(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadUpToV2", name, reader_handle, queue_handle, num_records)); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return reader_read_up_to_v2_eager_fallback(reader_handle, queue_handle, num_records, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + keywords["num_records"] = num_records; + var _op = tf.OpDefLib._apply_op_helper("ReaderReadUpToV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReadUpToV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_up_to_v2_eager_fallback(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle, queue_handle, num_records }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderReadUpToV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderReadUpToV2", _inputs_flat, _attrs, _result); + } + return _result; + } + public static Tensor[] reader_read_v2(Tensor reader_handle, Tensor queue_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadV2", name, reader_handle, queue_handle)); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return reader_read_v2_eager_fallback(reader_handle, queue_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["queue_handle"] = queue_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderReadV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReadV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] reader_read_v2_eager_fallback(Tensor reader_handle, Tensor queue_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle, queue_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderReadV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderReadV2", _inputs_flat, _attrs, _result); + } + return _result; + } + public static Operation reader_reset(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_reset op does not support eager execution. 
Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderReset", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderReset", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_reset_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_reset op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Operation reader_reset_v2(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderResetV2", name, reader_handle)); + return null; + } + catch (Exception) + { + } + try + { + return reader_reset_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderResetV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderResetV2", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_reset_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderResetV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderResetV2", _inputs_flat, _attrs, _result); + } + return null; + } + public static Operation reader_restore_state(Tensor reader_handle, Tensor state, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_restore_state op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["state"] = state; + var _op = tf.OpDefLib._apply_op_helper("ReaderRestoreState", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderRestoreState", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_restore_state_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) + { + throw new RuntimeError($"reader_restore_state op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor state, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderRestoreStateV2", name, reader_handle, state)); + return null; + } + catch (Exception) + { + } + try + { + return reader_restore_state_v2_eager_fallback(reader_handle, state, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + keywords["state"] = state; + var _op = tf.OpDefLib._apply_op_helper("ReaderRestoreStateV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderRestoreStateV2", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor reader_restore_state_v2_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle, state }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderRestoreStateV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderRestoreStateV2", _inputs_flat, _attrs, _result); + } + return null; + } + public static Tensor reader_serialize_state(Tensor reader_handle, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + throw new RuntimeError("reader_serialize_state op does not support eager execution. Arg reader_handle is a ref."); + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderSerializeState", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderSerializeState", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_serialize_state_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + throw new RuntimeError($"reader_serialize_state op does not support eager execution. Arg 'reader_handle' is a ref."); + } + public static Tensor reader_serialize_state_v2(Tensor reader_handle, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderSerializeStateV2", name, reader_handle)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reader_serialize_state_v2_eager_fallback(reader_handle, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["reader_handle"] = reader_handle; + var _op = tf.OpDefLib._apply_op_helper("ReaderSerializeStateV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ReaderSerializeStateV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reader_serialize_state_v2_eager_fallback(Tensor reader_handle, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { reader_handle }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ReaderSerializeStateV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReaderSerializeStateV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataType dt, int preferred_shard = -1, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Restore", name, file_pattern, tensor_name, "dt", dt, "preferred_shard", preferred_shard)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return restore_eager_fallback(file_pattern, tensor_name, dt: dt, preferred_shard: preferred_shard, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["file_pattern"] = file_pattern; + keywords["tensor_name"] = tensor_name; + keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("Restore", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dt", _op._get_attr_type("dt"), "preferred_shard", _op._get_attr_int("preferred_shard") }; + _execute.record_gradient("Restore", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor restore_eager_fallback(Tensor file_pattern, Tensor tensor_name, TF_DataType dt, int preferred_shard, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { file_pattern, tensor_name }; + object[] _attrs = new object[] { "dt", dt, "preferred_shard", preferred_shard }; + var _result = _execute.execute("Restore", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Restore", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tensor shape_and_slice, TF_DataType dt, int preferred_shard = -1, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreSlice", name, file_pattern, tensor_name, shape_and_slice, "dt", dt, "preferred_shard", preferred_shard)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return restore_slice_eager_fallback(file_pattern, tensor_name, shape_and_slice, dt: dt, preferred_shard: preferred_shard, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["file_pattern"] = file_pattern; + keywords["tensor_name"] = tensor_name; + keywords["shape_and_slice"] = shape_and_slice; + keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("RestoreSlice", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dt", _op._get_attr_type("dt"), "preferred_shard", _op._get_attr_int("preferred_shard") }; + _execute.record_gradient("RestoreSlice", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor restore_slice_eager_fallback(Tensor file_pattern, Tensor tensor_name, Tensor shape_and_slice, TF_DataType dt, int preferred_shard, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { file_pattern, tensor_name, shape_and_slice }; + object[] _attrs = new object[] { "dt", dt, "preferred_shard", preferred_shard }; + var _result = _execute.execute("RestoreSlice", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RestoreSlice", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreV2", name, prefix, tensor_names, shape_and_slices, "dtypes", dtypes)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return restore_v2_eager_fallback(prefix, tensor_names, shape_and_slices, dtypes: dtypes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["prefix"] = prefix; + keywords["tensor_names"] = tensor_names; + keywords["shape_and_slices"] = shape_and_slices; + keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("RestoreV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtypes", _op.get_attr("dtypes") }; + _execute.record_gradient("RestoreV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor restore_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices }; + object[] _attrs = new object[] { "dtypes", dtypes }; + var _result = _execute.execute("RestoreV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RestoreV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Operation save(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Save", name, filename, tensor_names, data, "T", T)); + return null; + } + catch (Exception) + { + } + try + { + return save_eager_fallback(filename, tensor_names, data, T: T, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + keywords["tensor_names"] = tensor_names; + keywords["data"] = data; + keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("Save", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T") }; + _execute.record_gradient("Save", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor save_eager_fallback(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, data }; + object[] _attrs = new object[] { "T", T }; + var _result = _execute.execute("Save", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Save", _inputs_flat, _attrs, _result); + } + return null; + } + public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveSlices", name, filename, tensor_names, shapes_and_slices, data, "T", T)); + return null; + } + catch (Exception) + { + } + try + { + return save_slices_eager_fallback(filename, tensor_names, shapes_and_slices, data, T: T, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + keywords["tensor_names"] = tensor_names; + keywords["shapes_and_slices"] = shapes_and_slices; + keywords["data"] = data; + keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("SaveSlices", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op.get_attr("T") }; + _execute.record_gradient("SaveSlices", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor save_slices_eager_fallback(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, shapes_and_slices, data }; + object[] _attrs = new object[] { "T", T }; + var _result = _execute.execute("SaveSlices", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SaveSlices", _inputs_flat, _attrs, _result); + } + return null; + } + public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveV2", name, prefix, tensor_names, shape_and_slices, tensors, "dtypes", dtypes)); + return null; + } + catch (Exception) + { + } + try + { + return save_v2_eager_fallback(prefix, tensor_names, shape_and_slices, tensors, dtypes: dtypes, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["prefix"] = prefix; + keywords["tensor_names"] = tensor_names; + keywords["shape_and_slices"] = shape_and_slices; + keywords["tensors"] = tensors; + keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("SaveV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtypes", _op.get_attr("dtypes") }; + _execute.record_gradient("SaveV2", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor save_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices, tensors }; + object[] _attrs = new object[] { "dtypes", dtypes }; + var _result = _execute.execute("SaveV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SaveV2", _inputs_flat, _attrs, _result); + } + return null; + } + public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_shards, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilename", name, basename, shard, num_shards)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sharded_filename_eager_fallback(basename, shard, num_shards, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["basename"] = basename; + keywords["shard"] = shard; + keywords["num_shards"] = num_shards; + var _op = tf.OpDefLib._apply_op_helper("ShardedFilename", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ShardedFilename", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sharded_filename_eager_fallback(Tensor basename, Tensor shard, Tensor num_shards, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { basename, shard, num_shards }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ShardedFilename", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ShardedFilename", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor sharded_filespec(Tensor basename, Tensor num_shards, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilespec", name, basename, num_shards)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sharded_filespec_eager_fallback(basename, num_shards, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["basename"] = basename; + keywords["num_shards"] = num_shards; + var _op = tf.OpDefLib._apply_op_helper("ShardedFilespec", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("ShardedFilespec", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sharded_filespec_eager_fallback(Tensor basename, Tensor num_shards, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { basename, num_shards }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("ShardedFilespec", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ShardedFilespec", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor text_line_reader(int skip_header_lines = 0, string container = "", string shared_name = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReader", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return text_line_reader_eager_fallback(skip_header_lines: skip_header_lines, container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "skip_header_lines", _op._get_attr_int("skip_header_lines"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("TextLineReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor text_line_reader_eager_fallback(int skip_header_lines, string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name }; + var _result = _execute.execute("TextLineReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TextLineReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor text_line_reader_v2(int skip_header_lines = 0, string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReaderV2", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return text_line_reader_v2_eager_fallback(skip_header_lines: skip_header_lines, container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "skip_header_lines", _op._get_attr_int("skip_header_lines"), "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("TextLineReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor text_line_reader_v2_eager_fallback(int skip_header_lines, string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name }; + var _result = _execute.execute("TextLineReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TextLineReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor whole_file_reader(string container = "", string shared_name = "", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReader", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return whole_file_reader_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReader", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("WholeFileReader", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor whole_file_reader_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("WholeFileReader", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("WholeFileReader", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Tensor whole_file_reader_v2(string container = "", string shared_name = "", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReaderV2", name, "container", container, "shared_name", shared_name)); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return whole_file_reader_v2_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReaderV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") }; + _execute.record_gradient("WholeFileReaderV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor whole_file_reader_v2_eager_fallback(string container, string shared_name, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { }; + object[] _attrs = new object[] { "container", container, "shared_name", shared_name }; + var _result = _execute.execute("WholeFileReaderV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("WholeFileReaderV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + public static Operation write_file(Tensor filename, Tensor contents, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WriteFile", name, filename, contents)); + return null; + } + catch (Exception) + { + } + try + { + return write_file_eager_fallback(filename, contents, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["filename"] = filename; + keywords["contents"] = contents; + var _op = tf.OpDefLib._apply_op_helper("WriteFile", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("WriteFile", _op.inputs, _attrs, _result); + } + return _op; + } + + public static Tensor write_file_eager_fallback(Tensor filename, Tensor contents, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { filename, contents }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("WriteFile", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("WriteFile", _inputs_flat, _attrs, _result); + } + return null; + } +} diff --git a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs index 03159aaa1..d2907f090 100644 --- a/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_logging_ops.cs @@ -26,7 +26,7 @@ public static Operation assert(Tensor condition, object[] data, long summarize = if (tf.Context.executing_eagerly()) { var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "Assert", name, + tf.Context, "Assert", name, new object[] { condition, data, summarize })); return results[0]; diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs index 564abbd0f..3456d9b3d 100644 --- 
a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs @@ -1,569 +1,9487 @@ -/***************************************************************************** - Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -******************************************************************************/ - -using System; -using System.Collections; -using System.Collections.Generic; -using System.Linq; +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ + +using Tensorflow.Eager; using Tensorflow.Contexts; using static Tensorflow.Binding; -namespace Tensorflow +namespace Tensorflow; + +public static class gen_math_ops { - public static partial class gen_math_ops - { - public static Tensor _all(Tensor input, Tensor axis, bool keep_dims = false, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("All", name, args: new { input, reduction_indices = axis, keep_dims = keep_dims }); - - return _op.outputs[0]; - } - - /// - /// Add all input tensors element wise. - /// - /// - /// - /// - public static Tensor add_n(Tensor[] inputs, string name = null) - => tf.Context.ExecuteOp("AddN", name, new ExecuteOpArgs() - { - OpInputArgs = new object[] { inputs } - }); - - /// - /// Returns the index with the largest value across dimensions of a tensor. - /// - /// - /// - /// - /// - /// - public static Tensor arg_max(Tensor input, Axis dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => tf.Context.ExecuteOp("ArgMax", name, new ExecuteOpArgs(input, dimension) - .SetAttributes(new { output_type })); - - - /// - /// Returns the index with the smallest value across dimensions of a tensor. - /// - /// - /// - /// - /// - /// - public static Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null) - => tf.Context.ExecuteOp("ArgMin", name, new ExecuteOpArgs(input, dimension) - .SetAttributes(new { output_type })); - - /// - /// Computes Psi, the derivative of Lgamma (the log of the absolute value of - /// `Gamma(x)`), element-wise. - /// - /// - /// - /// - public static Tensor digamma(Tensor x, string name = null) - => tf.OpDefLib._apply_op_helper("Digamma", name, args: new { x }).output; - - /// - /// Returns 0 if the denominator is zero. - /// - /// - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'DivNoNan'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// - /// *NOTE*: DivNoNan supports broadcasting. 
More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - /// - public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("DivNoNan", name, new ExecuteOpArgs(x, y)); - - public static Tensor mean(Tensor input, int axis, bool keep_dims = false, string name = null) - => mean(input, ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name); - - /// - /// Computes the mean of elements across dimensions of a tensor. - /// Reduces `input` along the dimensions given in `axis`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. - /// - /// A `Tensor`. Must be one of the following types: - /// `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. - /// The tensor to reduce. - /// A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. - /// An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `input`. - public static Tensor mean(Tensor input, Tensor axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Mean", name, new ExecuteOpArgs(input, axis) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - Tidx = op.get_attr("Tidx"), - keep_dims = op.get_attr("keep_dims") - } - }.SetAttributes(new { keep_dims, reduction_indices = axis })); - - public static Tensor mean(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null) - { - if (tf.Context.executing_eagerly()) - { - return mean_eager_fallback(inputs, axis, keep_dims: keep_dims, name: name, ctx: tf.Context); - } - - var _op = tf.OpDefLib._apply_op_helper("Mean", name, args: new { inputs, reduction_indices = axis, keep_dims = keep_dims }); - - return _op.output; - } - - private static Tensor mean_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) - { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs }); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "Mean", 1, _inputs_flat, _attrs, name: name)[0]; - } - - public static Tensor prod(T1 input, T2 axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Prod", name, - new ExecuteOpArgs(input, axis).SetAttributes(new { keep_dims, reduction_indices = axis })); - - private static Tensor prod_eager_fallback(Tensor input_t, int[] axis, bool keep_dims, string name, Context ctx = null) - { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { input_t }); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, default_dtype: tf.int32, args: new[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "Prod", 1, _inputs_flat, _attrs, name: name)[0]; - } - - public static Tensor acos(Tensor x, string name = null) - => tf.Context.ExecuteOp("Acos", name, new ExecuteOpArgs(x)); - - public static Tensor 
asin(Tensor x, string name = null) - => tf.Context.ExecuteOp("Asin", name, new ExecuteOpArgs(x)); - - public static Tensor add(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("Add", name, new ExecuteOpArgs(x, y)); - - public static Tensor add(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Add", name, new ExecuteOpArgs(x, y)); - - public static Tensor add_v2(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("AddV2", name, new ExecuteOpArgs(x, y)); - - public static Tensor atan(Tensor x, string name = null) - => tf.Context.ExecuteOp("Atan", name, new ExecuteOpArgs(x)); - - public static Tensor ceil(Tensor x, string name = null) - => tf.Context.ExecuteOp("Ceil", name, new ExecuteOpArgs(x)); - - public static Tensor sin(Tensor x, string name = null) - => tf.Context.ExecuteOp("Sin", name, new ExecuteOpArgs(x)); - - /// - /// Computes sigmoid of x element-wise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Specifically, y = 1 / (1 + exp(-x)). - /// - public static Tensor sigmoid(Tensor x, string name = "Sigmoid") - => tf.Context.ExecuteOp("Sigmoid", name, new ExecuteOpArgs(x)); + /// + /// Computes the absolute value of a tensor. + /// + /// + /// + /// Given a tensor `x`, this operation returns a tensor containing the absolute + /// value of each element in `x`. For example, if x is an input element and y is + /// an output element, this operation computes \(y = |x|\). + /// + /// + /// + /// + public static Tensor abs(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Abs", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return abs_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Abs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Abs", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor abs_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Abs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Abs", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the element-wise sum of a list of tensors. + /// + /// + /// + /// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not + /// wait for all of its inputs to be ready before beginning to sum. This can + /// save memory if inputs are ready at different times, since minimum temporary + /// storage is proportional to the output size rather than the inputs size. + /// + /// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. + /// + /// Returns a `Tensor` of same shape and type as the elements of `inputs`. + /// + /// + /// + /// + /// + /// Shape of elements of `inputs`. 
+ /// + /// + /// + public static Tensor accumulate_nv2(Tensors inputs, Shape shape, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AccumulateNV2", name) { args = new object[] { inputs }, attrs = new Dictionary() { ["shape"] = shape } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return accumulate_nv2_eager_fallback(inputs, shape: shape, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + keywords["shape"] = shape; + var _op = tf.OpDefLib._apply_op_helper("AccumulateNV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "shape", _op.get_attr("shape") }; + _execute.record_gradient("AccumulateNV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor accumulate_nv2_eager_fallback(Tensors inputs, Shape shape, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(inputs); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", inputs.Length, "T", inputs.dtype, "shape", shape }; + var _result = _execute.execute("AccumulateNV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AccumulateNV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes acos of x element-wise. + /// + /// + /// + /// + /// Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. + /// + /// Input range is `[-1, 1]` and the output has a range of `[0, pi]`. + /// + /// + /// + /// + /// + public static Tensor acos(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acos", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return acos_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Acos", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Acos", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor acos_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Acos", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Acos", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes inverse hyperbolic cosine of x element-wise. + /// + /// + /// + /// Given an input tensor, the function computes inverse hyperbolic cosine of every element. + /// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. 
+ /// + /// ```python + /// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] + /// ``` + /// + /// + /// + /// + public static Tensor acosh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acosh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return acosh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Acosh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Acosh", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Computes the gradient of the sigmoid of x wrt its input. - /// - /// - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Specifically, grad = dy * y * (1 - y), where y = sigmoid(x), and - /// dy is the corresponding input gradient. - /// - public static Tensor sigmoid_grad(Tensor y, Tensor dy, string name = "SigmoidGrad") - => tf.Context.ExecuteOp("SigmoidGrad", name, new ExecuteOpArgs(y, dy)); + public static Tensor acosh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Acosh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Acosh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x + y element-wise. + /// + /// + /// + /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor. + /// + /// Both input and output have a range `(-inf, inf)`. + /// + /// + /// + /// + /// + /// + public static Tensor add(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Add", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return add_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Add", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Add", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor sign(T x, string name = "Sign") - => tf.Context.ExecuteOp("Sign", name, new ExecuteOpArgs(x)); + public static Tensor add_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Add", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Add", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Add all input tensors element wise. + /// + /// + /// + /// Inputs must be of same size and shape. + /// + /// ```python + /// x = [9, 7, 10] + /// tf.math.add_n(x) ==> 26 + /// ``` + /// + /// + /// + /// + public static Tensor add_n(Tensors inputs, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddN", name) { args = new object[] { inputs }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return add_n_eager_fallback(inputs, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["inputs"] = inputs; + var _op = tf.OpDefLib._apply_op_helper("AddN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "N", _op._get_attr_int("N"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AddN", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor sinh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Sinh", name, new ExecuteOpArgs(x)); + public static Tensor add_n_eager_fallback(Tensors inputs, string name, Context ctx) + { + List _inputs_flat_list = new(); + _inputs_flat_list.AddRange(inputs); + var _inputs_flat = _inputs_flat_list.ToArray(); + object[] _attrs = new object[] { "N", inputs.Length, "T", inputs.dtype }; + var _result = _execute.execute("AddN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AddN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x + y element-wise. + /// + /// + /// + /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor add_v2(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddV2", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return add_v2_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("AddV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("AddV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor cos(T x, string name = null) - => tf.Context.ExecuteOp("Cos", name, new ExecuteOpArgs(x)); + public static Tensor add_v2_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("AddV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AddV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the "logical and" of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor all(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "All", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return all_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("All", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("All", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor cosh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Cosh", name, new ExecuteOpArgs(x)); + public static Tensor all_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("All", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("All", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the argument of a complex number. + /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the argument of each element in `input`. All elements in + /// `input` must be complex numbers of the form \(a + bj\), where *a* + /// is the real part and *b* is the imaginary part. + /// + /// The argument returned by this operation is of the form \(atan2(b, a)\). + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.angle(input) ==> [2.0132, 1.056] + /// ``` + /// + /// @compatibility(numpy) + /// Equivalent to np.angle. + /// @end_compatibility + /// + /// + /// + /// + /// + public static Tensor angle(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Angle", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return angle_eager_fallback(input, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Angle", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Angle", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Computes the sum along segments of a tensor. 
- /// - /// - /// - /// - /// - /// - public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string name = null) + public static Tensor angle_eager_fallback(Tensor input, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "Tout", Tout }; + var _result = _execute.execute("Angle", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Angle", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the "logical or" of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor any(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Any", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return any_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Any", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) { - var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentSum", name, new { data, segment_ids, num_segments }); - return _op.outputs[0]; + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Any", _op.inputs, _attrs, _result); } + return _result[0]; + } - public static Tensor tan(Tensor x, string name = null) - => tf.Context.ExecuteOp("Tan", name, new ExecuteOpArgs(x)); - - public static Tensor tanh(Tensor x, string name = null) - => tf.Context.ExecuteOp("Tanh", name, new ExecuteOpArgs(x)); - - /// - /// Computes the gradient for the tanh of `x` wrt its input. - /// - /// - /// - /// - /// - public static Tensor tanh_grad(Tensor y, Tensor dy, string name = null) - => tf.Context.ExecuteOp("TanhGrad", name, new ExecuteOpArgs(y, dy)); - - public static Tensor floor(Tensor x, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("Floor", name, args: new { x }); - - return _op.outputs[0]; - } - - public static Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("ClipByValue", name, args: new { t, clip_value_min, clip_value_max }); - - return _op.outputs[0]; - } - - public static Tensor greater(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Greater", name, new ExecuteOpArgs(x, y)); - - /// - /// Computes the log of the absolute value of `Gamma(x)` element-wise. - /// - /// - /// A `Tensor`. 
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. - /// - /// - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - public static Tensor lgamma(Tensor x, string name = null) - => tf.Context.ExecuteOp("Lgamma", name, new ExecuteOpArgs(x)); - - - public static Tensor greater_equal(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("GreaterEqual", name, new ExecuteOpArgs(x, y)); - - public static Tensor less(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Less", name, new ExecuteOpArgs(x, y)); - - public static Tensor less_equal(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("LessEqual", name, new ExecuteOpArgs(x, y)); - - public static Tensor log1p(Tensor x, string name = null) - => tf.Context.ExecuteOp("Log1p", name, new ExecuteOpArgs(x)); - - public static Tensor logical_and(T x, T y, string name = null) - => tf.Context.ExecuteOp("LogicalAnd", name, new ExecuteOpArgs(x, y)); - - public static Tensor logical_not(Tensor x, string name = null) - => tf.Context.ExecuteOp("LogicalNot", name, new ExecuteOpArgs(x)); - - public static Tensor logical_or(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("LogicalOr", name, new ExecuteOpArgs(x, y)); - - public static Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor") - { - return logical_and( - logical_or(x, y), - logical_not(logical_and(x, y)), - name); - } + public static Tensor any_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Any", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Any", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of abs(x-y) < tolerance element-wise. + /// + /// + /// + /// + /// + public static Tensor approximate_equal(Tensor x, Tensor y, float tolerance = 1E-05f, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproximateEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["tolerance"] = tolerance } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return approximate_equal_eager_fallback(x, y, tolerance: tolerance, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["tolerance"] = tolerance; + var _op = tf.OpDefLib._apply_op_helper("ApproximateEqual", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "tolerance", _op.get_attr("tolerance") }; + _execute.record_gradient("ApproximateEqual", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor squared_difference(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("SquaredDifference", name, new ExecuteOpArgs(x, y)); + public static Tensor approximate_equal_eager_fallback(Tensor x, Tensor y, float tolerance, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "tolerance", tolerance }; + var _result = _execute.execute("ApproximateEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ApproximateEqual", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the index with the largest value across dimensions of a tensor. + /// + /// + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + /// Usage: + /// ```python + /// import tensorflow as tf + /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] + /// b = tf.math.argmax(input = a) + /// c = tf.keras.backend.eval(b) + /// # c = 4 + /// # here a[4] = 166.32 which is the largest element of a across axis 0 + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor arg_max(Tensor input, Tensor dimension, TF_DataType output_type = TF_DataType.TF_INT64, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMax", name) { args = new object[] { input, dimension }, attrs = new Dictionary() { ["output_type"] = output_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return arg_max_eager_fallback(input, dimension, output_type: output_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["dimension"] = dimension; + keywords["output_type"] = output_type; + var _op = tf.OpDefLib._apply_op_helper("ArgMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "output_type", _op._get_attr_type("output_type") }; + _execute.record_gradient("ArgMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Computes square of x element-wise. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. - /// A name for the operation (optional). - /// A `Tensor`. 
Has the same type as `x`. - public static Tensor square(Tensor x, string name = null) - => tf.Context.ExecuteOp("Square", name, new ExecuteOpArgs(x)); + public static Tensor arg_max_eager_fallback(Tensor input, Tensor dimension, TF_DataType output_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, dimension }; + object[] _attrs = new object[] { "T", input.dtype, "Tidx", dimension.dtype, "output_type", output_type }; + var _result = _execute.execute("ArgMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ArgMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the index with the smallest value across dimensions of a tensor. + /// + /// + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + /// Usage: + /// ```python + /// import tensorflow as tf + /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] + /// b = tf.math.argmin(input = a) + /// c = tf.keras.backend.eval(b) + /// # c = 0 + /// # here a[0] = 1 which is the smallest element of a across axis 0 + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor arg_min(Tensor input, Tensor dimension, TF_DataType output_type = TF_DataType.TF_INT64, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMin", name) { args = new object[] { input, dimension }, attrs = new Dictionary() { ["output_type"] = output_type } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return arg_min_eager_fallback(input, dimension, output_type: output_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["dimension"] = dimension; + keywords["output_type"] = output_type; + var _op = tf.OpDefLib._apply_op_helper("ArgMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "output_type", _op._get_attr_type("output_type") }; + _execute.record_gradient("ArgMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } - /// - /// Returns which elements of x are finite. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. - /// A name for the operation (optional). - /// A `Tensor` of type `bool`. - public static Tensor is_finite(Tensor x, string name = null) - => tf.Context.ExecuteOp("IsFinite", name, new ExecuteOpArgs(x)); + public static Tensor arg_min_eager_fallback(Tensor input, Tensor dimension, TF_DataType output_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, dimension }; + object[] _attrs = new object[] { "T", input.dtype, "Tidx", dimension.dtype, "output_type", output_type }; + var _result = _execute.execute("ArgMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ArgMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the trignometric inverse sine of x element-wise. + /// + /// + /// + /// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that + /// if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. 
+ /// + /// **Note**: The output of `tf.math.asin` will lie within the invertible range + /// of sine, i.e [-pi/2, pi/2]. + /// + /// For example: + /// + /// ```python + /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + /// x = tf.constant([1.047, 0.785]) + /// y = tf.math.sin(x) # [0.8659266, 0.7068252] + /// + /// tf.math.asin(y) # [1.047, 0.785] = x + /// ``` + /// + /// + /// + /// + /// + public static Tensor asin(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asin", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return asin_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Asin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Asin", _op.inputs, _attrs, _result); + } + return _result[0]; + } - public static Tensor is_nan(Tensor x, string name = null) - => tf.Context.ExecuteOp("IsNan", name, new ExecuteOpArgs(x)); - - - /// - /// Computes exponential of x element-wise. \\(y = e^x\\). - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. - /// A name for the operation (optional). - /// A `Tensor`. Has the same type as `x`. - public static Tensor exp(Tensor x, string name = null) - => tf.Context.ExecuteOp("Exp", name, new ExecuteOpArgs(x)); - - /// - /// Computes natural logarithm of x element-wise. - /// - /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. - /// name: A name for the operation (optional). - /// A `Tensor`. Has the same type as `x`. - public static Tensor log(Tensor x, string name = null) - => tf.Context.ExecuteOp("Log", name, new ExecuteOpArgs(x)); - - public static Tensor softplus(Tensor features, string name = null) - => tf.Context.ExecuteOp("Softplus", name, new ExecuteOpArgs(features)); - - public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, string name = null) - => tf.Context.ExecuteOp("Cast", name, new ExecuteOpArgs(x) - .SetAttributes(new { DstT, Truncate })); - - public static Tensor neg(Tensor x, string name = null) - => tf.Context.ExecuteOp("Neg", name, new ExecuteOpArgs(x)); - - public static Tensor sqrt(Tensor x, string name = null) - => tf.Context.ExecuteOp("Sqrt", name, new ExecuteOpArgs(x)); - - public static Tensor sub(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("Sub", name, new ExecuteOpArgs(x, y)); - - public static Tensor sub(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Sub", name, new ExecuteOpArgs(x, y)); - - /// - /// Returns the truth value of (x == y) element-wise. - /// - /// - /// - /// - /// - public static Tensor equal(Tx x, Ty y, bool incompatible_shape_error = true, string name = null) - => tf.Context.ExecuteOp("Equal", name, new ExecuteOpArgs(x, y) - .SetAttributes(new - { - incompatible_shape_error - })); - - /// - /// Returns the truth value of (x != y) element-wise. - /// - /// The type of the x. - /// The type of the y. - /// The x. - /// The y. - /// The name. 
- /// - public static Tensor not_equal(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("NotEqual", name, new ExecuteOpArgs(x, y)); - - public static Tensor atan2(Tensor y, Tensor x, string name = null) - => tf.Context.ExecuteOp("Atan2", name, new ExecuteOpArgs(y, x)); - - public static Tensor mul(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(x, y)); - - public static Tensor mul_no_nan(Tx x, Ty y, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("MulNoNan", name, args: new { x, y }); - - return _op.outputs[0]; - } - - public static Tensor real_div(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("RealDiv", name, new ExecuteOpArgs(x, y)); - - public static Tensor reciprocal(Tensor x, string name = null) - => tf.Context.ExecuteOp("Reciprocal", name, new ExecuteOpArgs(x)); - - public static Tensor floor_mod(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("FloorMod", name, new ExecuteOpArgs(x, y)); - - public static Tensor floor_div(Tensor x, Tensor y, string name = null) - => tf.Context.ExecuteOp("FloorDiv", name, new ExecuteOpArgs(x, y)); - - /// - /// Multiply the matrix "a" by the matrix "b". - /// - /// - /// - /// - /// - /// - /// - public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string name = null) - => tf.Context.ExecuteOp("MatMul", name, new ExecuteOpArgs(a, b) - .SetAttributes(new - { - transpose_a, - transpose_b - })); - - /// - /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. - /// - /// - /// - /// - /// - public static Tensor maximum(T1 x, T2 y, string name = null) - => tf.Context.ExecuteOp("Maximum", name, new ExecuteOpArgs(x, y)); - - public static Tensor minimum(T1 x, T2 y, string name = null) - => tf.Context.ExecuteOp("Minimum", name, new ExecuteOpArgs(x, y)); - - public static Tensor _abs(Tensor x, string name = null) - => tf.Context.ExecuteOp("Abs", name, new ExecuteOpArgs(x)); - - public static Tensor _any(Tx input, Ty axis, bool keep_dims = false, string name = null) - { - var _op = tf.OpDefLib._apply_op_helper("Any", name, new { input, reduction_indices = axis, keep_dims }); - - return _op.outputs[0]; - } - - public static Tensor _max(Tx input, Ty axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Max", name, new ExecuteOpArgs(input, axis) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - keep_dims = op.get_attr("keep_dims"), - Tidx = op.get_attr("Tidx") - } - }.SetAttributes(new { keep_dims, reduction_indices = axis })); - - public static Tensor _min(Tx input, Ty axis, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Min", name, new ExecuteOpArgs(input, axis) - { - GetGradientAttrs = (op) => new - { - T = op.get_attr("T"), - keep_dims = op.get_attr("keep_dims"), - Tidx = op.get_attr("Tidx") - } - }.SetAttributes(new { keep_dims, reduction_indices = axis })); - - public static Tensor pow(Tx x, Ty y, string name = null) - => tf.Context.ExecuteOp("Pow", name, new ExecuteOpArgs(x, y)); - - public static Tensor _sum(Tx input, Ty axis = default, bool keep_dims = false, string name = null) - => tf.Context.ExecuteOp("Sum", name, - new ExecuteOpArgs(input, axis).SetAttributes(new { keep_dims, reduction_indices = axis })); - - private static Tensor _sum_eager_fallback(Tensor[] inputs, Tensor axis, bool keep_dims = false, string name = null, Context ctx = null) - { - var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new[] { inputs 
}); - var (_attr_Tidx, axis1) = tf.Runner.ArgsToMatchingEager(ctx, tf.int32, new[] { axis }); - var _inputs_flat = input.concat(axis1); - var _attrs = new object[] { "keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx }; - - return tf.Runner.Execute(ctx, "Sum", 1, _inputs_flat, _attrs, name: name)[0]; - } - - /// - /// Creates a sequence of numbers. - /// - /// - /// - /// - /// - /// - public static Tensor range(Tensor start, Tensor limit, Tensor delta, string name = null) - => tf.Context.ExecuteOp("Range", name, new ExecuteOpArgs(start, limit, delta)); - - /// - /// Rounds the values of a tensor to the nearest integer, element-wise. - /// - /// - /// - /// - /// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'. - /// - /// - /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result. - /// - /// - /// Rounds half to even. Also known as bankers rounding. If you want to round - /// according to the current system rounding mode use std::cint. - /// - public static Tensor round(Tensor x, string name = "Round") - => tf.Context.ExecuteOp("Round", name, new ExecuteOpArgs(x)); - - /// - /// Computes reciprocal of square root of x element-wise. - /// - /// - /// - /// - public static Tensor rsqrt(Tensor x, string name = null) - => tf.Context.ExecuteOp("Rsqrt", name, new ExecuteOpArgs(x)); - - /// - /// Returns the fraction of zeros in value. - /// - /// A tensor of numeric type. - /// A name for the operation (optional). - /// The fraction of zeros in value, with type float32. - public static Tensor zero_fraction(Tensor value, string name = null) - => tf.Context.ExecuteOp("zero_fraction", name, new ExecuteOpArgs(value)); + public static Tensor asin_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Asin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Asin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes inverse hyperbolic sine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes inverse hyperbolic sine + /// for every element in the tensor. Both input and output has a range of + /// `[-inf, inf]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] + /// ``` + /// + /// + /// + /// + public static Tensor asinh(Tensor x, string? 
name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asinh", name) { args = new object[] { x }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return asinh_eager_fallback(x, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary keywords = new();
+ keywords["x"] = x;
+ var _op = tf.OpDefLib._apply_op_helper("Asinh", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Asinh", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor asinh_eager_fallback(Tensor x, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("Asinh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Asinh", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Computes the trigonometric inverse tangent of x element-wise.
+ ///
+ ///
+ ///
+ /// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
+ /// if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.
+ ///
+ /// **Note**: The output of `tf.math.atan` will lie within the invertible range
+ /// of tan, i.e. (-pi/2, pi/2).
+ ///
+ /// For example:
+ ///
+ /// ```python
+ /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
+ /// x = tf.constant([1.047, 0.785])
+ /// y = tf.math.tan(x) # [1.731261, 0.99920404]
+ ///
+ /// tf.math.atan(y) # [1.047, 0.785] = x
+ /// ```
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor atan(Tensor x, string? name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan", name) { args = new object[] { x }, attrs = new Dictionary() { } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return atan_eager_fallback(x, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary keywords = new();
+ keywords["x"] = x;
+ var _op = tf.OpDefLib._apply_op_helper("Atan", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+ _execute.record_gradient("Atan", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor atan_eager_fallback(Tensor x, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x };
+ object[] _attrs = new object[] { "T", x.dtype };
+ var _result = _execute.execute("Atan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("Atan", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
+ ///
+ ///
+ ///
+ /// This is the angle \(\theta \in [-\pi, \pi]\) such that
+ /// \[ x = r \cos(\theta) \]
+ /// and
+ /// \[ y = r \sin(\theta) \]
+ /// where \(r = \sqrt{x^2 + y^2}\).
+ ///
+ /// For example:
+ ///
+ /// >>> x = [1., 1.]
+ /// >>> y = [1., -1.]
+ /// >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy())
+ /// [ 45. -45.]
+ /// + /// + /// + /// + /// + /// + /// + public static Tensor atan2(Tensor y, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan2", name) { args = new object[] { y, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return atan2_eager_fallback(y, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Atan2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Atan2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor atan2_eager_fallback(Tensor y, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, x }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("Atan2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Atan2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes inverse hyperbolic tangent of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes inverse hyperbolic tangent + /// for every element in the tensor. Input range is `[-1,1]` and output range is + /// `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the + /// input is `1`, output will be `inf`. Values outside the range will have + /// `nan` as output. + /// + /// ```python + /// x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) + /// tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] + /// ``` + /// + /// + /// + /// + public static Tensor atanh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atanh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return atanh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Atanh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Atanh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor atanh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Atanh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Atanh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiplies slices of two tensors in batches. + /// + /// + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. 
Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. + /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// + /// + /// + /// + /// + /// If `True`, adjoint the slices of `x`. Defaults to `False`. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// + /// + public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMul", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_mat_mul_eager_fallback(x, y, adj_x: adj_x, adj_y: adj_y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["adj_x"] = adj_x; + keywords["adj_y"] = adj_y; + var _op = tf.OpDefLib._apply_op_helper("BatchMatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "adj_x", _op._get_attr_bool("adj_x"), "adj_y", _op._get_attr_bool("adj_y") }; + _execute.record_gradient("BatchMatMul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor batch_mat_mul_eager_fallback(Tensor x, Tensor y, bool adj_x, bool adj_y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "adj_x", adj_x, "adj_y", adj_y }; + var _result = _execute.execute("BatchMatMul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatMul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiplies slices of two tensors in batches. + /// + /// + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. + /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. 
More + /// about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + /// + /// + /// + /// + /// + /// + /// + /// If `True`, adjoint the slices of `x`. Defaults to `False`. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// + /// + public static Tensor batch_mat_mul_v2(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV2", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_mat_mul_v2_eager_fallback(x, y, adj_x: adj_x, adj_y: adj_y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["adj_x"] = adj_x; + keywords["adj_y"] = adj_y; + var _op = tf.OpDefLib._apply_op_helper("BatchMatMulV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "adj_x", _op._get_attr_bool("adj_x"), "adj_y", _op._get_attr_bool("adj_y") }; + _execute.record_gradient("BatchMatMulV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor batch_mat_mul_v2_eager_fallback(Tensor x, Tensor y, bool adj_x, bool adj_y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "adj_x", adj_x, "adj_y", adj_y }; + var _result = _execute.execute("BatchMatMulV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchMatMulV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiplies slices of two tensors in batches. + /// + /// + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. + /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// *NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More + /// about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + /// + /// + /// + /// + /// + /// + /// + /// If not spcified, Tout is the same type to input type. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `x`. Defaults to `False`. + /// + /// + /// + /// + /// If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// + /// + public static Tensor batch_mat_mul_v3(Tensor x, Tensor y, TF_DataType Tout, bool adj_x = false, bool adj_y = false, string? 
name = null)
+ {
+ var _ctx = tf.Context;
+ if (_ctx.executing_eagerly())
+ {
+ try
+ {
+ var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV3", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["Tout"] = Tout, ["adj_x"] = adj_x, ["adj_y"] = adj_y } });
+ return _fast_path_result[0];
+ }
+ catch (Exception)
+ {
+ }
+ try
+ {
+ return batch_mat_mul_v3_eager_fallback(x, y, Tout: Tout, adj_x: adj_x, adj_y: adj_y, name: name, ctx: _ctx);
+ }
+ catch (Exception)
+ {
+ }
+ }
+ Dictionary keywords = new();
+ keywords["x"] = x;
+ keywords["y"] = y;
+ keywords["Tout"] = Tout;
+ keywords["adj_x"] = adj_x;
+ keywords["adj_y"] = adj_y;
+ var _op = tf.OpDefLib._apply_op_helper("BatchMatMulV3", name, keywords);
+ var _result = _op.outputs;
+ if (_execute.must_record_gradient())
+ {
+ object[] _attrs = new object[] { "Ta", _op._get_attr_type("Ta"), "Tb", _op._get_attr_type("Tb"), "Tout", _op._get_attr_type("Tout"), "adj_x", _op._get_attr_bool("adj_x"), "adj_y", _op._get_attr_bool("adj_y") };
+ _execute.record_gradient("BatchMatMulV3", _op.inputs, _attrs, _result);
+ }
+ return _result[0];
+ }
+
+ public static Tensor batch_mat_mul_v3_eager_fallback(Tensor x, Tensor y, TF_DataType Tout, bool adj_x, bool adj_y, string name, Context ctx)
+ {
+ Tensor[] _inputs_flat = new Tensor[] { x, y };
+ object[] _attrs = new object[] { "Ta", x.dtype, "Tb", y.dtype, "Tout", Tout, "adj_x", adj_x, "adj_y", adj_y };
+ var _result = _execute.execute("BatchMatMulV3", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+ if (_execute.must_record_gradient())
+ {
+ _execute.record_gradient("BatchMatMulV3", _inputs_flat, _attrs, _result);
+ }
+ return _result[0];
+ }
+ ///
+ /// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
+ ///
+ ///
+ ///
+ /// The regularized incomplete beta integral is defined as:
+ ///
+ ///
+ /// \(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\)
+ ///
+ /// where
+ ///
+ ///
+ /// \(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\)
+ ///
+ ///
+ /// is the incomplete beta function and \(B(a, b)\) is the *complete*
+ /// beta function.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public static Tensor betainc(Tensor a, Tensor b, Tensor x, string?
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Betainc", name) { args = new object[] { a, b, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return betainc_eager_fallback(a, b, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Betainc", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Betainc", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor betainc_eager_fallback(Tensor a, Tensor b, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Betainc", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Betainc", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + public static Tensor bincount(Tensor arr, Tensor size, Tensor weights, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bincount", name) { args = new object[] { arr, size, weights }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bincount_eager_fallback(arr, size, weights, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["arr"] = arr; + keywords["size"] = size; + keywords["weights"] = weights; + var _op = tf.OpDefLib._apply_op_helper("Bincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Bincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bincount_eager_fallback(Tensor arr, Tensor size, Tensor weights, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { arr, size, weights }; + object[] _attrs = new object[] { "T", weights.dtype }; + var _result = _execute.execute("Bincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Bincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Bucketizes 'input' based on 'boundaries'. 
+ /// + /// + /// + /// For example, if the inputs are + /// boundaries = [0, 10, 100] + /// input = [[-5, 10000] + /// [150, 10] + /// [5, 100]] + /// + /// then the output will be + /// output = [[0, 3] + /// [3, 2] + /// [1, 3]] + /// + /// + /// + /// + /// + /// A sorted list of floats gives the boundary of the buckets. + /// + /// + /// + public static Tensor bucketize(Tensor input, float[] boundaries, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bucketize", name) { args = new object[] { input }, attrs = new Dictionary() { ["boundaries"] = boundaries } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bucketize_eager_fallback(input, boundaries: boundaries, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["boundaries"] = boundaries; + var _op = tf.OpDefLib._apply_op_helper("Bucketize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "boundaries", _op.get_attr("boundaries") }; + _execute.record_gradient("Bucketize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bucketize_eager_fallback(Tensor input, float[] boundaries, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "boundaries", boundaries }; + var _result = _execute.execute("Bucketize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Bucketize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Cast x of type SrcT to y of DstT. + /// + /// + /// + /// + /// + public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cast", name) { args = new object[] { x }, attrs = new Dictionary() { ["DstT"] = DstT, ["Truncate"] = Truncate } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cast_eager_fallback(x, DstT: DstT, Truncate: Truncate, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["DstT"] = DstT; + keywords["Truncate"] = Truncate; + var _op = tf.OpDefLib._apply_op_helper("Cast", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "SrcT", _op._get_attr_type("SrcT"), "DstT", _op._get_attr_type("DstT"), "Truncate", _op._get_attr_bool("Truncate") }; + _execute.record_gradient("Cast", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cast_eager_fallback(Tensor x, TF_DataType DstT, bool Truncate, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "SrcT", x.dtype, "DstT", DstT, "Truncate", Truncate }; + var _result = _execute.execute("Cast", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cast", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise smallest integer not less than x. + /// + /// + /// + public static Tensor ceil(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ceil", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ceil_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Ceil", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Ceil", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ceil_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Ceil", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Ceil", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Clips tensor values to a specified min and max. + /// + /// + /// + /// Given a tensor `t`, this operation returns a tensor of the same type and + /// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. + /// Any values less than `clip_value_min` are set to `clip_value_min`. Any values + /// greater than `clip_value_max` are set to `clip_value_max`. + /// + /// + /// + /// + /// + /// + public static Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ClipByValue", name) { args = new object[] { t, clip_value_min, clip_value_max }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return clip_by_value_eager_fallback(t, clip_value_min, clip_value_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["clip_value_min"] = clip_value_min; + keywords["clip_value_max"] = clip_value_max; + var _op = tf.OpDefLib._apply_op_helper("ClipByValue", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ClipByValue", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor clip_by_value_eager_fallback(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, clip_value_min, clip_value_max }; + object[] _attrs = new object[] { "T", t.dtype }; + var _result = _execute.execute("ClipByValue", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ClipByValue", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Converts two real numbers to a complex number. + /// + /// + /// + /// Given a tensor `real` representing the real part of a complex number, and a + /// tensor `imag` representing the imaginary part of a complex number, this + /// operation returns complex numbers elementwise of the form \(a + bj\), where + /// *a* represents the `real` part and *b* represents the `imag` part. + /// + /// The input tensors `real` and `imag` must have the same shape. + /// + /// For example: + /// + /// ``` + /// # tensor 'real' is [2.25, 3.25] + /// # tensor `imag` is [4.75, 5.75] + /// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor complex(Tensor real, Tensor imag, TF_DataType Tout = TF_DataType.TF_COMPLEX64, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Complex", name) { args = new object[] { real, imag }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return complex_eager_fallback(real, imag, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["real"] = real; + keywords["imag"] = imag; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Complex", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Complex", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor complex_eager_fallback(Tensor real, Tensor imag, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { real, imag }; + object[] _attrs = new object[] { "T", real.dtype, "Tout", Tout }; + var _result = _execute.execute("Complex", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Complex", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the complex absolute value of a tensor. + /// + /// + /// + /// Given a tensor `x` of complex numbers, this operation returns a tensor of type + /// `float` or `double` that is the absolute value of each element in `x`. All + /// elements in `x` must be complex numbers of the form \(a + bj\). The absolute + /// value is computed as \( sqrt{a^2 + b^2}\). + /// + /// For example: + /// + /// >>> x = tf.complex(3.0, 4.0) + /// >>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + /// 5.0 + /// + /// + /// + /// + /// + /// + public static Tensor complex_abs(Tensor x, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ComplexAbs", name) { args = new object[] { x }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return complex_abs_eager_fallback(x, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("ComplexAbs", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("ComplexAbs", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor complex_abs_eager_fallback(Tensor x, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "Tout", Tout }; + var _result = _execute.execute("ComplexAbs", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ComplexAbs", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the complex conjugate of a complex number. 
+ /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// complex numbers that are the complex conjugate of each element in `input`. The + /// complex numbers in `input` must be of the form \(a + bj\), where *a* is the + /// real part and *b* is the imaginary part. + /// + /// The complex conjugate returned by this operation is of the form \(a - bj\). + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] + /// ``` + /// + /// + /// + /// + public static Tensor conj(Tensor input, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conj", name) { args = new object[] { input }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conj_eager_fallback(input, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + var _op = tf.OpDefLib._apply_op_helper("Conj", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Conj", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conj_eager_fallback(Tensor input, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype }; + var _result = _execute.execute("Conj", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conj", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes cos of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes cosine of every + /// element in the tensor. Input range is `(-inf, inf)` and + /// output range is `[-1,1]`. If input lies outside the boundary, `nan` + /// is returned. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] + /// ``` + /// + /// + /// + /// + public static Tensor cos(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cos", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cos_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Cos", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Cos", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cos_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Cos", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cos", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes hyperbolic cosine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes hyperbolic cosine of every + /// element in the tensor. Input range is `[-inf, inf]` and output range + /// is `[1, inf]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + /// tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] + /// ``` + /// + /// + /// + /// + public static Tensor cosh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cosh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cosh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Cosh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Cosh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cosh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Cosh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cosh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the pairwise cross product. + /// + /// + /// + /// `a` and `b` must be the same shape; they can either be simple 3-element vectors, + /// or any shape where the innermost dimension is 3. In the latter case, each pair + /// of corresponding 3-element vectors is cross-multiplied independently. + /// + /// + /// + /// + /// + public static Tensor cross(Tensor a, Tensor b, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cross", name) { args = new object[] { a, b }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cross_eager_fallback(a, b, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + var _op = tf.OpDefLib._apply_op_helper("Cross", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Cross", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cross_eager_fallback(Tensor a, Tensor b, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Cross", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cross", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the cumulative product of the tensor `x` along `axis`. + /// + /// + /// + /// By default, this op performs an inclusive cumprod, which means that the first + /// element of the input is identical to the first element of the output: + /// + /// ```python + /// tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + /// ``` + /// + /// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is + /// performed instead: + /// + /// ```python + /// tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + /// ``` + /// + /// By setting the `reverse` kwarg to `True`, the cumprod is performed in the + /// opposite direction: + /// + /// ```python + /// tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + /// ``` + /// + /// This is more efficient than using separate `tf.reverse` ops. + /// + /// The `reverse` and `exclusive` kwargs can also be combined: + /// + /// ```python + /// tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + /// ``` + /// + /// + /// + /// + /// + /// + /// If `True`, perform exclusive cumprod. + /// + /// + /// + /// + /// A `bool` (default: False). + /// + /// + /// + public static Tensor cumprod(Tensor x, Tensor axis, bool exclusive = false, bool reverse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumprod", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cumprod_eager_fallback(x, axis, exclusive: exclusive, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["exclusive"] = exclusive; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("Cumprod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "exclusive", _op._get_attr_bool("exclusive"), "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Cumprod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cumprod_eager_fallback(Tensor x, Tensor axis, bool exclusive, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "exclusive", exclusive, "reverse", reverse, "T", x.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("Cumprod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cumprod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the cumulative sum of the tensor `x` along `axis`. + /// + /// + /// + /// By default, this op performs an inclusive cumsum, which means that the first + /// element of the input is identical to the first element of the output: + /// + /// ```python + /// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + /// ``` + /// + /// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is + /// performed instead: + /// + /// ```python + /// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + /// ``` + /// + /// By setting the `reverse` kwarg to `True`, the cumsum is performed in the + /// opposite direction: + /// + /// ```python + /// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + /// ``` + /// + /// This is more efficient than using separate `tf.reverse` ops. + /// + /// The `reverse` and `exclusive` kwargs can also be combined: + /// + /// ```python + /// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + /// ``` + /// + /// + /// + /// + /// + /// + /// If `True`, perform exclusive cumsum. + /// + /// + /// + /// + /// A `bool` (default: False). + /// + /// + /// + public static Tensor cumsum(Tensor x, Tensor axis, bool exclusive = false, bool reverse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumsum", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cumsum_eager_fallback(x, axis, exclusive: exclusive, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["exclusive"] = exclusive; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("Cumsum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "exclusive", _op._get_attr_bool("exclusive"), "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Cumsum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cumsum_eager_fallback(Tensor x, Tensor axis, bool exclusive, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "exclusive", exclusive, "reverse", reverse, "T", x.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("Cumsum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Cumsum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the cumulative product of the tensor `x` along `axis`. + /// + /// + /// + /// By default, this op performs an inclusive cumulative log-sum-exp, + /// which means that the first + /// element of the input is identical to the first element of the output: + /// ```python + /// tf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))] + /// ``` + /// + /// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is + /// performed instead: + /// ```python + /// tf.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) * exp(b))] + /// ``` + /// Note that the neutral element of the log-sum-exp operation is `-inf`, + /// however, for performance reasons, the minimal value representable by the + /// floating point type is used instead. + /// + /// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the + /// opposite direction. + /// + /// + /// + /// + /// + /// + /// If `True`, perform exclusive cumulative log-sum-exp. + /// + /// + /// + /// + /// A `bool` (default: False). + /// + /// + /// + public static Tensor cumulative_logsumexp(Tensor x, Tensor axis, bool exclusive = false, bool reverse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CumulativeLogsumexp", name) { args = new object[] { x, axis }, attrs = new Dictionary() { ["exclusive"] = exclusive, ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return cumulative_logsumexp_eager_fallback(x, axis, exclusive: exclusive, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["axis"] = axis; + keywords["exclusive"] = exclusive; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("CumulativeLogsumexp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "exclusive", _op._get_attr_bool("exclusive"), "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("CumulativeLogsumexp", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor cumulative_logsumexp_eager_fallback(Tensor x, Tensor axis, bool exclusive, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, axis }; + object[] _attrs = new object[] { "exclusive", exclusive, "reverse", reverse, "T", x.dtype, "Tidx", axis.dtype }; + var _result = _execute.execute("CumulativeLogsumexp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("CumulativeLogsumexp", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + /// + /// bool; Whether the kernel should count the appearance or number of occurrences. + /// + /// + /// + public static Tensor dense_bincount(Tensor input, Tensor size, Tensor weights, bool binary_output = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DenseBincount", name) { args = new object[] { input, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dense_bincount_eager_fallback(input, size, weights, binary_output: binary_output, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["size"] = size; + keywords["weights"] = weights; + keywords["binary_output"] = binary_output; + var _op = tf.OpDefLib._apply_op_helper("DenseBincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T"), "binary_output", _op._get_attr_bool("binary_output") }; + _execute.record_gradient("DenseBincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dense_bincount_eager_fallback(Tensor input, Tensor size, Tensor weights, bool binary_output, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, size, weights }; + object[] _attrs = new object[] { "Tidx", input.dtype, "T", weights.dtype, "binary_output", binary_output }; + var _result = _execute.execute("DenseBincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DenseBincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes Psi, the derivative of Lgamma (the log of the absolute value of + /// + /// + /// + /// `Gamma(x)`), element-wise. + /// + /// + /// + /// + public static Tensor digamma(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Digamma", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return digamma_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Digamma", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Digamma", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor digamma_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Digamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Digamma", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x / y element-wise. + /// + /// + /// + /// *NOTE*: `Div` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor div(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Div", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Div", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Div", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Div", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Div", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if the denominator is zero. + /// + /// + /// + /// + /// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor div_no_nan(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DivNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return div_no_nan_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("DivNoNan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("DivNoNan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor div_no_nan_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("DivNoNan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DivNoNan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x == y) element-wise. + /// + /// + /// + /// *NOTE*: `Equal` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// ```python + /// x = tf.constant([2, 4]) + /// y = tf.constant(2) + /// tf.math.equal(x, y) ==> array([True, False]) + /// + /// x = tf.constant([2, 4]) + /// y = tf.constant([2, 4]) + /// tf.math.equal(x, y) ==> array([True, True]) + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor equal(Tensor x, Tensor y, bool incompatible_shape_error = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Equal", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["incompatible_shape_error"] = incompatible_shape_error } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return equal_eager_fallback(x, y, incompatible_shape_error: incompatible_shape_error, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["incompatible_shape_error"] = incompatible_shape_error; + var _op = tf.OpDefLib._apply_op_helper("Equal", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "incompatible_shape_error", _op._get_attr_bool("incompatible_shape_error") }; + _execute.record_gradient("Equal", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor equal_eager_fallback(Tensor x, Tensor y, bool incompatible_shape_error, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "incompatible_shape_error", incompatible_shape_error }; + var _result = _execute.execute("Equal", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Equal", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, x]$. + /// + /// + /// + public static Tensor erf(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erf", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return erf_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Erf", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Erf", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor erf_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Erf", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Erf", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the complementary error function of `x` element-wise. + /// + /// + /// + public static Tensor erfc(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfc", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return erfc_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Erfc", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Erfc", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor erfc_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Erfc", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Erfc", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor erfinv(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfinv", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return erfinv_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Erfinv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Erfinv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor erfinv_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Erfinv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Erfinv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the euclidean norm of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor euclidean_norm(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EuclideanNorm", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return euclidean_norm_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("EuclideanNorm", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("EuclideanNorm", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor euclidean_norm_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("EuclideanNorm", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EuclideanNorm", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes exponential of x element-wise. \\(y = e^x\\). + /// + /// + /// + /// This function computes the exponential of every element in the input tensor. + /// i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. + /// `e` denotes Euler's number and is approximately equal to 2.718281. + /// Output is positive for any real input. + /// + /// ```python + /// x = tf.constant(2.0) + /// tf.math.exp(x) ==> 7.389056 + /// + /// x = tf.constant([2.0, 8.0]) + /// tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) + /// ``` + /// + /// For complex numbers, the exponential value is calculated as follows: + /// + /// ``` + /// e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) + /// ``` + /// + /// Let's consider complex number 1+1j as an example. + /// e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + /// + /// ```python + /// x = tf.constant(1 + 1j) + /// tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j + /// ``` + /// + /// + /// + /// + public static Tensor exp(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Exp", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return exp_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Exp", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Exp", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor exp_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Exp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Exp", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes `exp(x) - 1` element-wise. + /// + /// + /// + /// i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. + /// `e` denotes Euler's number and is approximately equal to 2.718281. + /// + /// ```python + /// x = tf.constant(2.0) + /// tf.math.expm1(x) ==> 6.389056 + /// + /// x = tf.constant([2.0, 8.0]) + /// tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) + /// + /// x = tf.constant(1 + 1j) + /// tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) + /// ``` + /// + /// + /// + /// + public static Tensor expm1(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Expm1", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return expm1_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Expm1", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Expm1", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor expm1_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Expm1", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Expm1", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise largest integer not greater than x. + /// + /// + /// + public static Tensor floor(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Floor", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return floor_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Floor", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Floor", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor floor_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Floor", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Floor", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x // y element-wise. + /// + /// + /// + /// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor floor_div(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return floor_div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("FloorDiv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("FloorDiv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor floor_div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("FloorDiv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FloorDiv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise remainder of division. + /// + /// + /// + /// This follows Python semantics in that the + /// result here is consistent with a flooring divide. E.g. + /// `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y. + /// + /// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor floor_mod(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorMod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return floor_mod_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("FloorMod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("FloorMod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor floor_mod_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("FloorMod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FloorMod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x > y) element-wise. + /// + /// + /// + /// *NOTE*: `Greater` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 2, 5]) + /// tf.math.greater(x, y) ==> [False, True, True] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.greater(x, y) ==> [False, False, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor greater(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Greater", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return greater_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Greater", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Greater", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor greater_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Greater", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Greater", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x >= y) element-wise. + /// + /// + /// + /// *NOTE*: `GreaterEqual` supports broadcasting. 
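+ /// A minimal C# usage sketch (illustrative only; it assumes this op is emitted into a
+ /// `gen_math_ops`-style class and that `tf.constant` is available in the calling code):
+ /// ```csharp
+ /// var x = tf.constant(new[] { 5, 4, 6, 7 });
+ /// var y = tf.constant(new[] { 5 });
+ /// var z = gen_math_ops.greater_equal(x, y); // element-wise bool tensor: [True, False, True, True]
+ /// ```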
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6, 7]) + /// y = tf.constant([5, 2, 5, 10]) + /// tf.math.greater_equal(x, y) ==> [True, True, True, False] + /// + /// x = tf.constant([5, 4, 6, 7]) + /// y = tf.constant([5]) + /// tf.math.greater_equal(x, y) ==> [True, False, True, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor greater_equal(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GreaterEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return greater_equal_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("GreaterEqual", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("GreaterEqual", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor greater_equal_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("GreaterEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("GreaterEqual", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Return histogram of values. + /// + /// + /// + /// Given the tensor `values`, this operation returns a rank 1 histogram counting + /// the number of entries in `values` that fall into every bin. The bins are + /// equal width and determined by the arguments `value_range` and `nbins`. + /// + /// ```python + /// # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + /// nbins = 5 + /// value_range = [0.0, 5.0] + /// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + /// + /// with tf.get_default_session() as sess: + /// hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + /// variables.global_variables_initializer().run() + /// sess.run(hist) => [2, 1, 1, 0, 2] + /// ``` + /// + /// + /// + /// + /// + /// + /// + public static Tensor histogram_fixed_width(Tensor values, Tensor value_range, Tensor nbins, TF_DataType dtype = TF_DataType.TF_INT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "HistogramFixedWidth", name) { args = new object[] { values, value_range, nbins }, attrs = new Dictionary() { ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return histogram_fixed_width_eager_fallback(values, value_range, nbins, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["values"] = values; + keywords["value_range"] = value_range; + keywords["nbins"] = nbins; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("HistogramFixedWidth", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("HistogramFixedWidth", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor histogram_fixed_width_eager_fallback(Tensor values, Tensor value_range, Tensor nbins, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { values, value_range, nbins }; + object[] _attrs = new object[] { "T", values.dtype, "dtype", dtype }; + var _result = _execute.execute("HistogramFixedWidth", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("HistogramFixedWidth", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the lower regularized incomplete Gamma function `P(a, x)`. + /// + /// + /// + /// The lower regularized incomplete Gamma function is defined as: + /// + /// + /// \(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\) + /// + /// where + /// + /// \(gamma(a, x) = \int_{0}^{x} t^{a-1} exp(-t) dt\) + /// + /// is the lower incomplete Gamma function. + /// + /// Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete + /// Gamma function. + /// + /// + /// + /// + /// + public static Tensor igamma(Tensor a, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igamma", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return igamma_eager_fallback(a, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Igamma", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Igamma", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor igamma_eager_fallback(Tensor a, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Igamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Igamma", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of `igamma(a, x)` wrt `a`. 
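+ /// That is, it returns \(\frac{\partial P(a, x)}{\partial a}\) element-wise, where
+ /// \(P(a, x)\) is the lower regularized incomplete Gamma function described for `igamma` above.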
+ /// + /// + /// + /// + public static Tensor igamma_grad_a(Tensor a, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IgammaGradA", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return igamma_grad_a_eager_fallback(a, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IgammaGradA", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IgammaGradA", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor igamma_grad_a_eager_fallback(Tensor a, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("IgammaGradA", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IgammaGradA", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the upper regularized incomplete Gamma function `Q(a, x)`. + /// + /// + /// + /// The upper regularized incomplete Gamma function is defined as: + /// + /// \(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\) + /// + /// where + /// + /// \(Gamma(a, x) = int_{x}^{infty} t^{a-1} exp(-t) dt\) + /// + /// is the upper incomplete Gamma function. + /// + /// Note, above `P(a, x)` (`Igamma`) is the lower regularized complete + /// Gamma function. + /// + /// + /// + /// + /// + public static Tensor igammac(Tensor a, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igammac", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return igammac_eager_fallback(a, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Igammac", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Igammac", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor igammac_eager_fallback(Tensor a, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Igammac", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Igammac", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the imaginary part of a complex number. + /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the imaginary part of each element in `input`. 
All + /// elements in `input` must be complex numbers of the form \(a + bj\), where *a* + /// is the real part and *b* is the imaginary part returned by this operation. + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.imag(input) ==> [4.75, 5.75] + /// ``` + /// + /// + /// + /// + /// + public static Tensor imag(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Imag", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return imag_eager_fallback(input, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Imag", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Imag", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor imag_eager_fallback(Tensor input, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "Tout", Tout }; + var _result = _execute.execute("Imag", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Imag", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the reciprocal of x element-wise. + /// + /// + /// + /// I.e., \(y = 1 / x\). + /// + /// + /// + /// + public static Tensor inv(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Inv", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inv_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Inv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Inv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inv_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Inv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Inv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the inverse of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor inv_grad(Tensor y, Tensor dy, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return inv_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("InvGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InvGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor inv_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("InvGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InvGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns which elements of x are finite. + /// + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isfinite + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + /// tf.math.is_finite(x) ==> [True, True, True, False, False] + /// ``` + /// + /// + /// + /// + public static Tensor is_finite(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsFinite", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return is_finite_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IsFinite", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IsFinite", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor is_finite_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("IsFinite", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsFinite", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns which elements of x are Inf. + /// + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isinf + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, np.inf, 6.8, np.inf]) + /// tf.math.is_inf(x) ==> [False, True, False, True] + /// ``` + /// + /// + /// + /// + public static Tensor is_inf(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsInf", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return is_inf_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IsInf", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IsInf", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor is_inf_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("IsInf", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsInf", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns which elements of x are NaN. + /// + /// + /// + /// @compatibility(numpy) + /// Equivalent to np.isnan + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + /// tf.math.is_nan(x) ==> [False, True, False, True, False] + /// ``` + /// + /// + /// + /// + public static Tensor is_nan(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsNan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return is_nan_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("IsNan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("IsNan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor is_nan_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("IsNan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsNan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x < y) element-wise. + /// + /// + /// + /// *NOTE*: `Less` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.less(x, y) ==> [False, True, False] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 6, 7]) + /// tf.math.less(x, y) ==> [False, True, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor less(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Less", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return less_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Less", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Less", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor less_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Less", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Less", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x <= y) element-wise. + /// + /// + /// + /// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.less_equal(x, y) ==> [True, True, False] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 6, 6]) + /// tf.math.less_equal(x, y) ==> [True, True, True] + /// ``` + /// + /// + /// + /// + /// + public static Tensor less_equal(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LessEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return less_equal_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("LessEqual", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("LessEqual", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor less_equal_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("LessEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LessEqual", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the log of the absolute value of `Gamma(x)` element-wise. + /// + /// + /// + /// For positive numbers, this function computes log((input - 1)!) for every element in the tensor. + /// `lgamma(5) = log((5-1)!) = log(4!) 
= log(24) = 3.1780539` + /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) + /// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] + /// ``` + /// + /// + /// + /// + public static Tensor lgamma(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Lgamma", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lgamma_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Lgamma", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Lgamma", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lgamma_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Lgamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Lgamma", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Generates values in an interval. + /// + /// + /// + /// A sequence of `num` evenly-spaced values are generated beginning at `start`. + /// If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, + /// so that the last one is exactly `stop`. + /// + /// For example: + /// + /// ``` + /// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor lin_space(Tensor start, Tensor stop, Tensor num, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LinSpace", name) { args = new object[] { start, stop, num }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lin_space_eager_fallback(start, stop, num, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["start"] = start; + keywords["stop"] = stop; + keywords["num"] = num; + var _op = tf.OpDefLib._apply_op_helper("LinSpace", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("LinSpace", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lin_space_eager_fallback(Tensor start, Tensor stop, Tensor num, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { start, stop, num }; + object[] _attrs = new object[] { "T", start.dtype, "Tidx", num.dtype }; + var _result = _execute.execute("LinSpace", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LinSpace", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes natural logarithm of x element-wise. + /// + /// + /// + /// I.e., \(y = log_e x\). 
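+ /// A minimal C# usage sketch (illustrative only; it assumes this op is emitted into a
+ /// `gen_math_ops`-style class and that `tf.constant` is available in the calling code):
+ /// ```csharp
+ /// var x = tf.constant(new[] { 1.0f, 2.718282f });
+ /// var y = gen_math_ops.log(x); // => approximately [0, 1]
+ /// ```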
+ /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 5]) + /// tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] + /// ``` + /// + /// + /// + /// + public static Tensor log(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return log_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Log", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Log", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor log_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Log", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Log", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes natural logarithm of (1 + x) element-wise. + /// + /// + /// + /// I.e., \(y = log_e (1 + x)\). + /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 5]) + /// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] + /// ``` + /// + /// + /// + /// + public static Tensor log1p(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log1p", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return log1p_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Log1p", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Log1p", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor log1p_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Log1p", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Log1p", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of x AND y element-wise. + /// + /// + /// + /// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor logical_and(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalAnd", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return logical_and_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("LogicalAnd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("LogicalAnd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor logical_and_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("LogicalAnd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogicalAnd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of `NOT x` element-wise. + /// + /// + /// + public static Tensor logical_not(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalNot", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return logical_not_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("LogicalNot", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("LogicalNot", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor logical_not_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("LogicalNot", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogicalNot", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of x OR y element-wise. + /// + /// + /// + /// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor logical_or(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalOr", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return logical_or_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("LogicalOr", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { }; + _execute.record_gradient("LogicalOr", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor logical_or_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { }; + var _result = _execute.execute("LogicalOr", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogicalOr", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiply the matrix "a" by the matrix "b". + /// + /// + /// + /// The inputs must be two-dimensional matrices and the inner dimension of + /// "a" (after being transposed if transpose_a is true) must match the + /// outer dimension of "b" (after being transposed if transposed_b is + /// true). + /// + /// *Note*: The default kernel implementation for MatMul on GPUs uses + /// cublas. + /// + /// + /// + /// + /// + /// + /// If true, "a" is transposed before multiplication. + /// + /// + /// + /// + /// If true, "b" is transposed before multiplication. + /// + /// + /// + public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatMul", name) { args = new object[] { a, b }, attrs = new Dictionary() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mat_mul_eager_fallback(a, b, transpose_a: transpose_a, transpose_b: transpose_b, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + var _op = tf.OpDefLib._apply_op_helper("MatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MatMul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mat_mul_eager_fallback(Tensor a, Tensor b, bool transpose_a, bool transpose_b, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b }; + object[] _attrs = new object[] { "transpose_a", transpose_a, "transpose_b", transpose_b, "T", a.dtype }; + var _result = _execute.execute("MatMul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MatMul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the maximum of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor max(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Max", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Max", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Max", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Max", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Max", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. + /// + /// + /// + /// *NOTE*: `Maximum` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor maximum(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Maximum", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return maximum_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Maximum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Maximum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor maximum_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Maximum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Maximum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. 
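/// 
/// A small worked example of the reduction semantics (shown with the Python front-end for
/// illustration; the values are a sketch and not part of the op definition):
/// 
/// ```
/// # 'input' is [[1., 1.], [2., 2.]]
/// tf.reduce_mean(input) ==> 1.5
/// tf.reduce_mean(input, 0) ==> [1.5, 1.5]
/// tf.reduce_mean(input, 1) ==> [1., 2.]
/// ```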
+ /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor mean(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mean", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mean_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Mean", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Mean", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mean_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Mean", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Mean", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the minimum of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor min(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Min", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return min_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Min", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Min", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor min_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Min", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Min", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the min of x and y (i.e. x < y ? x : y) element-wise. + /// + /// + /// + /// *NOTE*: `Minimum` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor minimum(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Minimum", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return minimum_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Minimum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Minimum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor minimum_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Minimum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Minimum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise remainder of division. This emulates C semantics in that + /// + /// + /// + /// the result here is consistent with a truncating divide. E.g. + /// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. + /// + /// *NOTE*: `Mod` supports broadcasting. 
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor mod(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mod_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Mod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Mod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mod_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Mod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Mod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x * y element-wise. + /// + /// + /// + /// *NOTE*: `Mul` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor mul(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mul", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mul_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Mul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Mul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mul_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Mul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Mul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. + /// + /// + /// + /// *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor mul_no_nan(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MulNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return mul_no_nan_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("MulNoNan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("MulNoNan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor mul_no_nan_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("MulNoNan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MulNoNan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + public static Tensor ndtri(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ndtri", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ndtri_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Ndtri", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Ndtri", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ndtri_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Ndtri", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Ndtri", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes numerical negative value element-wise. + /// + /// + /// + /// I.e., \(y = -x\). + /// + /// + /// + /// + public static Tensor neg(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Neg", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return neg_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Neg", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Neg", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor neg_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Neg", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Neg", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the next representable value of `x1` in the direction of `x2`, element-wise. + /// + /// + /// + /// This operation returns the same result as the C++ std::nextafter function. + /// + /// It can also return a subnormal number. + /// + /// @compatibility(cpp) + /// Equivalent to C++ std::nextafter function. + /// @end_compatibility + /// + /// + /// + /// + /// + public static Tensor next_after(Tensor x1, Tensor x2, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NextAfter", name) { args = new object[] { x1, x2 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return next_after_eager_fallback(x1, x2, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x1"] = x1; + keywords["x2"] = x2; + var _op = tf.OpDefLib._apply_op_helper("NextAfter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("NextAfter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor next_after_eager_fallback(Tensor x1, Tensor x2, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x1, x2 }; + object[] _attrs = new object[] { "T", x1.dtype }; + var _result = _execute.execute("NextAfter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("NextAfter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the truth value of (x != y) element-wise. + /// + /// + /// + /// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + /// + public static Tensor not_equal(Tensor x, Tensor y, bool incompatible_shape_error = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NotEqual", name) { args = new object[] { x, y }, attrs = new Dictionary() { ["incompatible_shape_error"] = incompatible_shape_error } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return not_equal_eager_fallback(x, y, incompatible_shape_error: incompatible_shape_error, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["incompatible_shape_error"] = incompatible_shape_error; + var _op = tf.OpDefLib._apply_op_helper("NotEqual", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "incompatible_shape_error", _op._get_attr_bool("incompatible_shape_error") }; + _execute.record_gradient("NotEqual", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor not_equal_eager_fallback(Tensor x, Tensor y, bool incompatible_shape_error, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype, "incompatible_shape_error", incompatible_shape_error }; + var _result = _execute.execute("NotEqual", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("NotEqual", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the polygamma function \\(\psi^{(n)}(x)\\). + /// + /// + /// + /// The polygamma function is defined as: + /// + /// + /// \(psi^{(a)}(x) = rac{d^a}{dx^a} psi(x)\) + /// + /// where \(psi(x)\) is the digamma function. + /// The polygamma function is defined only for non-negative integer orders \a\. + /// + /// + /// + /// + /// + public static Tensor polygamma(Tensor a, Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Polygamma", name) { args = new object[] { a, x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return polygamma_eager_fallback(a, x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Polygamma", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Polygamma", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor polygamma_eager_fallback(Tensor a, Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, x }; + object[] _attrs = new object[] { "T", a.dtype }; + var _result = _execute.execute("Polygamma", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Polygamma", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the power of one value to another. + /// + /// + /// + /// Given a tensor `x` and a tensor `y`, this operation computes \(x^y\) for + /// corresponding elements in `x` and `y`. 
For example: + /// + /// ``` + /// # tensor 'x' is [[2, 2]], [3, 3]] + /// # tensor 'y' is [[8, 16], [2, 3]] + /// tf.pow(x, y) ==> [[256, 65536], [9, 27]] + /// ``` + /// + /// + /// + /// + /// + public static Tensor pow(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pow", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return pow_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Pow", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Pow", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor pow_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Pow", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Pow", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the product of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor prod(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Prod", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return prod_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Prod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Prod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor prod_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Prod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Prod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Convert the quantized 'input' tensor into a lower-precision 'output', using the + /// + /// + /// + /// actual distribution of the values to maximize the usage of the lower bit depth + /// and adjusting the output min and max ranges accordingly. + /// + /// [input_min, input_max] are scalar floats that specify the range for the float + /// interpretation of the 'input' data. For example, if input_min is -1.0f and + /// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + /// + /// This operator tries to squeeze as much precision as possible into an output with + /// a lower bit depth by calculating the actual min and max values found in the + /// data. For example, maybe that quint16 input has no values lower than 16,384 and + /// none higher than 49,152. That means only half the range is actually needed, all + /// the float interpretations are between -0.5f and 0.5f, so if we want to compress + /// the data into a quint8 output, we can use that range rather than the theoretical + /// -1.0f to 1.0f that is suggested by the input min and max. + /// + /// In practice, this is most useful for taking output from operations like + /// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + /// may have large potential output ranges, but in practice have a distribution of + /// input values that only uses a small fraction of the possible range. By feeding + /// that output into this operator, we can reduce it from 32 bits down to 8 with + /// minimal loss of accuracy. + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. Should be a lower bit depth than Tinput. + /// + /// + /// + public static Tensor[] quantize_down_and_shrink_range(Tensor input, Tensor input_min, Tensor input_max, TF_DataType out_type, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeDownAndShrinkRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantize_down_and_shrink_range_eager_fallback(input, input_min, input_max, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizeDownAndShrinkRange", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizeDownAndShrinkRange", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantize_down_and_shrink_range_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "Tinput", input.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizeDownAndShrinkRange", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns x + y element-wise, working on quantized buffers. + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_add(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput = TF_DataType.TF_QINT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAdd", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary() { ["Toutput"] = Toutput } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_add_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput: Toutput, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["min_x"] = min_x; + keywords["max_x"] = max_x; + keywords["min_y"] = min_y; + keywords["max_y"] = max_y; + keywords["Toutput"] = Toutput; + var _op = tf.OpDefLib._apply_op_helper("QuantizedAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput") }; + _execute.record_gradient("QuantizedAdd", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_add_eager_fallback(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y, min_x, max_x, min_y, max_y }; + object[] _attrs = new object[] { "T1", x.dtype, "T2", y.dtype, "Toutput", Toutput }; + var _result = _execute.execute("QuantizedAdd", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedAdd", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Perform a quantized matrix multiplication of `a` by the matrix `b`. + /// + /// + /// + /// The inputs must be two-dimensional matrices and the inner dimension of + /// `a` (after being transposed if `transpose_a` is non-zero) must match the + /// outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// + /// The type of output produced by activation function + /// following this operation. + /// + /// + /// + public static Tensor[] quantized_mat_mul(Tensor a, Tensor b, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput = TF_DataType.TF_QINT32, bool transpose_a = false, bool transpose_b = false, TF_DataType Tactivation = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMul", name) { args = new object[] { a, b, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["Tactivation"] = Tactivation } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_eager_fallback(a, b, min_a, max_a, min_b, max_b, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, Tactivation: Tactivation, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["Tactivation"] = Tactivation; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "Tactivation", _op._get_attr_type("Tactivation") }; + _execute.record_gradient("QuantizedMatMul", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_eager_fallback(Tensor a, Tensor b, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput, bool transpose_a, bool transpose_b, TF_DataType Tactivation, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, min_a, max_a, min_b, max_b }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "Tactivation", Tactivation }; + var _result = _execute.execute("QuantizedMatMul", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMul", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns x * y element-wise, working on quantized buffers. + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_mul(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput = TF_DataType.TF_QINT32, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMul", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary() { ["Toutput"] = Toutput } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mul_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput: Toutput, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + keywords["min_x"] = min_x; + keywords["max_x"] = max_x; + keywords["min_y"] = min_y; + keywords["max_y"] = max_y; + keywords["Toutput"] = Toutput; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput") }; + _execute.record_gradient("QuantizedMul", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mul_eager_fallback(Tensor x, Tensor y, Tensor min_x, Tensor max_x, Tensor min_y, Tensor max_y, TF_DataType Toutput, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y, min_x, max_x, min_y, max_y }; + object[] _attrs = new object[] { "T1", x.dtype, "T2", y.dtype, "Toutput", Toutput }; + var _result = _execute.execute("QuantizedMul", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMul", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + /// + /// + /// bool; Whether the kernel should count the appearance or number of occurrences. + /// + /// + /// + public static Tensor ragged_bincount(Tensor splits, Tensor values, Tensor size, Tensor weights, bool binary_output = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RaggedBincount", name) { args = new object[] { splits, values, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return ragged_bincount_eager_fallback(splits, values, size, weights, binary_output: binary_output, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["splits"] = splits; + keywords["values"] = values; + keywords["size"] = size; + keywords["weights"] = weights; + keywords["binary_output"] = binary_output; + var _op = tf.OpDefLib._apply_op_helper("RaggedBincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T"), "binary_output", _op._get_attr_bool("binary_output") }; + _execute.record_gradient("RaggedBincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor ragged_bincount_eager_fallback(Tensor splits, Tensor values, Tensor size, Tensor weights, bool binary_output, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { splits, values, size, weights }; + object[] _attrs = new object[] { "Tidx", values.dtype, "T", weights.dtype, "binary_output", binary_output }; + var _result = _execute.execute("RaggedBincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RaggedBincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Creates a sequence of numbers. + /// + /// + /// + /// This operation creates a sequence of numbers that begins at `start` and + /// extends by increments of `delta` up to but not including `limit`. + /// + /// For example: + /// + /// ``` + /// # 'start' is 3 + /// # 'limit' is 18 + /// # 'delta' is 3 + /// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor range(Tensor start, Tensor limit, Tensor delta, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Range", name) { args = new object[] { start, limit, delta }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return range_eager_fallback(start, limit, delta, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["start"] = start; + keywords["limit"] = limit; + keywords["delta"] = delta; + var _op = tf.OpDefLib._apply_op_helper("Range", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Range", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor range_eager_fallback(Tensor start, Tensor limit, Tensor delta, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { start, limit, delta }; + object[] _attrs = new object[] { "Tidx", start.dtype }; + var _result = _execute.execute("Range", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Range", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the real part of a complex number. + /// + /// + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the real part of each element in `input`. All elements in + /// `input` must be complex numbers of the form \(a + bj\), where *a* is the real + /// part returned by this operation and *b* is the imaginary part. + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.real(input) ==> [-2.25, 3.25] + /// ``` + /// + /// + /// + /// + /// + public static Tensor real(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Real", name) { args = new object[] { input }, attrs = new Dictionary() { ["Tout"] = Tout } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return real_eager_fallback(input, Tout: Tout, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["Tout"] = Tout; + var _op = tf.OpDefLib._apply_op_helper("Real", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout") }; + _execute.record_gradient("Real", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor real_eager_fallback(Tensor input, TF_DataType Tout, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "Tout", Tout }; + var _result = _execute.execute("Real", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Real", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x / y element-wise for real types. + /// + /// + /// + /// If `x` and `y` are reals, this will return the floating-point division. 
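/// 
/// For example (an illustrative sketch; the values are not taken from the op definition):
/// 
/// ```
/// # 'x' is [7., 6., 5.], 'y' is [2., 2., 2.]
/// tf.realdiv(x, y) ==> [3.5, 3., 2.5]
/// ```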
+ /// + /// *NOTE*: `Div` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor real_div(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RealDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return real_div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("RealDiv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("RealDiv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor real_div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("RealDiv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RealDiv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the reciprocal of x element-wise. + /// + /// + /// + /// I.e., \(y = 1 / x\). + /// + /// + /// + /// + public static Tensor reciprocal(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reciprocal", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reciprocal_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Reciprocal", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Reciprocal", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reciprocal_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Reciprocal", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Reciprocal", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the inverse of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor reciprocal_grad(Tensor y, Tensor dy, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReciprocalGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return reciprocal_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("ReciprocalGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ReciprocalGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor reciprocal_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("ReciprocalGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReciprocalGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a range that covers the actual values present in a quantized tensor. + /// + /// + /// + /// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a + /// range that covers the actual values present in that tensor. This op is typically + /// used to produce the `requested_output_min` and `requested_output_max` for + /// `Requantize`. + /// + /// + /// + /// + /// + /// + public static Tensor[] requantization_range(Tensor input, Tensor input_min, Tensor input_max, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantization_range_eager_fallback(input, input_min, input_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + var _op = tf.OpDefLib._apply_op_helper("RequantizationRange", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput") }; + _execute.record_gradient("RequantizationRange", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantization_range_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "Tinput", input.dtype }; + var _result = _execute.execute("RequantizationRange", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RequantizationRange", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes requantization range per channel. + /// + /// + /// + /// + /// + /// + /// The maximum value of the output that needs to be clipped. + /// Example: set this to 6 for Relu6. 
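/// (A brief note on the rationale: for an activation such as Relu6 the true outputs lie in
/// [0, 6], so larger values observed in the quantized data carry no information and the
/// computed output range can safely be clipped at 6.)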
+ /// + /// + /// + public static Tensor[] requantization_range_per_channel(Tensor input, Tensor input_min, Tensor input_max, float clip_value_max, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRangePerChannel", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary() { ["clip_value_max"] = clip_value_max } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantization_range_per_channel_eager_fallback(input, input_min, input_max, clip_value_max: clip_value_max, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["clip_value_max"] = clip_value_max; + var _op = tf.OpDefLib._apply_op_helper("RequantizationRangePerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "clip_value_max", _op.get_attr("clip_value_max") }; + _execute.record_gradient("RequantizationRangePerChannel", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantization_range_per_channel_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, float clip_value_max, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max }; + object[] _attrs = new object[] { "T", input.dtype, "clip_value_max", clip_value_max }; + var _result = _execute.execute("RequantizationRangePerChannel", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RequantizationRangePerChannel", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Converts the quantized `input` tensor into a lower-precision `output`. + /// + /// + /// + /// Converts the quantized `input` tensor into a lower-precision `output`, using the + /// output range specified with `requested_output_min` and `requested_output_max`. + /// + /// `[input_min, input_max]` are scalar floats that specify the range for the float + /// interpretation of the `input` data. For example, if `input_min` is -1.0f and + /// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. Should be a lower bit depth than Tinput. + /// + /// + /// + public static Tensor[] requantize(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Requantize", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantize_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["requested_output_min"] = requested_output_min; + keywords["requested_output_max"] = requested_output_max; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("Requantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("Requantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantize_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max, requested_output_min, requested_output_max }; + object[] _attrs = new object[] { "Tinput", input.dtype, "out_type", out_type }; + var _result = _execute.execute("Requantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Requantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Requantizes input with min and max values known per channel. + /// + /// + /// + /// + /// + /// + /// + /// + /// The quantized type of output tensor that needs to be converted. + /// + /// + /// + public static Tensor[] requantize_per_channel(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizePerChannel", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return requantize_per_channel_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["input_min"] = input_min; + keywords["input_max"] = input_max; + keywords["requested_output_min"] = requested_output_min; + keywords["requested_output_max"] = requested_output_max; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("RequantizePerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("RequantizePerChannel", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] requantize_per_channel_eager_fallback(Tensor input, Tensor input_min, Tensor input_max, Tensor requested_output_min, Tensor requested_output_max, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, input_min, input_max, requested_output_min, requested_output_max }; + object[] _attrs = new object[] { "T", input.dtype, "out_type", out_type }; + var _result = _execute.execute("RequantizePerChannel", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RequantizePerChannel", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Returns element-wise integer closest to x. + /// + /// + /// + /// If the result is midway between two representable values, + /// the even representable is chosen. + /// For example: + /// + /// ``` + /// rint(-1.5) ==> -2.0 + /// rint(0.5000001) ==> 1.0 + /// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] + /// ``` + /// + /// + /// + /// + public static Tensor rint(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rint", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rint_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Rint", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Rint", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rint_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Rint", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Rint", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Rounds the values of a tensor to the nearest integer, element-wise. + /// + /// + /// + /// Rounds half to even. Also known as bankers rounding. If you want to round + /// according to the current system rounding mode use std::cint. + /// + /// + /// + /// + public static Tensor round(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Round", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return round_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Round", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Round", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor round_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Round", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Round", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes reciprocal of square root of x element-wise. + /// + /// + /// + /// I.e., \(y = 1 / sqrt{x}\). + /// + /// + /// + /// + public static Tensor rsqrt(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rsqrt", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rsqrt_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Rsqrt", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Rsqrt", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rsqrt_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Rsqrt", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Rsqrt", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the rsqrt of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor rsqrt_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RsqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return rsqrt_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("RsqrtGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("RsqrtGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor rsqrt_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("RsqrtGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("RsqrtGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the maximum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = max_j(data_j)\) where `max` is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the max is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. 
On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_max(c, tf.constant([0, 0, 1])).numpy() + /// array([[4, 3, 3, 4], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
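+ ///
+ /// A rough C# counterpart of the example above (an illustrative sketch, not produced by
+ /// the generator; it assumes `tf.constant` and the segment_max method defined below are in scope):
+ ///
+ /// ```csharp
+ /// var c = tf.constant(new[,] { { 1, 2, 3, 4 }, { 4, 3, 2, 1 }, { 5, 6, 7, 8 } });
+ /// var ids = tf.constant(new[] { 0, 0, 1 });
+ /// var maxima = segment_max(c, ids); // segment 0 -> [4, 3, 3, 4], segment 1 -> [5, 6, 7, 8]
+ /// ```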
+ /// + /// + /// + public static Tensor segment_max(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMax", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_max_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_max_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = rac{sum_j data_j}{N}\) where `mean` is + /// over `j` such that `segment_ids[j] == i` and `N` is the total number of + /// values summed. + /// + /// If the mean is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as a smaller following index when computing the numerator + /// of the mean. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_mean(c, tf.constant([0, 0, 1])).numpy() + /// array([[2.5, 2.5, 2.5, 2.5], + /// [5., 6., 7., 8.]], dtype=float32) + /// + /// + ///
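+ ///
+ /// A comparable C# sketch (illustrative only; assumes `tf.constant` and the segment_mean
+ /// method defined below):
+ ///
+ /// ```csharp
+ /// var c = tf.constant(new[,] { { 1f, 2f, 3f, 4f }, { 4f, 3f, 2f, 1f }, { 5f, 6f, 7f, 8f } });
+ /// var ids = tf.constant(new[] { 0, 0, 1 });
+ /// var means = segment_mean(c, ids); // segment 0 averages the first two rows -> [2.5, 2.5, 2.5, 2.5]
+ /// ```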
+ /// + /// + /// + public static Tensor segment_mean(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMean", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_mean_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentMean", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentMean", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_mean_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentMean", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentMean", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the minimum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = min_j(data_j)\) where `min` is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the min is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_min(c, tf.constant([0, 0, 1])).numpy() + /// array([[1, 2, 2, 1], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
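+ ///
+ /// A comparable C# sketch (illustrative only; assumes `tf.constant` and the segment_min
+ /// method defined below):
+ ///
+ /// ```csharp
+ /// var c = tf.constant(new[,] { { 1, 2, 3, 4 }, { 4, 3, 2, 1 }, { 5, 6, 7, 8 } });
+ /// var ids = tf.constant(new[] { 0, 0, 1 });
+ /// var minima = segment_min(c, ids); // segment 0 -> [1, 2, 2, 1], segment 1 -> [5, 6, 7, 8]
+ /// ```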
+ /// + /// + /// + public static Tensor segment_min(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMin", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_min_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_min_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the product along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = prod_j data_j\) where the product is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the product is empty for a given segment ID `i`, `output[i] = 1`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_prod(c, tf.constant([0, 0, 1])).numpy() + /// array([[4, 6, 6, 4], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
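+ ///
+ /// A comparable C# sketch (illustrative only; assumes `tf.constant` and the segment_prod
+ /// method defined below):
+ ///
+ /// ```csharp
+ /// var c = tf.constant(new[,] { { 1, 2, 3, 4 }, { 4, 3, 2, 1 }, { 5, 6, 7, 8 } });
+ /// var ids = tf.constant(new[] { 0, 0, 1 });
+ /// var products = segment_prod(c, ids); // segment 0 -> [4, 6, 6, 4], segment 1 -> [5, 6, 7, 8]
+ /// ```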
+ /// + /// + /// + public static Tensor segment_prod(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentProd", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_prod_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentProd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentProd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_prod_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentProd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentProd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output_i = sum_j data_j\) where sum is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the sum is empty for a given segment ID `i`, `output[i] = 0`. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be sorted, + /// and an error is thrown for indices that are not increasing. On GPU, this + /// does not throw an error for unsorted indices. On GPU, out-of-order indices + /// result in safe but unspecified behavior, which may include treating + /// out-of-order indices as the same as a smaller following index. + /// + ///
+ /// + ///
+ /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// >>> tf.math.segment_sum(c, tf.constant([0, 0, 1])).numpy() + /// array([[5, 5, 5, 5], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// + ///
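+ ///
+ /// A rough C# counterpart of the example above (an illustrative sketch, not produced by
+ /// the generator; it assumes `tf.constant` and the segment_sum method defined below are in scope):
+ ///
+ /// ```csharp
+ /// var c = tf.constant(new[,] { { 1, 2, 3, 4 }, { 4, 3, 2, 1 }, { 5, 6, 7, 8 } });
+ /// var ids = tf.constant(new[] { 0, 0, 1 });
+ /// var sums = segment_sum(c, ids); // segment 0 -> [5, 5, 5, 5], segment 1 -> [5, 6, 7, 8]
+ /// ```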
+ /// + /// + /// + public static Tensor segment_sum(Tensor data, Tensor segment_ids, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentSum", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return segment_sum_eager_fallback(data, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SegmentSum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices") }; + _execute.record_gradient("SegmentSum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor segment_sum_eager_fallback(Tensor data, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype }; + var _result = _execute.execute("SegmentSum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SegmentSum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Selects elements from `t` or `e`, depending on `condition`. + /// + /// + /// + /// The `t`, and `e` tensors must all have the same shape, and the + /// output will also have that shape. + /// + /// The `condition` tensor must be a scalar if `t` and `e` are scalars. + /// If `t` and `e` are vectors or higher rank, then `condition` must be either a + /// scalar, a vector with size matching the first dimension of `t`, or must have + /// the same shape as `t`. + /// + /// The `condition` tensor acts as a mask that chooses, based on the value at each + /// element, whether the corresponding element / row in the output should be + /// taken from `t` (if true) or `e` (if false). + /// + /// If `condition` is a vector and `t` and `e` are higher rank matrices, then + /// it chooses which row (outer dimension) to copy from `t` and `e`. + /// If `condition` has the same shape as `t` and `e`, then it chooses which + /// element to copy from `t` and `e`. + /// + /// For example: + /// + /// ```python + /// # 'condition' tensor is [[True, False] + /// # [False, True]] + /// # 't' is [[1, 2], + /// # [3, 4]] + /// # 'e' is [[5, 6], + /// # [7, 8]] + /// select(condition, t, e) # => [[1, 6], [7, 4]] + /// + /// + /// # 'condition' tensor is [True, False] + /// # 't' is [[1, 2], + /// # [3, 4]] + /// # 'e' is [[5, 6], + /// # [7, 8]] + /// select(condition, t, e) ==> [[1, 2], + /// [7, 8]] + /// + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor select(Tensor condition, Tensor t, Tensor e, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Select", name) { args = new object[] { condition, t, e }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return select_eager_fallback(condition, t, e, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["condition"] = condition; + keywords["t"] = t; + keywords["e"] = e; + var _op = tf.OpDefLib._apply_op_helper("Select", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Select", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor select_eager_fallback(Tensor condition, Tensor t, Tensor e, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { condition, t, e }; + object[] _attrs = new object[] { "T", t.dtype }; + var _result = _execute.execute("Select", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Select", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// + /// + /// + /// + /// + /// + public static Tensor select_v2(Tensor condition, Tensor t, Tensor e, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SelectV2", name) { args = new object[] { condition, t, e }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return select_v2_eager_fallback(condition, t, e, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["condition"] = condition; + keywords["t"] = t; + keywords["e"] = e; + var _op = tf.OpDefLib._apply_op_helper("SelectV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SelectV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor select_v2_eager_fallback(Tensor condition, Tensor t, Tensor e, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { condition, t, e }; + object[] _attrs = new object[] { "T", t.dtype }; + var _result = _execute.execute("SelectV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SelectV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes sigmoid of `x` element-wise. + /// + /// + /// + /// Specifically, `y = 1 / (1 + exp(-x))`. + /// + /// + /// + /// + public static Tensor sigmoid(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sigmoid", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sigmoid_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sigmoid", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sigmoid", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sigmoid_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sigmoid", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sigmoid", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of the sigmoid of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and + /// `dy` is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor sigmoid_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SigmoidGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sigmoid_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("SigmoidGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SigmoidGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sigmoid_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("SigmoidGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SigmoidGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns an element-wise indication of the sign of a number. + /// + /// + /// + /// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. + /// + /// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. + /// + /// Example usage: + /// >>> tf.math.sign([0., 2., -3.]) + /// + /// + /// + /// + /// + public static Tensor sign(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sign", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sign_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sign", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sign_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sign", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sign", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes sine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes sine of every + /// element in the tensor. Input range is `(-inf, inf)` and + /// output range is `[-1,1]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) + /// tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] + /// ``` + /// + /// + /// + /// + public static Tensor sin(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sin", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sin_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sin_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes hyperbolic sine of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes hyperbolic sine of every + /// element in the tensor. Input range is `[-inf,inf]` and output range + /// is `[-inf,inf]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + /// tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] + /// ``` + /// + /// + /// + /// + public static Tensor sinh(Tensor x, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sinh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sinh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sinh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sinh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sinh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sinh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sinh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Generates points from the Sobol sequence. + /// + /// + /// + /// Creates a Sobol sequence with `num_results` samples. Each sample has dimension + /// `dim`. Skips the first `skip` samples. + /// + /// + /// + /// + /// + /// + /// + /// The type of the sample. One of: `float32` or `float64`. + /// + /// + /// + public static Tensor sobol_sample(Tensor dim, Tensor num_results, Tensor skip, TF_DataType dtype = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SobolSample", name) { args = new object[] { dim, num_results, skip }, attrs = new Dictionary() { ["dtype"] = dtype } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sobol_sample_eager_fallback(dim, num_results, skip, dtype: dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["dim"] = dim; + keywords["num_results"] = num_results; + keywords["skip"] = skip; + keywords["dtype"] = dtype; + var _op = tf.OpDefLib._apply_op_helper("SobolSample", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") }; + _execute.record_gradient("SobolSample", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sobol_sample_eager_fallback(Tensor dim, Tensor num_results, Tensor skip, TF_DataType dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { dim, num_results, skip }; + object[] _attrs = new object[] { "dtype", dtype }; + var _result = _execute.execute("SobolSample", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SobolSample", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Counts the number of occurrences of each value in an integer array. + /// + /// + /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// bool; Whether the kernel should count the appearance or number of occurrences. + /// + /// + /// + public static Tensor sparse_bincount(Tensor indices, Tensor values, Tensor dense_shape, Tensor size, Tensor weights, bool binary_output = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseBincount", name) { args = new object[] { indices, values, dense_shape, size, weights }, attrs = new Dictionary() { ["binary_output"] = binary_output } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_bincount_eager_fallback(indices, values, dense_shape, size, weights, binary_output: binary_output, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["indices"] = indices; + keywords["values"] = values; + keywords["dense_shape"] = dense_shape; + keywords["size"] = size; + keywords["weights"] = weights; + keywords["binary_output"] = binary_output; + var _op = tf.OpDefLib._apply_op_helper("SparseBincount", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tidx", _op._get_attr_type("Tidx"), "T", _op._get_attr_type("T"), "binary_output", _op._get_attr_bool("binary_output") }; + _execute.record_gradient("SparseBincount", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_bincount_eager_fallback(Tensor indices, Tensor values, Tensor dense_shape, Tensor size, Tensor weights, bool binary_output, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { indices, values, dense_shape, size, weights }; + object[] _attrs = new object[] { "Tidx", values.dtype, "T", weights.dtype, "binary_output", binary_output }; + var _result = _execute.execute("SparseBincount", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseBincount", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Multiply matrix "a" by matrix "b". + /// + /// + /// + /// The inputs must be two-dimensional matrices and the inner dimension of "a" must + /// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not + /// `SparseTensor`s. This op is optimized for the case where at least one of "a" or + /// "b" is sparse, in the sense that they have a large proportion of zero values. + /// The breakeven for using this versus a dense matrix multiply on one platform was + /// 30% zero values in the sparse matrix. + /// + /// The gradient computation of this operation will only take advantage of sparsity + /// in the input gradient when that gradient comes from a Relu. + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false, bool a_is_sparse = false, bool b_is_sparse = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseMatMul", name) { args = new object[] { a, b }, attrs = new Dictionary() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["a_is_sparse"] = a_is_sparse, ["b_is_sparse"] = b_is_sparse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_mat_mul_eager_fallback(a, b, transpose_a: transpose_a, transpose_b: transpose_b, a_is_sparse: a_is_sparse, b_is_sparse: b_is_sparse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["a_is_sparse"] = a_is_sparse; + keywords["b_is_sparse"] = b_is_sparse; + var _op = tf.OpDefLib._apply_op_helper("SparseMatMul", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "a_is_sparse", _op._get_attr_bool("a_is_sparse"), "b_is_sparse", _op._get_attr_bool("b_is_sparse"), "Ta", _op._get_attr_type("Ta"), "Tb", _op._get_attr_type("Tb") }; + _execute.record_gradient("SparseMatMul", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_mat_mul_eager_fallback(Tensor a, Tensor b, bool transpose_a, bool transpose_b, bool a_is_sparse, bool b_is_sparse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b }; + object[] _attrs = new object[] { "transpose_a", transpose_a, "transpose_b", transpose_b, "a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse, "Ta", a.dtype, "Tb", b.dtype }; + var _result = _execute.execute("SparseMatMul", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseMatMul", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean along sparse segments of a tensor. + /// + /// + /// + /// See `tf.sparse.segment_sum` for usage examples. + /// + /// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first + /// dimension, selecting a subset of dimension 0, specified by `indices`. + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_mean(Tensor data, Tensor indices, Tensor segment_ids, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMean", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_mean_eager_fallback(data, indices, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentMean", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentMean", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_mean_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentMean", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentMean", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for SparseSegmentMean. + /// + /// + /// + /// Returns tensor "output" with same shape as grad, except for dimension 0 whose + /// value is output_dim0. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_mean_grad(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_mean_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["grad"] = grad; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["output_dim0"] = output_dim0; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentMeanGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentMeanGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_mean_grad_eager_fallback(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { grad, indices, segment_ids, output_dim0 }; + object[] _attrs = new object[] { "T", grad.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentMeanGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentMeanGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the mean along sparse segments of a tensor. + /// + /// + /// + /// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is + /// missing, the `output` tensor at that position will be zeroed. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_mean_with_num_segments(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_mean_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentMeanWithNumSegments", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tnumsegments", _op._get_attr_type("Tnumsegments"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentMeanWithNumSegments", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_mean_with_num_segments_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tnumsegments", num_segments.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentMeanWithNumSegments", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along sparse segments of a tensor divided by the sqrt of N. + /// + /// + /// + /// N is the size of the segment being reduced. + /// + /// See `tf.sparse.segment_sum` for usage examples. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sqrt_n(Tensor data, Tensor indices, Tensor segment_ids, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSqrtN", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sqrt_n_eager_fallback(data, indices, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSqrtN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSqrtN", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sqrt_n_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSqrtN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSqrtN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along sparse segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first + /// dimension, selecting a subset of dimension 0, specified by `indices`. + /// + /// For example: + /// + /// ```python + /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + /// + /// # Select two rows, one segment. + /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + /// # => [[0 0 0 0]] + /// + /// # Select two rows, two segment. + /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + /// # => [[ 1 2 3 4] + /// # [-1 -2 -3 -4]] + /// + /// # Select all rows, two segments. + /// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + /// # => [[0 0 0 0] + /// # [5 6 7 8]] + /// + /// # Which is equivalent to: + /// tf.segment_sum(c, tf.constant([0, 0, 1])) + /// ``` + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sum(Tensor data, Tensor indices, Tensor segment_ids, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSum", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sum_eager_fallback(data, indices, segment_ids, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sum_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for SparseSegmentSum. + /// + /// + /// + /// Returns tensor "output" with same shape as grad, except for dimension 0 whose + /// value is output_dim0. + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sum_grad(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sum_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["grad"] = grad; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["output_dim0"] = output_dim0; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSumGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSumGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sum_grad_eager_fallback(Tensor grad, Tensor indices, Tensor segment_ids, Tensor output_dim0, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { grad, indices, segment_ids, output_dim0 }; + object[] _attrs = new object[] { "T", grad.dtype, "Tidx", indices.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSumGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSumGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along sparse segments of a tensor. + /// + /// + /// + /// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is + /// missing, the `output` tensor at that position will be zeroed. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) + /// for an explanation of segments. + /// + /// For example: + /// + /// ```python + /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + /// + /// tf.sparse_segment_sum_with_num_segments( + /// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) + /// # => [[0 0 0 0] + /// # [0 0 0 0] + /// # [0 0 0 0]] + /// + /// tf.sparse_segment_sum_with_num_segments(c, + /// tf.constant([0, 1]), + /// tf.constant([0, 2], + /// num_segments=4)) + /// # => [[ 1 2 3 4] + /// # [ 0 0 0 0] + /// # [-1 -2 -3 -4] + /// # [ 0 0 0 0]] + /// ``` + /// + /// + /// + /// + /// + /// + /// + public static Tensor sparse_segment_sum_with_num_segments(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sparse_segment_sum_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["indices"] = indices; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("SparseSegmentSumWithNumSegments", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx"), "Tnumsegments", _op._get_attr_type("Tnumsegments"), "Tsegmentids", _op._get_attr_type("Tsegmentids") }; + _execute.record_gradient("SparseSegmentSumWithNumSegments", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sparse_segment_sum_with_num_segments_eager_fallback(Tensor data, Tensor indices, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, indices, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tidx", indices.dtype, "Tnumsegments", num_segments.dtype, "Tsegmentids", segment_ids.dtype }; + var _result = _execute.execute("SparseSegmentSumWithNumSegments", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes square root of x element-wise. + /// + /// + /// + /// I.e., \(y = sqrt{x} = x^{1/2}\). + /// + /// + /// + /// + public static Tensor sqrt(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sqrt", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sqrt_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Sqrt", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sqrt", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sqrt_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sqrt", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sqrt", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the sqrt of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` + /// is the corresponding input gradient. 
+ /// + /// + /// + /// + /// + public static Tensor sqrt_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sqrt_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("SqrtGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SqrtGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sqrt_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("SqrtGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SqrtGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes square of x element-wise. + /// + /// + /// + /// I.e., \(y = x * x = x^2\). + /// + /// + /// + /// + public static Tensor square(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Square", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return square_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Square", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Square", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor square_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Square", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Square", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns conj(x - y)(x - y) element-wise. + /// + /// + /// + /// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor squared_difference(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SquaredDifference", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return squared_difference_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("SquaredDifference", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SquaredDifference", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor squared_difference_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("SquaredDifference", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SquaredDifference", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x - y element-wise. + /// + /// + /// + /// *NOTE*: `Sub` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor sub(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sub", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sub_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Sub", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Sub", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sub_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Sub", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sub", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum of elements across dimensions of a tensor. + /// + /// + /// + /// Reduces `input` along the dimensions given in `reduction_indices`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_indices`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// + /// + /// + /// + /// + /// If true, retain reduced dimensions with length 1. + /// + /// + /// + public static Tensor sum(Tensor input, Tensor reduction_indices, bool keep_dims = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sum", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary() { ["keep_dims"] = keep_dims } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return sum_eager_fallback(input, reduction_indices, keep_dims: keep_dims, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["reduction_indices"] = reduction_indices; + keywords["keep_dims"] = keep_dims; + var _op = tf.OpDefLib._apply_op_helper("Sum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "keep_dims", _op._get_attr_bool("keep_dims"), "T", _op._get_attr_type("T"), "Tidx", _op._get_attr_type("Tidx") }; + _execute.record_gradient("Sum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor sum_eager_fallback(Tensor input, Tensor reduction_indices, bool keep_dims, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, reduction_indices }; + object[] _attrs = new object[] { "keep_dims", keep_dims, "T", input.dtype, "Tidx", reduction_indices.dtype }; + var _result = _execute.execute("Sum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Sum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes tan of x element-wise. + /// + /// + /// + /// Given an input tensor, this function computes tangent of every + /// element in the tensor. Input range is `(-inf, inf)` and + /// output range is `(-inf, inf)`. If input lies outside the boundary, `nan` + /// is returned. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] + /// ``` + /// + /// + /// + /// + public static Tensor tan(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tan", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tan_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Tan", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Tan", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tan_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Tan", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Tan", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes hyperbolic tangent of `x` element-wise. + /// + /// + /// + /// Given an input tensor, this function computes hyperbolic tangent of every + /// element in the tensor. 
Input range is `[-inf, inf]` and + /// output range is `[-1,1]`. + /// + /// >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) + /// >>> tf.math.tanh(x) + /// + /// + /// + /// + /// + /// + public static Tensor tanh(Tensor x, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tanh", name) { args = new object[] { x }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tanh_eager_fallback(x, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + var _op = tf.OpDefLib._apply_op_helper("Tanh", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Tanh", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tanh_eager_fallback(Tensor x, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Tanh", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Tanh", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient for the tanh of `x` wrt its input. + /// + /// + /// + /// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy` + /// is the corresponding input gradient. + /// + /// + /// + /// + /// + public static Tensor tanh_grad(Tensor y, Tensor dy, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TanhGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return tanh_grad_eager_fallback(y, dy, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["y"] = y; + keywords["dy"] = dy; + var _op = tf.OpDefLib._apply_op_helper("TanhGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TanhGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor tanh_grad_eager_fallback(Tensor y, Tensor dy, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y, dy }; + object[] _attrs = new object[] { "T", y.dtype }; + var _result = _execute.execute("TanhGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TanhGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns x / y element-wise for integer types. + /// + /// + /// + /// Truncation designates that negative numbers will round fractional quantities + /// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different + /// than Python semantics. See `FloorDiv` for a division function that matches + /// Python Semantics. + /// + /// *NOTE*: `TruncateDiv` supports broadcasting. 
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor truncate_div(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateDiv", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return truncate_div_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("TruncateDiv", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TruncateDiv", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor truncate_div_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("TruncateDiv", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TruncateDiv", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns element-wise remainder of division. This emulates C semantics in that + /// + /// + /// + /// the result here is consistent with a truncating divide. E.g. `truncate(x / y) * + /// y + truncate_mod(x, y) = x`. + /// + /// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// + /// + /// + /// + public static Tensor truncate_mod(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateMod", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return truncate_mod_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("TruncateMod", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("TruncateMod", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor truncate_mod_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("TruncateMod", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TruncateMod", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the maximum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. 
+ /// + /// This operator is similar to `tf.math.unsorted_segment_sum`, + /// Instead of computing the sum over segments, it computes the maximum such that: + /// + /// \(output_i = max_{j...} data[j...]\) where max is over tuples `j...` such + /// that `segment_ids[j...] == i`. + /// + /// If the maximum is empty for a given segment ID `i`, it outputs the smallest + /// possible value for the specific numeric type, + /// `output[i] = numeric_limits::lowest()`. + /// + /// If the given segment ID `i` is negative, then the corresponding value is + /// dropped, and will not be included in the result. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be less than + /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + /// does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + /// result in safe but unspecified behavior, which may include ignoring + /// out-of-bound indices or outputting a tensor with a 0 stored in the first + /// dimension of its shape if `num_segments` is 0. + /// + ///
+    ///
+    ///
+    ///
+    /// For example:
+    ///
+    /// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
+    /// >>> tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
+    /// array([[4, 3, 3, 4],
+    ///        [5, 6, 7, 8]], dtype=int32)
+    ///
+    ///
+ /// + /// + /// + /// + public static Tensor unsorted_segment_max(Tensor data, Tensor segment_ids, Tensor num_segments, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMax", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_max_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentMax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentMax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_max_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentMax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentMax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the minimum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// This operator is similar to `tf.math.unsorted_segment_sum`, + /// Instead of computing the sum over segments, it computes the minimum such that: + /// + /// \(output_i = min_{j...} data_[j...]\) where min is over tuples `j...` such + /// that `segment_ids[j...] == i`. + /// + /// If the minimum is empty for a given segment ID `i`, it outputs the largest + /// possible value for the specific numeric type, + /// `output[i] = numeric_limits::max()`. + /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + /// >>> tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + /// array([[1, 2, 2, 1], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// If the given segment ID `i` is negative, then the corresponding value is + /// dropped, and will not be included in the result. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be less than + /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + /// does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + /// result in safe but unspecified behavior, which may include ignoring + /// out-of-bound indices or outputting a tensor with a 0 stored in the first + /// dimension of its shape if `num_segments` is 0. + /// + /// + /// + /// + /// + /// + public static Tensor unsorted_segment_min(Tensor data, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMin", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_min_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentMin", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentMin", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_min_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentMin", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentMin", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the product along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// This operator is similar to `tf.math.unsorted_segment_sum`, + /// Instead of computing the sum over segments, it computes the product of all + /// entries belonging to a segment such that: + /// + /// \(output_i = prod_{j...} data[j...]\) where the product is over tuples + /// `j...` such that `segment_ids[j...] == i`. + /// + /// For example: + /// + /// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + /// >>> tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + /// array([[4, 6, 6, 4], + /// [5, 6, 7, 8]], dtype=int32) + /// + /// If there is no entry for a given segment ID `i`, it outputs 1. + /// + /// If the given segment ID `i` is negative, then the corresponding value is + /// dropped, and will not be included in the result. + /// Caution: On CPU, values in `segment_ids` are always validated to be less than + /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + /// does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + /// result in safe but unspecified behavior, which may include ignoring + /// out-of-bound indices or outputting a tensor with a 0 stored in the first + /// dimension of its shape if `num_segments` is 0. + /// + /// + /// + /// + /// + /// + /// + public static Tensor unsorted_segment_prod(Tensor data, Tensor segment_ids, Tensor num_segments, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentProd", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_prod_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentProd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentProd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_prod_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentProd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentProd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the sum along segments of a tensor. + /// + /// + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \(output[i] = sum_{j...} data[j...]\) where the sum is over tuples `j...` such + /// that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` + /// need not be sorted and need not cover all values in the full + /// range of valid values. + /// + /// If the sum is empty for a given segment ID `i`, `output[i] = 0`. + /// If the given segment ID `i` is negative, the value is dropped and will not be + /// added to the sum of the segment. + /// + /// `num_segments` should equal the number of distinct segment IDs. + /// + /// Caution: On CPU, values in `segment_ids` are always validated to be less than + /// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this + /// does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices + /// result in safe but unspecified behavior, which may include ignoring + /// out-of-bound indices or outputting a tensor with a 0 stored in the first + /// dimension of its shape if `num_segments` is 0. + /// + ///
+    ///
+    ///
+    ///
+    /// >>> c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]]
+    /// >>> tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy()
+    /// array([[5, 5, 5, 5],
+    ///        [5, 6, 7, 8]], dtype=int32)
+    ///
+    ///
+    ///
+ /// + /// + /// + /// + public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tensor num_segments, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentSum", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return unsorted_segment_sum_eager_fallback(data, segment_ids, num_segments, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["data"] = data; + keywords["segment_ids"] = segment_ids; + keywords["num_segments"] = num_segments; + var _op = tf.OpDefLib._apply_op_helper("UnsortedSegmentSum", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tindices", _op._get_attr_type("Tindices"), "Tnumsegments", _op._get_attr_type("Tnumsegments") }; + _execute.record_gradient("UnsortedSegmentSum", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor unsorted_segment_sum_eager_fallback(Tensor data, Tensor segment_ids, Tensor num_segments, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { data, segment_ids, num_segments }; + object[] _attrs = new object[] { "T", data.dtype, "Tindices", segment_ids.dtype, "Tnumsegments", num_segments.dtype }; + var _result = _execute.execute("UnsortedSegmentSum", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("UnsortedSegmentSum", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if x == 0, and x / y otherwise, elementwise. + /// + /// + /// + /// + public static Tensor xdivy(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xdivy", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return xdivy_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Xdivy", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Xdivy", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor xdivy_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Xdivy", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Xdivy", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. + /// + /// + /// + /// + public static Tensor xlog1py(Tensor x, Tensor y, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlog1py", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return xlog1py_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Xlog1py", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Xlog1py", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor xlog1py_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Xlog1py", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Xlog1py", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns 0 if x == 0, and x * log(y) otherwise, elementwise. + /// + /// + /// + /// + public static Tensor xlogy(Tensor x, Tensor y, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlogy", name) { args = new object[] { x, y }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return xlogy_eager_fallback(x, y, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["y"] = y; + var _op = tf.OpDefLib._apply_op_helper("Xlogy", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Xlogy", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor xlogy_eager_fallback(Tensor x, Tensor y, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, y }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Xlogy", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Xlogy", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Compute the Hurwitz zeta function \\(\zeta(x, q)\\). + /// + /// + /// + /// The Hurwitz zeta function is defined as: + /// + /// + /// \(zeta(x, q) = sum_{n=0}^{infty} (q + n)^{-x}\) + /// + /// + /// + /// + /// + public static Tensor zeta(Tensor x, Tensor q, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Zeta", name) { args = new object[] { x, q }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return zeta_eager_fallback(x, q, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["q"] = q; + var _op = tf.OpDefLib._apply_op_helper("Zeta", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Zeta", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor zeta_eager_fallback(Tensor x, Tensor q, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, q }; + object[] _attrs = new object[] { "T", x.dtype }; + var _result = _execute.execute("Zeta", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Zeta", _inputs_flat, _attrs, _result); + } + return _result[0]; } } diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs deleted file mode 100644 index 8e6e72d12..000000000 --- a/src/TensorFlowNET.Core/Operations/gen_math_ops.eager.cs +++ /dev/null @@ -1,11 +0,0 @@ -using System; -using static Tensorflow.Binding; - -namespace Tensorflow -{ - public static partial class gen_math_ops - { - public static Tensor mul(IntPtr x, IntPtr y, string name = null) - => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(x, y)); - } -} diff --git a/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs new file mode 100644 index 000000000..c0cec2785 --- /dev/null +++ b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs @@ -0,0 +1,8084 @@ +/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/ + +using Tensorflow.Eager; +using Tensorflow.Contexts; +using static Tensorflow.Binding; + +namespace Tensorflow; + +public static class gen_nn_ops +{ + /// + /// Returns min/max k values and their indices of the input operand in an approximate manner. + /// + /// + /// + /// See https://arxiv.org/abs/2206.14286 for the algorithm details. + /// This op is only optimized on TPU currently. + /// + /// + /// + /// + /// Specifies the number of min/max-k. + /// + /// + /// Integer dimension along which to search. Default: -1. + /// + /// + /// Recall target for the approximation. Range in (0,1] + /// + /// + /// When true, computes max-k; otherwise computes min-k. + /// + /// + /// + /// When set to a positive value, it overrides the size determined by + /// `input[reduction_dim]` for evaluating the recall. This option is useful when + /// the given `input` is only a subset of the overall computation in SPMD or + /// distributed pipelines, where the true input size cannot be deferred by the + /// `input` shape. + /// + /// + /// + /// + /// When true, aggregates approximate results to top-k. When false, returns the + /// approximate results. The number of the approximate results is implementation + /// defined and is greater equals to the specified `k`. 
+ /// + /// + /// + public static Tensor[] approx_top_k(Tensor input, int k = 0, int reduction_dimension = -1, float recall_target = 0.95f, bool is_max_k = true, int reduction_input_size_override = -1, bool aggregate_to_topk = true, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproxTopK", name) { args = new object[] { input }, attrs = new Dictionary() { ["k"] = k, ["reduction_dimension"] = reduction_dimension, ["recall_target"] = recall_target, ["is_max_k"] = is_max_k, ["reduction_input_size_override"] = reduction_input_size_override, ["aggregate_to_topk"] = aggregate_to_topk } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return approx_top_k_eager_fallback(input, k: k, reduction_dimension: reduction_dimension, recall_target: recall_target, is_max_k: is_max_k, reduction_input_size_override: reduction_input_size_override, aggregate_to_topk: aggregate_to_topk, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["reduction_dimension"] = reduction_dimension; + keywords["recall_target"] = recall_target; + keywords["is_max_k"] = is_max_k; + keywords["reduction_input_size_override"] = reduction_input_size_override; + keywords["aggregate_to_topk"] = aggregate_to_topk; + var _op = tf.OpDefLib._apply_op_helper("ApproxTopK", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "k", _op._get_attr_int("k"), "reduction_dimension", _op._get_attr_int("reduction_dimension"), "recall_target", _op.get_attr("recall_target"), "is_max_k", _op._get_attr_bool("is_max_k"), "reduction_input_size_override", _op._get_attr_int("reduction_input_size_override"), "aggregate_to_topk", _op._get_attr_bool("aggregate_to_topk"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("ApproxTopK", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] approx_top_k_eager_fallback(Tensor input, int k, int reduction_dimension, float recall_target, bool is_max_k, int reduction_input_size_override, bool aggregate_to_topk, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "k", k, "reduction_dimension", reduction_dimension, "recall_target", recall_target, "is_max_k", is_max_k, "reduction_input_size_override", reduction_input_size_override, "aggregate_to_topk", aggregate_to_topk, "T", input.dtype }; + var _result = _execute.execute("ApproxTopK", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ApproxTopK", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Performs average pooling on the input. + /// + /// + /// + /// Each entry in `output` is the mean of the corresponding size `ksize` + /// window in `value`. + /// + /// + /// + /// + /// + /// The size of the sliding window for each dimension of `value`. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of `value`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. 
+ /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool_eager_fallback(value, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPool", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool_eager_fallback(Tensor value, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", value.dtype }; + var _result = _execute.execute("AvgPool", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPool", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs 3D average pooling on the input. + /// + /// + /// + /// Each entry in `output` is the mean of the corresponding size `ksize` window in + /// `value`. + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool3d(Tensor input, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3D", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool3d_eager_fallback(input, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPool3D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPool3D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool3d_eager_fallback(Tensor input, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", input.dtype }; + var _result = _execute.execute("AvgPool3D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPool3D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of average pooling function. + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool3d_grad(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3DGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool3d_grad_eager_fallback(orig_input_shape, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["orig_input_shape"] = orig_input_shape; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPool3DGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPool3DGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool3d_grad_eager_fallback(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input_shape, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", grad.dtype }; + var _result = _execute.execute("AvgPool3DGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPool3DGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the average pooling function. + /// + /// + /// + /// + /// + /// The size of the sliding window for each dimension of the input. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor avg_pool_grad(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPoolGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return avg_pool_grad_eager_fallback(orig_input_shape, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input_shape"] = orig_input_shape; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("AvgPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("AvgPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor avg_pool_grad_eager_fallback(Tensor orig_input_shape, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input_shape, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", grad.dtype }; + var _result = _execute.execute("AvgPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("AvgPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Batch normalization. + /// + /// + /// + /// This op is deprecated. Prefer `tf.nn.batch_normalization`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// + /// + public static Tensor batch_norm_with_global_normalization(Tensor t, Tensor m, Tensor v, Tensor beta, Tensor gamma, float variance_epsilon, bool scale_after_normalization, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalization", name) { args = new object[] { t, m, v, beta, gamma }, attrs = new Dictionary() { ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return batch_norm_with_global_normalization_eager_fallback(t, m, v, beta, gamma, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["m"] = m; + keywords["v"] = v; + keywords["beta"] = beta; + keywords["gamma"] = gamma; + keywords["variance_epsilon"] = variance_epsilon; + keywords["scale_after_normalization"] = scale_after_normalization; + var _op = tf.OpDefLib._apply_op_helper("BatchNormWithGlobalNormalization", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "variance_epsilon", _op.get_attr("variance_epsilon"), "scale_after_normalization", _op._get_attr_bool("scale_after_normalization") }; + _execute.record_gradient("BatchNormWithGlobalNormalization", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor batch_norm_with_global_normalization_eager_fallback(Tensor t, Tensor m, Tensor v, Tensor beta, Tensor gamma, float variance_epsilon, bool scale_after_normalization, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, m, v, beta, gamma }; + object[] _attrs = new object[] { "T", t.dtype, "variance_epsilon", variance_epsilon, "scale_after_normalization", scale_after_normalization }; + var _result = _execute.execute("BatchNormWithGlobalNormalization", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Gradients for batch normalization. + /// + /// + /// + /// This op is deprecated. See `tf.nn.batch_normalization`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// + /// + public static Tensor[] batch_norm_with_global_normalization_grad(Tensor t, Tensor m, Tensor v, Tensor gamma, Tensor backprop, float variance_epsilon, bool scale_after_normalization, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalizationGrad", name) { args = new object[] { t, m, v, gamma, backprop }, attrs = new Dictionary() { ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return batch_norm_with_global_normalization_grad_eager_fallback(t, m, v, gamma, backprop, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["m"] = m; + keywords["v"] = v; + keywords["gamma"] = gamma; + keywords["backprop"] = backprop; + keywords["variance_epsilon"] = variance_epsilon; + keywords["scale_after_normalization"] = scale_after_normalization; + var _op = tf.OpDefLib._apply_op_helper("BatchNormWithGlobalNormalizationGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "variance_epsilon", _op.get_attr("variance_epsilon"), "scale_after_normalization", _op._get_attr_bool("scale_after_normalization") }; + _execute.record_gradient("BatchNormWithGlobalNormalizationGrad", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] batch_norm_with_global_normalization_grad_eager_fallback(Tensor t, Tensor m, Tensor v, Tensor gamma, Tensor backprop, float variance_epsilon, bool scale_after_normalization, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, m, v, gamma, backprop }; + object[] _attrs = new object[] { "T", t.dtype, "variance_epsilon", variance_epsilon, "scale_after_normalization", scale_after_normalization }; + var _result = _execute.execute("BatchNormWithGlobalNormalizationGrad", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Adds `bias` to `value`. + /// + /// + /// + /// This is a special case of `tf.add` where `bias` is restricted to be 1-D. + /// Broadcasting is supported, so `value` may have any number of dimensions. + /// + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the bias tensor will be added to the last dimension + /// of the value tensor. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// The tensor will be added to "in_channels", the third-to-the-last + /// dimension. + /// + /// + /// + public static Tensor bias_add(Tensor value, Tensor bias, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAdd", name) { args = new object[] { value, bias }, attrs = new Dictionary() { ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bias_add_eager_fallback(value, bias, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["bias"] = bias; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("BiasAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("BiasAdd", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bias_add_eager_fallback(Tensor value, Tensor bias, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value, bias }; + object[] _attrs = new object[] { "T", value.dtype, "data_format", data_format }; + var _result = _execute.execute("BiasAdd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BiasAdd", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// The backward operation for "BiasAdd" on the "bias" tensor. + /// + /// + /// + /// It accumulates all the values from out_backprop into the feature dimension. + /// For NHWC data format, the feature dimension is the last. For NCHW data format, + /// the feature dimension is the third-to-last. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the bias tensor will be added to the last dimension + /// of the value tensor. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// The tensor will be added to "in_channels", the third-to-the-last + /// dimension. + /// + /// + /// + public static Tensor bias_add_grad(Tensor out_backprop, string data_format = "NHWC", string? 
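+        // Illustrative call of the bias_add wrapper above (a sketch; `features` and `bias` are
+        // assumed tensors, with `bias` 1-D and matching the channel dimension):
+        //   var y     = bias_add(features, bias);                          // NHWC: adds along the last dim
+        //   var yNchw = bias_add(features, bias, data_format: "NCHW");     // adds along the channel dim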
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddGrad", name) { args = new object[] { out_backprop }, attrs = new Dictionary() { ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bias_add_grad_eager_fallback(out_backprop, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["out_backprop"] = out_backprop; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("BiasAddGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("BiasAddGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bias_add_grad_eager_fallback(Tensor out_backprop, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { out_backprop }; + object[] _attrs = new object[] { "T", out_backprop.dtype, "data_format", data_format }; + var _result = _execute.execute("BiasAddGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BiasAddGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Adds `bias` to `value`. + /// + /// + /// + /// This is a deprecated version of BiasAdd and will be soon removed. + /// + /// This is a special case of `tf.add` where `bias` is restricted to be 1-D. + /// Broadcasting is supported, so `value` may have any number of dimensions. + /// + /// + /// + /// + /// + public static Tensor bias_add_v1(Tensor value, Tensor bias, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddV1", name) { args = new object[] { value, bias }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return bias_add_v1_eager_fallback(value, bias, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["bias"] = bias; + var _op = tf.OpDefLib._apply_op_helper("BiasAddV1", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("BiasAddV1", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor bias_add_v1_eager_fallback(Tensor value, Tensor bias, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value, bias }; + object[] _attrs = new object[] { "T", value.dtype }; + var _result = _execute.execute("BiasAddV1", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("BiasAddV1", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a 2-D convolution given 4-D `input` and `filter` tensors. 
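+        // Conceptually, bias_add_grad reduces `out_backprop` over every dimension except the
+        // feature dimension (the last one for "NHWC"), so an NHWC gradient of shape
+        // [batch, h, w, c] yields a length-c vector. Sketch with an assumed gradient `dy`:
+        //   var db = bias_add_grad(dy, data_format: "NHWC");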
+ /// + /// + /// + /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + /// and a filter / kernel tensor of shape + /// `[filter_height, filter_width, in_channels, out_channels]`, this op + /// performs the following: + /// + /// 1. Flattens the filter to a 2-D matrix with shape + /// `[filter_height * filter_width * in_channels, output_channels]`. + /// 2. Extracts image patches from the input tensor to form a *virtual* + /// tensor of shape `[batch, out_height, out_width, + /// filter_height * filter_width * in_channels]`. + /// 3. For each patch, right-multiplies the filter matrix and the image patch + /// vector. + /// + /// In detail, with the default NHWC format, + /// + /// output[b, i, j, k] = + /// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * + /// filter[di, dj, q, k] + /// + /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same + /// horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 4. The stride of the sliding window for each + /// dimension of `input`. The dimension order is determined by the value of + /// `data_format`, see below for details. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv2d(Tensor input, Tensor filter, int[] strides, string padding, bool use_cudnn_on_gpu = true, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv2d_eager_fallback(input, filter, strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["use_cudnn_on_gpu"] = use_cudnn_on_gpu; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "use_cudnn_on_gpu", _op._get_attr_bool("use_cudnn_on_gpu"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv2d_eager_fallback(Tensor input, Tensor filter, int[] strides, bool use_cudnn_on_gpu, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. Must be in the same order as the dimension specified with + /// format. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + /// + /// + /// + /// + /// Specify the data format of the input and output data. 
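+        // A minimal conv2d sketch using the wrapper above (assumed tensors: `images` of shape
+        // [batch, h, w, in_ch] and `filters` of shape [fh, fw, in_ch, out_ch]):
+        //   var y = conv2d(images, filters,
+        //                  strides: new[] { 1, 1, 1, 1 },
+        //                  padding: "SAME");           // dilations default to { 1, 1, 1, 1 }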
With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor conv2d_backprop_filter(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, bool use_cudnn_on_gpu = true, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv2d_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter_sizes"] = filter_sizes; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["use_cudnn_on_gpu"] = use_cudnn_on_gpu; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "use_cudnn_on_gpu", _op._get_attr_bool("use_cudnn_on_gpu"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv2DBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv2d_backprop_filter_eager_fallback(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, bool use_cudnn_on_gpu, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter_sizes, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = 
_execute.execute("Conv2DBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv2DBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. Must be in the same order as the dimension specified with + /// format. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor conv2d_backprop_input(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, bool use_cudnn_on_gpu = true, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv2d_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides: strides, use_cudnn_on_gpu: use_cudnn_on_gpu, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input_sizes"] = input_sizes; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["use_cudnn_on_gpu"] = use_cudnn_on_gpu; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv2DBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "use_cudnn_on_gpu", _op._get_attr_bool("use_cudnn_on_gpu"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv2DBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv2d_backprop_input_eager_fallback(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, bool use_cudnn_on_gpu, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_sizes, filter, out_backprop }; + object[] _attrs = new object[] { "T", filter.dtype, "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv2DBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv2DBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a 3-D convolution given 5-D `input` and `filter` tensors. + /// + /// + /// + /// In signal processing, cross-correlation is a measure of similarity of + /// two waveforms as a function of a time-lag applied to one of them. This + /// is also known as a sliding dot product or sliding inner-product. + /// + /// Our Conv3D implements a form of cross-correlation. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. 
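+        // The two backprop ops above are the pieces a Conv2D gradient is assembled from:
+        // conv2d_backprop_input takes the forward input *shape* as an int32 vector, while
+        // conv2d_backprop_filter takes the filter *shape*. Sketch with assumed tensors:
+        //   var dInput  = conv2d_backprop_input(input_sizes, filters, dy, strides, padding);
+        //   var dFilter = conv2d_backprop_filter(images, filter_sizes, dy, strides, padding);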
+ /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv3d(Tensor input, Tensor filter, int[] strides, string padding, string data_format = "NDHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_eager_fallback(input, filter, strides: strides, padding: padding, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_eager_fallback(Tensor input, Tensor filter, int[] strides, string padding, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv3D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + public static Tensor conv3d_backprop_filter(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations = null, string? 
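+        // conv3d sketch (assumed 5-D tensors: `volumes` as [batch, depth, h, w, in_ch] and
+        // `filters3d` as [fd, fh, fw, in_ch, out_ch]); strides must have length 5 with
+        // strides[0] = strides[4] = 1:
+        //   var y = conv3d(volumes, filters3d, strides: new[] { 1, 1, 1, 1, 1 }, padding: "SAME");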
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_filter_eager_fallback(input, filter, out_backprop, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3DBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_filter_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("Conv3DBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv3d_backprop_filter_v2(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, string data_format = "NDHWC", int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilterV2", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_filter_v2_eager_fallback(input, filter_sizes, out_backprop, strides: strides, padding: padding, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter_sizes"] = filter_sizes; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropFilterV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3DBackpropFilterV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_filter_v2_eager_fallback(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter_sizes, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("Conv3DBackpropFilterV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + public static Tensor conv3d_backprop_input(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_input_eager_fallback(input, filter, out_backprop, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("Conv3DBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_input_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("Conv3DBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 3-D convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + /// + /// 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor conv3d_backprop_input_v2(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, string data_format = "NDHWC", int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInputV2", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return conv3d_backprop_input_v2_eager_fallback(input_sizes, filter, out_backprop, strides: strides, padding: padding, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input_sizes"] = input_sizes; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("Conv3DBackpropInputV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations"), "Tshape", _op._get_attr_type("Tshape") }; + _execute.record_gradient("Conv3DBackpropInputV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor conv3d_backprop_input_v2_eager_fallback(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_sizes, filter, out_backprop }; + object[] _attrs = new object[] { "T", filter.dtype, "strides", strides, "padding", padding, "data_format", data_format, "dilations", dilations, "Tshape", input_sizes.dtype }; + var _result = _execute.execute("Conv3DBackpropInputV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Conv3DBackpropInputV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Returns the dimension index in the destination data format given the one in + /// + /// + /// + /// the source data format. + /// + /// + /// + /// + /// + /// source data format. + /// + /// + /// + /// + /// destination data format. + /// + /// + /// + public static Tensor data_format_dim_map(Tensor x, string src_format = "NHWC", string dst_format = "NCHW", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatDimMap", name) { args = new object[] { x }, attrs = new Dictionary() { ["src_format"] = src_format, ["dst_format"] = dst_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return data_format_dim_map_eager_fallback(x, src_format: src_format, dst_format: dst_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (src_format is null) + { + src_format = "NHWC"; + } + if (dst_format is null) + { + dst_format = "NCHW"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["src_format"] = src_format; + keywords["dst_format"] = dst_format; + var _op = tf.OpDefLib._apply_op_helper("DataFormatDimMap", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "src_format", _op.get_attr("src_format"), "dst_format", _op.get_attr("dst_format") }; + _execute.record_gradient("DataFormatDimMap", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor data_format_dim_map_eager_fallback(Tensor x, string src_format, string dst_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "src_format", src_format, "dst_format", dst_format }; + var _result = _execute.execute("DataFormatDimMap", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DataFormatDimMap", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Permute input tensor from `src_format` to `dst_format`. + /// + /// + /// + /// Given source and destination format strings of length n=4 or 5, the input + /// tensor must be a vector of size n or n-2, or a 2D tensor of shape + /// (n, 2) or (n-2, 2). + /// + /// If the first dimension of the input tensor is n-2, it is assumed that + /// non-spatial dimensions are omitted (i.e `N`, `C`). + /// + /// For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: + /// ``` + /// [1, 2, 3, 4] + /// ``` + /// , the output will be: + /// ``` + /// [1, 4, 2, 3] + /// ``` + /// With `src_format` of `NDHWC`, `dst_format` of `NCDHW`, and input: + /// ``` + /// [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]] + /// ``` + /// , the output will be: + /// ``` + /// [[1, 6], [5, 10], [2, 7], [3, 8], [4, 9]] + /// ``` + /// With `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: + /// ``` + /// [1, 2] + /// ``` + /// , the output will be: + /// ``` + /// [1, 2] + /// ``` + /// + /// + /// + /// + /// + /// source data format. + /// + /// + /// + /// + /// destination data format. + /// + /// + /// + public static Tensor data_format_vec_permute(Tensor x, string src_format = "NHWC", string dst_format = "NCHW", string? 
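+        // Worked example for the two data-format helpers (values follow the op docs above):
+        // with src_format "NHWC" and dst_format "NCHW",
+        //   data_format_dim_map     maps axis indices        [0, 1, 2, 3] -> [0, 2, 3, 1]
+        //   data_format_vec_permute maps a shape/stride list [1, 2, 3, 4] -> [1, 4, 2, 3]
+        // Sketch (the int32 input tensors are assumed to exist):
+        //   var mapped   = data_format_dim_map(axes);
+        //   var permuted = data_format_vec_permute(shape_vec);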
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatVecPermute", name) { args = new object[] { x }, attrs = new Dictionary() { ["src_format"] = src_format, ["dst_format"] = dst_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return data_format_vec_permute_eager_fallback(x, src_format: src_format, dst_format: dst_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (src_format is null) + { + src_format = "NHWC"; + } + if (dst_format is null) + { + dst_format = "NCHW"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["src_format"] = src_format; + keywords["dst_format"] = dst_format; + var _op = tf.OpDefLib._apply_op_helper("DataFormatVecPermute", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "src_format", _op.get_attr("src_format"), "dst_format", _op.get_attr("dst_format") }; + _execute.record_gradient("DataFormatVecPermute", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor data_format_vec_permute_eager_fallback(Tensor x, string src_format, string dst_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x }; + object[] _attrs = new object[] { "T", x.dtype, "src_format", src_format, "dst_format", dst_format }; + var _result = _execute.execute("DataFormatVecPermute", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DataFormatVecPermute", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. + /// + /// + /// + /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + /// and a filter / kernel tensor of shape + /// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing + /// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies + /// a different filter to each input channel (expanding from 1 channel to + /// `channel_multiplier` channels for each), then concatenates the results + /// together. Thus, the output has `in_channels * channel_multiplier` channels. + /// + /// ``` + /// for k in 0..in_channels-1 + /// for q in 0..channel_multiplier-1 + /// output[b, i, j, k * channel_multiplier + q] = + /// sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * + /// filter[di, dj, k, q] + /// ``` + /// + /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same + /// horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. 
If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor depthwise_conv2d_native(Tensor input, Tensor filter, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNative", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depthwise_conv2d_native_eager_fallback(input, filter, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNative", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("DepthwiseConv2dNative", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depthwise_conv2d_native_eager_fallback(Tensor input, Tensor filter, int[] strides, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("DepthwiseConv2dNative", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthwiseConv2dNative", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of depthwise convolution with respect to the filter. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. 
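+        // Depthwise sketch (assumed tensors): for `images` of shape [batch, h, w, in_ch] and a
+        // filter of shape [fh, fw, in_ch, channel_multiplier], the result has
+        // in_ch * channel_multiplier output channels, since each input channel is filtered
+        // independently:
+        //   var y = depthwise_conv2d_native(images, dwFilter,
+        //                                   strides: new[] { 1, 1, 1, 1 }, padding: "SAME");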
+ /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor depthwise_conv2d_native_backprop_filter(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depthwise_conv2d_native_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter_sizes"] = filter_sizes; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNativeBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("DepthwiseConv2dNativeBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depthwise_conv2d_native_backprop_filter_eager_fallback(Tensor input, Tensor filter_sizes, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter_sizes, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("DepthwiseConv2dNativeBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradients of 
depthwise convolution with respect to the input. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// of the convolution. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// + /// + public static Tensor depthwise_conv2d_native_backprop_input(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return depthwise_conv2d_native_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input_sizes"] = input_sizes; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("DepthwiseConv2dNativeBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("DepthwiseConv2dNativeBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor depthwise_conv2d_native_backprop_input_eager_fallback(Tensor input_sizes, Tensor filter, Tensor out_backprop, int[] strides, string padding, int[] explicit_paddings, string data_format, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input_sizes, filter, out_backprop }; + object[] _attrs = new object[] { "T", filter.dtype, "strides", strides, "padding", 
padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "dilations", dilations }; + var _result = _execute.execute("DepthwiseConv2dNativeBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. + /// + /// + /// + /// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + /// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + /// input channel is processed independently of the others with its own structuring + /// function. The `output` tensor has shape + /// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output + /// tensor depend on the `padding` algorithm. We currently only support the default + /// "NHWC" `data_format`. + /// + /// In detail, the grayscale morphological 2-D dilation is the max-sum correlation + /// (for consistency with `conv2d`, we use unmirrored filters): + /// + /// output[b, y, x, c] = + /// max_{dy, dx} input[b, + /// strides[1] * y + rates[1] * dy, + /// strides[2] * x + rates[2] * dx, + /// c] + + /// filter[dy, dx, c] + /// + /// Max-pooling is a special case when the filter has size equal to the pooling + /// kernel size and contains all zeros. + /// + /// Note on duality: The dilation of `input` by the `filter` is equal to the + /// negation of the erosion of `-input` by the reflected `filter`. + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// + /// + /// + /// + /// The input stride for atrous morphological dilation. Must be: + /// `[1, rate_height, rate_width, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor dilation2d(Tensor input, Tensor filter, int[] strides, int[] rates, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2D", name) { args = new object[] { input, filter }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dilation2d_eager_fallback(input, filter, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("Dilation2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("Dilation2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dilation2d_eager_fallback(Tensor input, Tensor filter, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "rates", rates, "padding", padding }; + var _result = _execute.execute("Dilation2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dilation2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of morphological 2-D dilation with respect to the filter. + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// + /// + /// + /// + /// 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: `[1, rate_height, rate_width, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor dilation2d_backprop_filter(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dilation2d_backprop_filter_eager_fallback(input, filter, out_backprop, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("Dilation2DBackpropFilter", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("Dilation2DBackpropFilter", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dilation2d_backprop_filter_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "rates", rates, "padding", padding }; + var _result = _execute.execute("Dilation2DBackpropFilter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dilation2DBackpropFilter", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the gradient of morphological 2-D dilation with respect to the input. + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// + /// + /// + /// + /// 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: `[1, rate_height, rate_width, 1]`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor dilation2d_backprop_input(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return dilation2d_backprop_input_eager_fallback(input, filter, out_backprop, strides: strides, rates: rates, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["out_backprop"] = out_backprop; + keywords["strides"] = strides; + keywords["rates"] = rates; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("Dilation2DBackpropInput", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("Dilation2DBackpropInput", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor dilation2d_backprop_input_eager_fallback(Tensor input, Tensor filter, Tensor out_backprop, int[] strides, int[] rates, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, out_backprop }; + object[] _attrs = new object[] { "T", input.dtype, "strides", strides, "rates", rates, "padding", padding }; + var _result = _execute.execute("Dilation2DBackpropInput", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Dilation2DBackpropInput", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes the exponential linear function. + /// + /// + /// + /// The ELU function is defined as: + /// + /// * $ e ^ x - 1 $ if $ x < 0 $ + /// * $ x $ if $ x >= 0 $ + /// + /// Examples: + /// + /// >>> tf.nn.elu(1.0) + /// + /// >>> tf.nn.elu(0.0) + /// + /// >>> tf.nn.elu(-1000.0) + /// + /// + /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + /// ](http://arxiv.org/abs/1511.07289) + /// + /// + /// + /// + public static Tensor elu(Tensor features, string? 
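The ELU definition and the three doc-comment examples above can be checked with a few lines of plain C#, independently of the generated wrapper:

    using System;

    static double Elu(double x) => x < 0 ? Math.Exp(x) - 1.0 : x;

    Console.WriteLine(Elu(1.0));      // 1
    Console.WriteLine(Elu(0.0));      // 0
    Console.WriteLine(Elu(-1000.0));  // -1 (saturates at -1 for large negative inputs)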
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Elu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return elu_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Elu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Elu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor elu_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Elu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Elu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for the exponential linear (Elu) operation. + /// + /// + /// + /// + public static Tensor elu_grad(Tensor gradients, Tensor outputs, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return elu_grad_eager_fallback(gradients, outputs, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["outputs"] = outputs; + var _op = tf.OpDefLib._apply_op_helper("EluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("EluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor elu_grad_eager_fallback(Tensor gradients, Tensor outputs, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, outputs }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("EluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("EluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs fractional average pooling on the input. + /// + /// + /// + /// Fractional average pooling is similar to Fractional max pooling in the pooling + /// region generation step. The only difference is that after pooling regions are + /// generated, a mean operation is performed instead of a max operation in each + /// pooling region. + /// + /// + /// + /// + /// + /// Pooling ratio for each dimension of `value`, currently only + /// supports row and col dimension and should be >= 1.0. For example, a valid + /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + /// must be 1.0 because we don't allow pooling on batch and channels + /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + /// respectively. 
+ /// + /// + /// + /// + /// When set to True, generates the pooling sequence in a + /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + /// difference between pseudorandom and random. + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [41/3, 26/3] for fractional avg pooling. + /// + /// + /// + /// + /// When set to True, a fixed pooling region will be used when + /// iterating over a FractionalAvgPool node in the computation graph. Mainly used + /// in unit test to make FractionalAvgPool deterministic. + /// + /// + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// + /// + /// An second seed to avoid seed collision. + /// + /// + /// + public static Tensor[] fractional_avg_pool(Tensor value, float[] pooling_ratio, bool pseudo_random = false, bool overlapping = false, bool deterministic = false, int seed = 0, int seed2 = 0, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fractional_avg_pool_eager_fallback(value, pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["pooling_ratio"] = pooling_ratio; + keywords["pseudo_random"] = pseudo_random; + keywords["overlapping"] = overlapping; + keywords["deterministic"] = deterministic; + keywords["seed"] = seed; + keywords["seed2"] = seed2; + var _op = tf.OpDefLib._apply_op_helper("FractionalAvgPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random", _op._get_attr_bool("pseudo_random"), "overlapping", _op._get_attr_bool("overlapping"), "deterministic", _op._get_attr_bool("deterministic"), "seed", _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalAvgPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fractional_avg_pool_eager_fallback(Tensor value, float[] pooling_ratio, bool pseudo_random, bool overlapping, bool deterministic, int seed, int seed2, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random, "overlapping", overlapping, "deterministic", deterministic, "seed", seed, "seed2", seed2, "T", value.dtype }; + var _result = 
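The overlapping example in the FractionalAvgPool doc comment above (pooling sequence [0, 2, 4] over the row 20 5 16 3 7, boundary index 2 shared by both cells) works out as follows in plain C#:

    using System;
    using System.Linq;

    double[] value = { 20, 5, 16, 3, 7 };
    int[] seq = { 0, 2, 4 };                              // pooling boundaries
    for (int i = 0; i + 1 < seq.Length; i++)
    {
        // with overlapping = true the right boundary index belongs to both cells
        var cell = value.Skip(seq[i]).Take(seq[i + 1] - seq[i] + 1);
        Console.WriteLine(cell.Average());                // 41/3 ≈ 13.67, then 26/3 ≈ 8.67
    }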
_execute.execute("FractionalAvgPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalAvgPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes gradient of the FractionalAvgPool function. + /// + /// + /// + /// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for + /// FractionalAvgPoolGrad, we just need to evenly back-propagate each element of + /// out_backprop to those indices that form the same pooling cell. Therefore, we + /// just need to know the shape of original input tensor, instead of the whole + /// tensor. + /// + /// + /// + /// + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [41/3, 26/3] for fractional avg pooling. + /// + /// + /// + public static Tensor fractional_avg_pool_grad(Tensor orig_input_tensor_shape, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPoolGrad", name) { args = new object[] { orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary() { ["overlapping"] = overlapping } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fractional_avg_pool_grad_eager_fallback(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping: overlapping, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["orig_input_tensor_shape"] = orig_input_tensor_shape; + keywords["out_backprop"] = out_backprop; + keywords["row_pooling_sequence"] = row_pooling_sequence; + keywords["col_pooling_sequence"] = col_pooling_sequence; + keywords["overlapping"] = overlapping; + var _op = tf.OpDefLib._apply_op_helper("FractionalAvgPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "overlapping", _op._get_attr_bool("overlapping"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalAvgPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fractional_avg_pool_grad_eager_fallback(Tensor orig_input_tensor_shape, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence }; + object[] _attrs = new object[] { "overlapping", overlapping, "T", out_backprop.dtype }; + var _result = _execute.execute("FractionalAvgPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalAvgPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs fractional max pooling on the input. + /// + /// + /// + /// Fractional max pooling is slightly different than regular max pooling. 
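A small sketch of the "evenly back-propagate" rule from the FractionalAvgPoolGrad summary above, on the same 1-D example; splitting each output gradient equally over its (overlapping) cell is an assumption drawn from that prose, not from code in this patch:

    using System;

    double[] outBackprop = { 3.0, 6.0 };   // one incoming gradient per pooling cell
    int[] seq = { 0, 2, 4 };               // pooling boundaries, overlapping = true
    var inBackprop = new double[5];
    for (int i = 0; i + 1 < seq.Length; i++)
    {
        int len = seq[i + 1] - seq[i] + 1;            // cell size, boundary included
        for (int j = seq[i]; j <= seq[i + 1]; j++)
            inBackprop[j] += outBackprop[i] / len;    // even split; index 2 receives from both cells
    }
    Console.WriteLine(string.Join(", ", inBackprop)); // 1, 1, 3, 2, 2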
In + /// regular max pooling, you downsize an input set by taking the maximum value of + /// smaller N x N subsections of the set (often 2x2), and try to reduce the set by + /// a factor of N, where N is an integer. Fractional max pooling, as you might + /// expect from the word "fractional", means that the overall reduction ratio N + /// does not have to be an integer. + /// + /// The sizes of the pooling regions are generated randomly but are fairly uniform. + /// For example, let's look at the height dimension, and the constraints on the + /// list of rows that will be pool boundaries. + /// + /// First we define the following: + /// + /// 1. input_row_length : the number of rows from the input set + /// 2. output_row_length : which will be smaller than the input + /// 3. alpha = input_row_length / output_row_length : our reduction ratio + /// 4. K = floor(alpha) + /// 5. row_pooling_sequence : this is the result list of pool boundary rows + /// + /// Then, row_pooling_sequence should satisfy: + /// + /// 1. a[0] = 0 : the first value of the sequence is 0 + /// 2. a[end] = input_row_length : the last value of the sequence is the size + /// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size + /// 4. length(row_pooling_sequence) = output_row_length+1 + /// + /// For more details on fractional max pooling, see this paper: + /// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) + /// + /// + /// + /// + /// + /// Pooling ratio for each dimension of `value`, currently only + /// supports row and col dimension and should be >= 1.0. For example, a valid + /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + /// must be 1.0 because we don't allow pooling on batch and channels + /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + /// respectively. + /// + /// + /// + /// + /// When set to True, generates the pooling sequence in a + /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + /// difference between pseudorandom and random. + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [20, 16] for fractional max pooling. + /// + /// + /// + /// + /// When set to True, a fixed pooling region will be used when + /// iterating over a FractionalMaxPool node in the computation graph. Mainly used + /// in unit test to make FractionalMaxPool deterministic. + /// + /// + /// + /// + /// If either seed or seed2 are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// + /// + /// + /// + /// An second seed to avoid seed collision. + /// + /// + /// + public static Tensor[] fractional_max_pool(Tensor value, float[] pooling_ratio, bool pseudo_random = false, bool overlapping = false, bool deterministic = false, int seed = 0, int seed2 = 0, string? 
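Running the max variant over the same 1-D example reproduces the [20, 16] result quoted above, and the row_pooling_sequence constraints listed in the doc comment can be checked mechanically (here input_row_length = 5 and output_row_length = 2, so alpha = 2.5 and K = 2):

    using System;
    using System.Linq;

    double[] value = { 20, 5, 16, 3, 7 };
    int[] cells = { 0, 2, 4 };   // overlapping pooling cells from the doc-comment example
    for (int i = 0; i + 1 < cells.Length; i++)
        Console.WriteLine(value.Skip(cells[i]).Take(cells[i + 1] - cells[i] + 1).Max()); // 20, then 16

    int[] rowPoolingSequence = { 0, 2, 5 };   // a valid boundary sequence for length 5 -> 2
    bool ok = rowPoolingSequence.First() == 0
           && rowPoolingSequence.Last() == 5
           && rowPoolingSequence.Zip(rowPoolingSequence.Skip(1), (a, b) => b - a).All(d => d == 2 || d == 3)
           && rowPoolingSequence.Length == 2 + 1;
    Console.WriteLine(ok);   // True: intervals are K or K+1 and the sequence has output_row_length + 1 entries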
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPool", name) { args = new object[] { value }, attrs = new Dictionary() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fractional_max_pool_eager_fallback(value, pooling_ratio: pooling_ratio, pseudo_random: pseudo_random, overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["value"] = value; + keywords["pooling_ratio"] = pooling_ratio; + keywords["pseudo_random"] = pseudo_random; + keywords["overlapping"] = overlapping; + keywords["deterministic"] = deterministic; + keywords["seed"] = seed; + keywords["seed2"] = seed2; + var _op = tf.OpDefLib._apply_op_helper("FractionalMaxPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random", _op._get_attr_bool("pseudo_random"), "overlapping", _op._get_attr_bool("overlapping"), "deterministic", _op._get_attr_bool("deterministic"), "seed", _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalMaxPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fractional_max_pool_eager_fallback(Tensor value, float[] pooling_ratio, bool pseudo_random, bool overlapping, bool deterministic, int seed, int seed2, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { value }; + object[] _attrs = new object[] { "pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random, "overlapping", overlapping, "deterministic", deterministic, "seed", seed, "seed2", seed2, "T", value.dtype }; + var _result = _execute.execute("FractionalMaxPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalMaxPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes gradient of the FractionalMaxPool function. + /// + /// + /// + /// + /// + /// + /// + /// + /// When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [20, 16] for fractional max pooling. + /// + /// + /// + public static Tensor fractional_max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPoolGrad", name) { args = new object[] { orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary() { ["overlapping"] = overlapping } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fractional_max_pool_grad_eager_fallback(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping: overlapping, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["out_backprop"] = out_backprop; + keywords["row_pooling_sequence"] = row_pooling_sequence; + keywords["col_pooling_sequence"] = col_pooling_sequence; + keywords["overlapping"] = overlapping; + var _op = tf.OpDefLib._apply_op_helper("FractionalMaxPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "overlapping", _op._get_attr_bool("overlapping"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("FractionalMaxPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fractional_max_pool_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor out_backprop, Tensor row_pooling_sequence, Tensor col_pooling_sequence, bool overlapping, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence }; + object[] _attrs = new object[] { "overlapping", overlapping, "T", orig_input.dtype }; + var _result = _execute.execute("FractionalMaxPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FractionalMaxPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon = 0.0001f, float exponential_avg_factor = 1f, string data_format = "NHWC", bool is_training = true, string? 
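The FusedBatchNorm wrapper declared above returns a Tensor[] (the normalized output plus batch statistics). The per-channel arithmetic it fuses is the standard batch-norm formula; the formula itself is not spelled out in the patch, so the plain-C# sketch below is an illustration under that assumption:

    using System;

    // y = scale * (x - mean) / sqrt(variance + epsilon) + offset, applied per channel
    double[] x = { 1.0, 2.0, 3.0, 4.0 };
    double mean = 2.5, variance = 1.25, epsilon = 0.0001, scale = 1.0, offset = 0.0;
    foreach (var v in x)
        Console.WriteLine(scale * (v - mean) / Math.Sqrt(variance + epsilon) + offset);
    // ≈ -1.342, -0.447, 0.447, 1.342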
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNorm", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_eager_fallback(x, scale, offset, mean, variance, epsilon: epsilon, exponential_avg_factor: exponential_avg_factor, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["scale"] = scale; + keywords["offset"] = offset; + keywords["mean"] = mean; + keywords["variance"] = variance; + keywords["epsilon"] = epsilon; + keywords["exponential_avg_factor"] = exponential_avg_factor; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNorm", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", _op.get_attr("exponential_avg_factor"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNorm", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_eager_fallback(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon, float exponential_avg_factor, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, scale, offset, mean, variance }; + object[] _attrs = new object[] { "T", x.dtype, "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNorm", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNorm", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Gradient for batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_grad(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon = 0.0001f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGrad", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_grad_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon: epsilon, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["y_backprop"] = y_backprop; + keywords["x"] = x; + keywords["scale"] = scale; + keywords["reserve_space_1"] = reserve_space_1; + keywords["reserve_space_2"] = reserve_space_2; + keywords["epsilon"] = epsilon; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "epsilon", _op.get_attr("epsilon"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormGrad", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_grad_eager_fallback(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }; + object[] _attrs = new object[] { "T", y_backprop.dtype, "epsilon", epsilon, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormGrad", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormGrad", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Gradient for batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_grad_v2(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon = 0.0001f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV2", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_grad_v2_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon: epsilon, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["y_backprop"] = y_backprop; + keywords["x"] = x; + keywords["scale"] = scale; + keywords["reserve_space_1"] = reserve_space_1; + keywords["reserve_space_2"] = reserve_space_2; + keywords["epsilon"] = epsilon; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormGradV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormGradV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_grad_v2_eager_fallback(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, float epsilon, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }; + object[] _attrs = new object[] { "T", y_backprop.dtype, "U", reserve_space_1.dtype, "epsilon", epsilon, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormGradV2", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormGradV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Gradient for batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// The data format for y_backprop, x, x_backprop. + /// Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_grad_v3(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, Tensor reserve_space_3, float epsilon = 0.0001f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV3", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3 }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_grad_v3_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon: epsilon, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["y_backprop"] = y_backprop; + keywords["x"] = x; + keywords["scale"] = scale; + keywords["reserve_space_1"] = reserve_space_1; + keywords["reserve_space_2"] = reserve_space_2; + keywords["reserve_space_3"] = reserve_space_3; + keywords["epsilon"] = epsilon; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormGradV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormGradV3", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_grad_v3_eager_fallback(Tensor y_backprop, Tensor x, Tensor scale, Tensor reserve_space_1, Tensor reserve_space_2, Tensor reserve_space_3, float epsilon, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3 }; + object[] _attrs = new object[] { "T", y_backprop.dtype, "U", reserve_space_1.dtype, "epsilon", epsilon, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormGradV3", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormGradV3", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_v2(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon = 0.0001f, float exponential_avg_factor = 1f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV2", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_v2_eager_fallback(x, scale, offset, mean, variance, epsilon: epsilon, exponential_avg_factor: exponential_avg_factor, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["scale"] = scale; + keywords["offset"] = offset; + keywords["mean"] = mean; + keywords["variance"] = variance; + keywords["epsilon"] = epsilon; + keywords["exponential_avg_factor"] = exponential_avg_factor; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", _op.get_attr("exponential_avg_factor"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_v2_eager_fallback(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon, float exponential_avg_factor, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, scale, offset, mean, variance }; + object[] _attrs = new object[] { "T", x.dtype, "U", scale.dtype, "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormV2", 5, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormV2", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Batch normalization. + /// + /// + /// + /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + /// The size of 1D Tensors matches the dimension C of the 4D Tensors. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number added to the variance of x. + /// + /// + /// + /// + /// + /// The data format for x and y. Either "NHWC" (default) or "NCHW". + /// + /// + /// + /// + /// A bool value to indicate the operation is for training (default) + /// or inference. + /// + /// + /// + public static Tensor[] fused_batch_norm_v3(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon = 0.0001f, float exponential_avg_factor = 1f, string data_format = "NHWC", bool is_training = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV3", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return fused_batch_norm_v3_eager_fallback(x, scale, offset, mean, variance, epsilon: epsilon, exponential_avg_factor: exponential_avg_factor, data_format: data_format, is_training: is_training, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["x"] = x; + keywords["scale"] = scale; + keywords["offset"] = offset; + keywords["mean"] = mean; + keywords["variance"] = variance; + keywords["epsilon"] = epsilon; + keywords["exponential_avg_factor"] = exponential_avg_factor; + keywords["data_format"] = data_format; + keywords["is_training"] = is_training; + var _op = tf.OpDefLib._apply_op_helper("FusedBatchNormV3", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", _op.get_attr("exponential_avg_factor"), "data_format", _op.get_attr("data_format"), "is_training", _op._get_attr_bool("is_training") }; + _execute.record_gradient("FusedBatchNormV3", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] fused_batch_norm_v3_eager_fallback(Tensor x, Tensor scale, Tensor offset, Tensor mean, Tensor variance, float epsilon, float exponential_avg_factor, string data_format, bool is_training, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { x, scale, offset, mean, variance }; + object[] _attrs = new object[] { "T", x.dtype, "U", scale.dtype, "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, "data_format", data_format, "is_training", is_training }; + var _result = _execute.execute("FusedBatchNormV3", 6, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedBatchNormV3", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Performs a padding as a preprocess during a convolution. + /// + /// + /// + /// Similar to FusedResizeAndPadConv2d, this op allows for an optimized + /// implementation where the spatial padding transformation stage is fused with the + /// im2col lookup, but in this case without the bilinear filtering required for + /// resizing. Fusing the padding prevents the need to write out the intermediate + /// results as whole tensors, reducing memory pressure, and we can get some latency + /// gains by merging the transformation calculations. + /// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' + /// order is used instead. + /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + /// + /// + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. 
Must be in the same order as the dimension specified with format. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor fused_pad_conv2d(Tensor input, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedPadConv2D", name) { args = new object[] { input, paddings, filter }, attrs = new Dictionary() { ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fused_pad_conv2d_eager_fallback(input, paddings, filter, mode: mode, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["paddings"] = paddings; + keywords["filter"] = filter; + keywords["mode"] = mode; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("FusedPadConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("FusedPadConv2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fused_pad_conv2d_eager_fallback(Tensor input, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, paddings, filter }; + object[] _attrs = new object[] { "T", input.dtype, "mode", mode, "strides", strides, "padding", padding }; + var _result = _execute.execute("FusedPadConv2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedPadConv2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs a resize and padding as a preprocess during a convolution. + /// + /// + /// + /// It's often possible to do spatial transformations more efficiently as part of + /// the packing stage of a convolution, so this op allows for an optimized + /// implementation where these stages are fused together. This prevents the need to + /// write out the intermediate results as whole tensors, reducing memory pressure, + /// and we can get some latency gains by merging the transformation calculations. + /// The data_format attribute for Conv2D isn't supported by this op, and defaults to + /// 'NHWC' order. + /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// + /// + /// + /// + /// 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. Must be in the same order as the dimension specified with format. + /// + /// + /// + /// + /// The type of padding algorithm to use. 
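FusedPadConv2D above (and the FusedResizeAndPadConv2D variant) are described as fused versions of an explicit pad/resize followed by a convolution. A hedged usage sketch of that equivalence: tf.pad and tf.nn.conv2d are assumptions about the wider TensorFlow.NET surface outside this patch, input and filter stand for already-built NHWC/HWIO tensors, and the class hosting the generated wrapper is not shown in this hunk:

    var paddings = tf.constant(new int[,] { { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
    var fused = fused_pad_conv2d(input, paddings, filter,
                                 mode: "REFLECT", strides: new[] { 1, 1, 1, 1 }, padding: "VALID");
    var reference = tf.nn.conv2d(tf.pad(input, paddings, mode: "REFLECT"), filter,
                                 strides: new[] { 1, 1, 1, 1 }, padding: "VALID");
    // fused and reference should agree numerically; the fused kernel simply avoids
    // materialising the padded intermediate tensor (the motivation stated in the summary).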
+ /// + /// + /// + public static Tensor fused_resize_and_pad_conv2d(Tensor input, Tensor size, Tensor paddings, Tensor filter, string mode, int[] strides, string padding, bool resize_align_corners = false, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedResizeAndPadConv2D", name) { args = new object[] { input, size, paddings, filter }, attrs = new Dictionary() { ["resize_align_corners"] = resize_align_corners, ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return fused_resize_and_pad_conv2d_eager_fallback(input, size, paddings, filter, resize_align_corners: resize_align_corners, mode: mode, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["size"] = size; + keywords["paddings"] = paddings; + keywords["filter"] = filter; + keywords["resize_align_corners"] = resize_align_corners; + keywords["mode"] = mode; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("FusedResizeAndPadConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "resize_align_corners", _op._get_attr_bool("resize_align_corners"), "mode", _op.get_attr("mode"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("FusedResizeAndPadConv2D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor fused_resize_and_pad_conv2d_eager_fallback(Tensor input, Tensor size, Tensor paddings, Tensor filter, bool resize_align_corners, string mode, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, size, paddings, filter }; + object[] _attrs = new object[] { "T", input.dtype, "resize_align_corners", resize_align_corners, "mode", mode, "strides", strides, "padding", padding }; + var _result = _execute.execute("FusedResizeAndPadConv2D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Says whether the targets are in the top `K` predictions. + /// + /// + /// + /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + /// prediction for the target class is among the top `k` predictions among + /// all predictions for example `i`. Note that the behavior of `InTopK` differs + /// from the `TopK` op in its handling of ties; if multiple classes have the + /// same prediction value and straddle the top-`k` boundary, all of those + /// classes are considered to be in the top `k`. + /// + /// More formally, let + /// + /// \(predictions_i\) be the predictions for all classes for example `i`, + /// \(targets_i\) be the target class for example `i`, + /// \(out_i\) be the output for example `i`, + /// + /// $$out_i = predictions_{i, targets_i} in TopKIncludingTies(predictions_i)$$ + /// + /// + /// + /// + /// + /// + /// Number of top elements to look at for computing precision. + /// + /// + /// + public static Tensor in_top_k(Tensor predictions, Tensor targets, int k = 0, string? 
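The tie-handling behaviour spelled out in the InTopK summary above can be mimicked on plain arrays; the second row below has two classes tied at the top-1 boundary and still counts as "in":

    using System;
    using System.Linq;

    float[][] predictions = { new[] { 0.1f, 0.7f, 0.2f }, new[] { 0.5f, 0.5f, 0.0f } };
    int[] targets = { 2, 1 };
    int k = 1;
    for (int i = 0; i < targets.Length; i++)
    {
        float kth = predictions[i].OrderByDescending(p => p).ElementAt(k - 1);
        bool inTopK = predictions[i][targets[i]] >= kth;   // ">=" keeps boundary ties in the top k
        Console.WriteLine(inTopK);                         // False, then True
    }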
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopK", name) { args = new object[] { predictions, targets }, attrs = new Dictionary() { ["k"] = k } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return in_top_k_eager_fallback(predictions, targets, k: k, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["predictions"] = predictions; + keywords["targets"] = targets; + keywords["k"] = k; + var _op = tf.OpDefLib._apply_op_helper("InTopK", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "k", _op._get_attr_int("k"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("InTopK", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor in_top_k_eager_fallback(Tensor predictions, Tensor targets, int k, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { predictions, targets }; + object[] _attrs = new object[] { "k", k, "T", targets.dtype }; + var _result = _execute.execute("InTopK", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InTopK", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Says whether the targets are in the top `K` predictions. + /// + /// + /// + /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + /// prediction for the target class is among the top `k` predictions among + /// all predictions for example `i`. Note that the behavior of `InTopK` differs + /// from the `TopK` op in its handling of ties; if multiple classes have the + /// same prediction value and straddle the top-`k` boundary, all of those + /// classes are considered to be in the top `k`. + /// + /// More formally, let + /// + /// \(predictions_i\) be the predictions for all classes for example `i`, + /// \(targets_i\) be the target class for example `i`, + /// \(out_i\) be the output for example `i`, + /// + /// $$out_i = predictions_{i, targets_i} in TopKIncludingTies(predictions_i)$$ + /// + /// + /// + /// + /// + /// + public static Tensor in_top_kv2(Tensor predictions, Tensor targets, Tensor k, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopKV2", name) { args = new object[] { predictions, targets, k }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return in_top_kv2_eager_fallback(predictions, targets, k, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["predictions"] = predictions; + keywords["targets"] = targets; + keywords["k"] = k; + var _op = tf.OpDefLib._apply_op_helper("InTopKV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("InTopKV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor in_top_kv2_eager_fallback(Tensor predictions, Tensor targets, Tensor k, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { predictions, targets, k }; + object[] _attrs = new object[] { "T", targets.dtype }; + var _result = _execute.execute("InTopKV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("InTopKV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Solves a batch of isotonic regression problems. + /// + /// + /// + /// Dtype of output. + /// + /// + public static Tensor[] isotonic_regression(Tensor input, TF_DataType output_dtype = TF_DataType.TF_FLOAT, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsotonicRegression", name) { args = new object[] { input }, attrs = new Dictionary() { ["output_dtype"] = output_dtype } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return isotonic_regression_eager_fallback(input, output_dtype: output_dtype, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["output_dtype"] = output_dtype; + var _op = tf.OpDefLib._apply_op_helper("IsotonicRegression", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "output_dtype", _op._get_attr_type("output_dtype") }; + _execute.record_gradient("IsotonicRegression", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] isotonic_regression_eager_fallback(Tensor input, TF_DataType output_dtype, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "output_dtype", output_dtype }; + var _result = _execute.execute("IsotonicRegression", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("IsotonicRegression", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Local Response Normalization. + /// + /// + /// + /// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last + /// dimension), and each vector is normalized independently. Within a given vector, + /// each component is divided by the weighted, squared sum of inputs within + /// `depth_radius`. 
In detail, + /// + /// sqr_sum[a, b, c, d] = + /// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + /// output = input / (bias + alpha * sqr_sum) ** beta + /// + /// For details, see [Krizhevsky et al., ImageNet classification with deep + /// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). + /// + /// + /// + /// + /// + /// 0-D. Half-width of the 1-D normalization window. + /// + /// + /// + /// + /// An offset (usually positive to avoid dividing by 0). + /// + /// + /// + /// + /// A scale factor, usually positive. + /// + /// + /// + /// + /// An exponent. + /// + /// + /// + public static Tensor lrn(Tensor input, int depth_radius = 5, float bias = 1f, float alpha = 1f, float beta = 0.5f, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LRN", name) { args = new object[] { input }, attrs = new Dictionary() { ["depth_radius"] = depth_radius, ["bias"] = bias, ["alpha"] = alpha, ["beta"] = beta } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return lrn_eager_fallback(input, depth_radius: depth_radius, bias: bias, alpha: alpha, beta: beta, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["depth_radius"] = depth_radius; + keywords["bias"] = bias; + keywords["alpha"] = alpha; + keywords["beta"] = beta; + var _op = tf.OpDefLib._apply_op_helper("LRN", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "depth_radius", _op._get_attr_int("depth_radius"), "bias", _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta", _op.get_attr("beta"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("LRN", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor lrn_eager_fallback(Tensor input, int depth_radius, float bias, float alpha, float beta, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "depth_radius", depth_radius, "bias", bias, "alpha", alpha, "beta", beta, "T", input.dtype }; + var _result = _execute.execute("LRN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LRN", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear: `max(features, features * alpha)`. + /// + /// + /// + /// + public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string? 
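+        // Hypothetical usage sketch for the leaky_relu wrapper declared here (called from inside the
+        // same generated static class; `x` is an assumed float tensor):
+        //   var y = leaky_relu(x, alpha: 0.2f);
+        //   // element-wise max(x, alpha * x): -2 -> max(-2, -0.4) = -0.4, 0 -> 0, 3 -> 3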
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyRelu", name) { args = new object[] { features }, attrs = new Dictionary() { ["alpha"] = alpha } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return leaky_relu_eager_fallback(features, alpha: alpha, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["alpha"] = alpha; + var _op = tf.OpDefLib._apply_op_helper("LeakyRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("LeakyRelu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor leaky_relu_eager_fallback(Tensor features, float alpha, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "alpha", alpha, "T", features.dtype }; + var _result = _execute.execute("LeakyRelu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LeakyRelu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear gradients for a LeakyRelu operation. + /// + /// + /// + /// + /// + public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float alpha = 0.2f, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { ["alpha"] = alpha } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return leaky_relu_grad_eager_fallback(gradients, features, alpha: alpha, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + keywords["alpha"] = alpha; + var _op = tf.OpDefLib._apply_op_helper("LeakyReluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("LeakyReluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor leaky_relu_grad_eager_fallback(Tensor gradients, Tensor features, float alpha, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "alpha", alpha, "T", gradients.dtype }; + var _result = _execute.execute("LeakyReluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LeakyReluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes log softmax activations. + /// + /// + /// + /// For each batch `i` and class `j` we have + /// + /// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + /// + /// + /// + /// + public static Tensor log_softmax(Tensor logits, string? 
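+        // Worked example of the log_softmax formula quoted above,
+        //   logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))):
+        // for logits[i] = [1, 2, 3], log(e^1 + e^2 + e^3) ~= 3.4076, so the row becomes roughly
+        // [-2.41, -1.41, -0.41]; exponentiating those values sums to 1, which is the normalization
+        // property downstream losses rely on.
+        //   var ls = log_softmax(logits);   // `logits` is an assumed float tensor; call sketched from inside the generated class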
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogSoftmax", name) { args = new object[] { logits }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return log_softmax_eager_fallback(logits, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["logits"] = logits; + var _op = tf.OpDefLib._apply_op_helper("LogSoftmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("LogSoftmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor log_softmax_eager_fallback(Tensor logits, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { logits }; + object[] _attrs = new object[] { "T", logits.dtype }; + var _result = _execute.execute("LogSoftmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("LogSoftmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs max pooling on the input. + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool(Tensor input, int[] ksize, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", string? 
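+        // Hypothetical usage sketch for the max_pool wrapper declared here, with the default NHWC
+        // layout described above (`images` is an assumed [batch, height, width, channels] tensor):
+        //   // a 2x2 window with stride 2 and "VALID" padding halves the spatial dimensions,
+        //   // e.g. [1, 4, 4, 1] -> [1, 2, 2, 1]
+        //   var pooled = max_pool(images, ksize: new[] { 1, 2, 2, 1 }, strides: new[] { 1, 2, 2, 1 }, padding: "VALID");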
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_eager_fallback(input, ksize: ksize, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("MaxPool", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_eager_fallback(Tensor input, int[] ksize, int[] strides, string padding, int[] explicit_paddings, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "T", input.dtype, "ksize", ksize, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format }; + var _result = _execute.execute("MaxPool", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs 3D max pooling on the input. + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool3d(Tensor input, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
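+        // Hypothetical usage sketch for the max_pool3d wrapper declared here: ksize and strides are
+        // length-5 arrays over [batch, depth, height, width, channels], and the constraint above
+        // requires the batch and channel entries to be 1 (`volume` is an assumed rank-5 NDHWC tensor):
+        //   var pooled3d = max_pool3d(volume, ksize: new[] { 1, 2, 2, 2, 1 }, strides: new[] { 1, 2, 2, 2, 1 }, padding: "VALID");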
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3D", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool3d_eager_fallback(input, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool3D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPool3D", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool3d_eager_fallback(Tensor input, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", input.dtype }; + var _result = _execute.execute("MaxPool3D", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool3D", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of 3D max pooling function. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool3d_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool3d_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool3DGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T"), "TInput", _op._get_attr_type("TInput") }; + _execute.record_gradient("MaxPool3DGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool3d_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", grad.dtype, "TInput", orig_input.dtype }; + var _result = _execute.execute("MaxPool3DGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool3DGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// + /// + /// + /// + /// 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool3d_grad_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NDHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool3d_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NDHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPool3DGradGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPool3DGradGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool3d_grad_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPool3DGradGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPool3DGradGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, int[] explicit_paddings = null, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (explicit_paddings is null) + { + explicit_paddings = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, explicit_paddings: explicit_paddings, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["explicit_paddings"] = explicit_paddings; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "explicit_paddings", _op.get_attr("explicit_paddings"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, int[] explicit_paddings, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "explicit_paddings", explicit_paddings, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad_grad(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize: ksize, strides: strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_grad_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, int[] ksize, int[] strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGradGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad_grad_v2(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradV2", name) { args = new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradGradV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradGradV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_grad_v2_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad, ksize, strides }; + object[] _attrs = new object[] { "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGradGradV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradGradV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes second-order gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Whether to include batch dimension in flattened index of `argmax`. + /// + /// + /// + public static Tensor max_pool_grad_grad_with_argmax(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_grad_with_argmax_eager_fallback(input, grad, argmax, ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["grad"] = grad; + keywords["argmax"] = argmax; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["include_batch_in_index"] = include_batch_in_index; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradGradWithArgmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "include_batch_in_index", _op._get_attr_bool("include_batch_in_index"), "Targmax", _op._get_attr_type("Targmax"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradGradWithArgmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_grad_with_argmax_eager_fallback(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, grad, argmax }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "include_batch_in_index", include_batch_in_index, "Targmax", argmax.dtype, "T", input.dtype }; + var _result = _execute.execute("MaxPoolGradGradWithArgmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_grad_v2(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradV2", name) { args = new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["orig_input"] = orig_input; + keywords["orig_output"] = orig_output; + keywords["grad"] = grad; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_v2_eager_fallback(Tensor orig_input, Tensor orig_output, Tensor grad, Tensor ksize, Tensor strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { orig_input, orig_output, grad, ksize, strides }; + object[] _attrs = new object[] { "padding", padding, "data_format", data_format, "T", orig_input.dtype }; + var _result = _execute.execute("MaxPoolGradV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients of the maxpooling function. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Whether to include batch dimension in flattened index of `argmax`. + /// + /// + /// + public static Tensor max_pool_grad_with_argmax(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index = false, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_grad_with_argmax_eager_fallback(input, grad, argmax, ksize: ksize, strides: strides, padding: padding, include_batch_in_index: include_batch_in_index, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["grad"] = grad; + keywords["argmax"] = argmax; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["include_batch_in_index"] = include_batch_in_index; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolGradWithArgmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "include_batch_in_index", _op._get_attr_bool("include_batch_in_index"), "Targmax", _op._get_attr_type("Targmax"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolGradWithArgmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_grad_with_argmax_eager_fallback(Tensor input, Tensor grad, Tensor argmax, int[] ksize, int[] strides, string padding, bool include_batch_in_index, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, grad, argmax }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "padding", padding, "include_batch_in_index", include_batch_in_index, "Targmax", argmax.dtype, "T", input.dtype }; + var _result = _execute.execute("MaxPoolGradWithArgmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs max pooling on the input. + /// + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// + /// + public static Tensor max_pool_v2(Tensor input, Tensor ksize, Tensor strides, string padding, string data_format = "NHWC", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolV2", name) { args = new object[] { input, ksize, strides }, attrs = new Dictionary() { ["padding"] = padding, ["data_format"] = data_format } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return max_pool_v2_eager_fallback(input, ksize, strides, padding: padding, data_format: data_format, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (data_format is null) + { + data_format = "NHWC"; + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["data_format"] = data_format; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding"), "data_format", _op.get_attr("data_format") }; + _execute.record_gradient("MaxPoolV2", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor max_pool_v2_eager_fallback(Tensor input, Tensor ksize, Tensor strides, string padding, string data_format, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, ksize, strides }; + object[] _attrs = new object[] { "T", input.dtype, "padding", padding, "data_format", data_format }; + var _result = _execute.execute("MaxPoolV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolV2", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Performs max pooling on the input and outputs both max values and indices. + /// + /// + /// + /// The indices in `argmax` are flattened, so that a maximum value at position + /// `[b, y, x, c]` becomes flattened index: + /// `(y * width + x) * channels + c` if `include_batch_in_index` is False; + /// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + /// + /// The indices returned are always in `[0, height) x [0, width)` before flattening, + /// even if padding is involved and the mathematically correct answer is outside + /// (either negative or too large). This is a bug, but fixing it is difficult to do + /// in a safe backwards compatible way, especially due to flattening. + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// Whether to include batch dimension in flattened index of `argmax`. + /// + /// + /// + public static Tensor[] max_pool_with_argmax(Tensor input, int[] ksize, int[] strides, string padding, TF_DataType Targmax = TF_DataType.TF_INT64, bool include_batch_in_index = false, string? 
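+        // Worked example of the argmax index formula quoted above (include_batch_in_index = false):
+        // with width = 4 and channels = 3, a maximum found at (y = 1, x = 2, c = 0) is reported as
+        //   (y * width + x) * channels + c = (1 * 4 + 2) * 3 + 0 = 18.
+        // With include_batch_in_index = true, height = 4 and batch b = 1, the same position becomes
+        //   ((b * height + y) * width + x) * channels + c = ((1 * 4 + 1) * 4 + 2) * 3 + 0 = 66.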
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolWithArgmax", name) { args = new object[] { input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["Targmax"] = Targmax, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return max_pool_with_argmax_eager_fallback(input, ksize: ksize, strides: strides, Targmax: Targmax, padding: padding, include_batch_in_index: include_batch_in_index, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["Targmax"] = Targmax; + keywords["padding"] = padding; + keywords["include_batch_in_index"] = include_batch_in_index; + var _op = tf.OpDefLib._apply_op_helper("MaxPoolWithArgmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "Targmax", _op._get_attr_type("Targmax"), "padding", _op.get_attr("padding"), "include_batch_in_index", _op._get_attr_bool("include_batch_in_index"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("MaxPoolWithArgmax", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] max_pool_with_argmax_eager_fallback(Tensor input, int[] ksize, int[] strides, TF_DataType Targmax, string padding, bool include_batch_in_index, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "ksize", ksize, "strides", strides, "Targmax", Targmax, "padding", padding, "include_batch_in_index", include_batch_in_index, "T", input.dtype }; + var _result = _execute.execute("MaxPoolWithArgmax", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("MaxPoolWithArgmax", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds values of the `n`-th order statistic for the last dimension. + /// + /// + /// + /// If the input is a vector (rank-1), finds the entries which is the nth-smallest + /// value in the vector and outputs their values as scalar tensor. + /// + /// For matrices (resp. higher rank input), computes the entries which is the + /// nth-smallest value in each row (resp. vector along the last dimension). Thus, + /// + /// values.shape = input.shape[:-1] + /// + /// + /// + /// + /// + /// + /// When set to True, find the nth-largest value in the vector and vice + /// versa. + /// + /// + /// + public static Tensor nth_element(Tensor input, Tensor n, bool reverse = false, string? 
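+        // Hypothetical usage sketch for the nth_element wrapper declared here: the last dimension is
+        // reduced away (values.shape = input.shape[:-1]). For a [2, 5] input, n = 0 yields the
+        // per-row minimum as a [2] tensor, and reverse: true yields the per-row maximum instead.
+        //   var smallest = nth_element(input, tf.constant(0));   // `input` is an assumed numeric tensor; n is passed as a Tensor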
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NthElement", name) { args = new object[] { input, n }, attrs = new Dictionary() { ["reverse"] = reverse } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return nth_element_eager_fallback(input, n, reverse: reverse, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["n"] = n; + keywords["reverse"] = reverse; + var _op = tf.OpDefLib._apply_op_helper("NthElement", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "reverse", _op._get_attr_bool("reverse"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("NthElement", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor nth_element_eager_fallback(Tensor input, Tensor n, bool reverse, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, n }; + object[] _attrs = new object[] { "reverse", reverse, "T", input.dtype }; + var _result = _execute.execute("NthElement", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("NthElement", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Produces the average pool of the input tensor for quantized types. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + public static Tensor[] quantized_avg_pool(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAvgPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_avg_pool_eager_fallback(input, min_input, max_input, ksize: ksize, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("QuantizedAvgPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("QuantizedAvgPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_avg_pool_eager_fallback(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_input, max_input }; + object[] _attrs = new object[] { "T", input.dtype, "ksize", ksize, "strides", strides, "padding", padding }; + var _result = _execute.execute("QuantizedAvgPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedAvgPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Quantized Batch normalization. + /// + /// + /// + /// This op is deprecated and will be removed in the future. Prefer + /// `tf.nn.batch_normalization`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// A small float number to avoid dividing by 0. + /// + /// + /// + /// + /// A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// + /// + public static Tensor[] quantized_batch_norm_with_global_normalization(Tensor t, Tensor t_min, Tensor t_max, Tensor m, Tensor m_min, Tensor m_max, Tensor v, Tensor v_min, Tensor v_max, Tensor beta, Tensor beta_min, Tensor beta_max, Tensor gamma, Tensor gamma_min, Tensor gamma_max, TF_DataType out_type, float variance_epsilon, bool scale_after_normalization, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBatchNormWithGlobalNormalization", name) { args = new object[] { t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max }, attrs = new Dictionary() { ["out_type"] = out_type, ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_batch_norm_with_global_normalization_eager_fallback(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type: out_type, variance_epsilon: variance_epsilon, scale_after_normalization: scale_after_normalization, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["t"] = t; + keywords["t_min"] = t_min; + keywords["t_max"] = t_max; + keywords["m"] = m; + keywords["m_min"] = m_min; + keywords["m_max"] = m_max; + keywords["v"] = v; + keywords["v_min"] = v_min; + keywords["v_max"] = v_max; + keywords["beta"] = beta; + keywords["beta_min"] = beta_min; + keywords["beta_max"] = beta_max; + keywords["gamma"] = gamma; + keywords["gamma_min"] = gamma_min; + keywords["gamma_max"] = gamma_max; + keywords["out_type"] = out_type; + keywords["variance_epsilon"] = variance_epsilon; + keywords["scale_after_normalization"] = scale_after_normalization; + var _op = tf.OpDefLib._apply_op_helper("QuantizedBatchNormWithGlobalNormalization", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type"), "variance_epsilon", _op.get_attr("variance_epsilon"), "scale_after_normalization", _op._get_attr_bool("scale_after_normalization") }; + _execute.record_gradient("QuantizedBatchNormWithGlobalNormalization", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_batch_norm_with_global_normalization_eager_fallback(Tensor t, Tensor t_min, Tensor t_max, Tensor m, Tensor m_min, Tensor m_max, Tensor v, Tensor v_min, Tensor v_max, Tensor beta, Tensor beta_min, Tensor beta_max, Tensor gamma, Tensor gamma_min, Tensor gamma_max, TF_DataType out_type, float variance_epsilon, bool scale_after_normalization, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max }; + object[] _attrs = new object[] { "Tinput", t.dtype, "out_type", out_type, "variance_epsilon", variance_epsilon, "scale_after_normalization", scale_after_normalization }; + var _result = _execute.execute("QuantizedBatchNormWithGlobalNormalization", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Adds Tensor 'bias' to Tensor 'input' for Quantized types. + /// + /// + /// + /// Broadcasts the values of bias on dimensions 0..N-2 of 'input'. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_bias_add(Tensor input, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_bias, Tensor max_bias, TF_DataType out_type, string? 
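+        // Broadcasting note for the quantized_bias_add wrapper declared here: per the summary above,
+        // `bias` is broadcast over dimensions 0..N-2 of `input`, so a bias of shape [C] is added at
+        // every [N, H, W, C] position along the last dimension. min_bias/max_bias state the real
+        // values that the quantized bias range represents, mirroring min_input/max_input.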
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBiasAdd", name) { args = new object[] { input, bias, min_input, max_input, min_bias, max_bias }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_bias_add_eager_fallback(input, bias, min_input, max_input, min_bias, max_bias, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_bias"] = min_bias; + keywords["max_bias"] = max_bias; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedBiasAdd", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedBiasAdd", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_bias_add_eager_fallback(Tensor input, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_bias, Tensor max_bias, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, bias, min_input, max_input, min_bias, max_bias }; + object[] _attrs = new object[] { "T1", input.dtype, "T2", bias.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedBiasAdd", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedBiasAdd", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes a 2D convolution given quantized 4D input and filter tensors. + /// + /// + /// + /// The inputs are quantized tensors where the lowest value represents the real + /// number of the associated minimum, and the highest represents the maximum. + /// This means that you can only interpret the quantized output in the same way, by + /// taking the returned minimum and maximum values into account. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. + /// + /// + /// + /// + /// The type of padding algorithm to use. + /// + /// + /// + /// + /// 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + /// + /// + /// + public static Tensor[] quantized_conv2d(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? 
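+        // Hypothetical usage sketch for the quantized_conv2d wrapper declared here. It returns three
+        // tensors: [0] the (by default qint32) convolution result, [1] min_output and [2] max_output,
+        // the real values that the lowest/highest quantized levels stand for, as the summary above
+        // explains. Call sketched from inside the generated static class; input/filter/min/max tensors
+        // are assumed to exist:
+        //   var r = quantized_conv2d(input, filter, min_input, max_input, min_filter, max_filter,
+        //                            strides: new[] { 1, 1, 1, 1 }, padding: "SAME");
+        //   // r[0] is only meaningful when interpreted together with r[1] and r[2].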
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedConv2D", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedConv2D", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2D", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_and_relu(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRelu", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_and_relu_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_and_relu_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_and_relu_and_requantize(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndReluAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_and_relu_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_and_requantize(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, 
TF_DataType out_type = TF_DataType.TF_QINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes QuantizedConv2D per channel. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The quantized type of output tensor that needs to be converted. + /// + /// + /// + /// list of stride values. + /// + /// + /// + /// list of dilation values. 
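+ /// Defaults to { 1, 1, 1, 1 } (no dilation) when null, as applied at the top of the method body.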
+ /// + /// + public static Tensor[] quantized_conv2d_per_channel(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DPerChannel", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_per_channel_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DPerChannel", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedConv2DPerChannel", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_per_channel_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedConv2DPerChannel", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBias", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBias", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBias", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBias", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_and_relu(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_and_relu_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] 
quantized_conv2d_with_bias_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + 
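// Reached only when gradient recording is active: the attrs array captures the attribute values resolved by the op definition, +
// and record_gradient stores them with the op's inputs and outputs for use by its registered gradient function. +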
_execute.record_gradient("QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor summand, Tensor min_summand, Tensor max_summand, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["summand"] = summand; + keywords["min_summand"] = min_summand; + keywords["max_summand"] = max_summand; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "Tsummand", _op._get_attr_type("Tsummand"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor 
summand, Tensor min_summand, Tensor max_summand, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "Tsummand", summand.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor summand, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_sum_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["summand"] = summand; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasSumAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasSumAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_eager_fallback(Tensor 
input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor summand, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, summand }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasSumAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor summand, Tensor min_summand, Tensor max_summand, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["summand"] = summand; + keywords["min_summand"] = min_summand; + keywords["max_summand"] = max_summand; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedConv2DWithBiasSumAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", 
_op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "Tsummand", _op._get_attr_type("Tsummand"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedConv2DWithBiasSumAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, Tensor summand, Tensor min_summand, Tensor max_summand, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "Tsummand", summand.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedConv2DWithBiasSumAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D. + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. + /// + /// + public static Tensor[] quantized_depthwise_conv2d(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2D", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedDepthwiseConv2D", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_eager_fallback(Tensor input, Tensor filter, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedDepthwiseConv2D", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D with Bias. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. + /// + /// + public static Tensor[] quantized_depthwise_conv2d_with_bias(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2DWithBias", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations") }; + _execute.record_gradient("QuantizedDepthwiseConv2DWithBias", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_with_bias_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations }; + var _result = _execute.execute("QuantizedDepthwiseConv2DWithBias", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D with Bias and Relu. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. + /// + /// + /// + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QINT32, int[] dilations = null, int[] padding_list = null, string? 
name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2DWithBiasAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = _execute.execute("QuantizedDepthwiseConv2DWithBiasAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes quantized depthwise Conv2D with Bias, Relu and Requantize. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// The type of the output. + /// + /// + /// List of stride values. + /// + /// + /// + /// List of dilation values. 
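+ /// Defaults to { 1, 1, 1, 1 } (no dilation) when null, as applied at the top of the method body.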
+ /// + /// + /// + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, int[] strides, string padding, TF_DataType out_type = TF_DataType.TF_QUINT8, int[] dilations = null, int[] padding_list = null, string? name = null) + { + var _ctx = tf.Context; + if (dilations is null) + { + dilations = new int[] { 1, 1, 1, 1 }; + } + if (padding_list is null) + { + padding_list = new int[] { }; + } + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, out_type: out_type, strides: strides, padding: padding, dilations: dilations, padding_list: padding_list, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["filter"] = filter; + keywords["bias"] = bias; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["min_filter"] = min_filter; + keywords["max_filter"] = max_filter; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["out_type"] = out_type; + keywords["strides"] = strides; + keywords["padding"] = padding; + keywords["dilations"] = dilations; + keywords["padding_list"] = padding_list; + var _op = tf.OpDefLib._apply_op_helper("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "Tfilter", _op._get_attr_type("Tfilter"), "Tbias", _op._get_attr_type("Tbias"), "out_type", _op._get_attr_type("out_type"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding"), "dilations", _op.get_attr("dilations"), "padding_list", _op.get_attr("padding_list") }; + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(Tensor input, Tensor filter, Tensor bias, Tensor min_input, Tensor max_input, Tensor min_filter, Tensor max_filter, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType out_type, int[] strides, string padding, int[] dilations, int[] padding_list, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "Tinput", input.dtype, "Tfilter", filter.dtype, "Tbias", bias.dtype, "out_type", out_type, "strides", strides, "padding", padding, "dilations", dilations, "padding_list", padding_list }; + var _result = 
_execute.execute("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// ~~%~~Performs a quantized matrix multiplication of `a` by the matrix `b` with bias~~%~~add.~~%~~ + /// + /// + /// + /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner + /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must + /// match the outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). Then do broadcast add operation with bias values on the matrix + /// multiplication result. The bias size must match inner dimension of `b`. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// Input data quantization mode. Either MIN_FIRST(default) or SCALED. + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput = TF_DataType.TF_QINT32, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBias", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBias", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBias", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = 
new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBias", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBias", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor quantized_mat_mul_with_bias_and_dequantize(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndDequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_dequantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndDequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndDequantize", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor quantized_mat_mul_with_bias_and_dequantize_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", 
b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndDequantize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndDequantize", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// ~~%~~Perform a quantized matrix multiplication of `a` by the matrix `b` with bias~~%~~add and relu fusion.~~%~~ + /// + /// + /// + /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner + /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must + /// match the outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). Then do broadcast add operation with bias values on the matrix + /// multiplication result. The bias size must match inner dimension of `b`. Then do + /// relu activation to get non-negative result. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// Input data quantization mode. Either MIN_FIRST(default) or SCALED. + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias_and_relu(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput = TF_DataType.TF_QINT32, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRelu", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_relu_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_and_relu_eager_fallback(Tensor a, 
Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// ~~%~~Perform a quantized matrix multiplication of `a` by the matrix `b` with bias~~%~~add and relu and requantize fusion.~~%~~ + /// + /// + /// + /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner + /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must + /// match the outer dimension of `b` (after being transposed if `transposed_b` is + /// non-zero). Then do broadcast add operation with bias values on the matrix + /// multiplication result. The bias size must match inner dimension of `b`. Then do + /// relu activation to get non-negative result. Then do requantize operation to get + /// final uint8 result. + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// If true, `a` is transposed before multiplication. + /// + /// + /// If true, `b` is transposed before multiplication. + /// + /// + /// + /// Input data quantization mode. Either MIN_FIRST(default) or SCALED. + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias_and_relu_and_requantize(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput = TF_DataType.TF_QUINT8, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? 
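+        // Illustrative note (an assumption, not generator output): this fused kernel chains
+        // matmul -> bias add -> relu -> requantize, per the summary above. The min_freezed_output /
+        // max_freezed_output inputs appear to supply the target output range for the requantize step,
+        // and with the default Toutput of TF_QUINT8 the first of the three returned tensors holds the
+        // uint8 result, followed by its min/max range tensors.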
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndReluAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndReluAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndReluAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndReluAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_mat_mul_with_bias_and_requantize(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput = TF_DataType.TF_QUINT8, bool transpose_a = false, bool transpose_b = false, string input_quant_mode = "MIN_FIRST", string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_mat_mul_with_bias_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput: Toutput, transpose_a: transpose_a, transpose_b: transpose_b, input_quant_mode: input_quant_mode, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + if (input_quant_mode is null) + { + input_quant_mode = "MIN_FIRST"; + } + Dictionary keywords = new(); + keywords["a"] = a; + keywords["b"] = b; + keywords["bias"] = bias; + keywords["min_a"] = min_a; + keywords["max_a"] = max_a; + keywords["min_b"] = min_b; + keywords["max_b"] = max_b; + keywords["min_freezed_output"] = min_freezed_output; + keywords["max_freezed_output"] = max_freezed_output; + keywords["Toutput"] = Toutput; + keywords["transpose_a"] = transpose_a; + keywords["transpose_b"] = transpose_b; + keywords["input_quant_mode"] = input_quant_mode; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMatMulWithBiasAndRequantize", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), "Tbias", _op._get_attr_type("Tbias"), "Toutput", _op._get_attr_type("Toutput"), "transpose_a", _op._get_attr_bool("transpose_a"), "transpose_b", _op._get_attr_bool("transpose_b"), "input_quant_mode", _op.get_attr("input_quant_mode") }; + _execute.record_gradient("QuantizedMatMulWithBiasAndRequantize", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_mat_mul_with_bias_and_requantize_eager_fallback(Tensor a, Tensor b, Tensor bias, Tensor min_a, Tensor max_a, Tensor min_b, Tensor max_b, Tensor min_freezed_output, Tensor max_freezed_output, TF_DataType Toutput, bool transpose_a, bool transpose_b, string input_quant_mode, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }; + object[] _attrs = new object[] { "T1", a.dtype, "T2", b.dtype, "Tbias", bias.dtype, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "input_quant_mode", input_quant_mode }; + var _result = _execute.execute("QuantizedMatMulWithBiasAndRequantize", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMatMulWithBiasAndRequantize", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Produces the max pool of the input tensor for quantized types. + /// + /// + /// + /// + /// + /// + /// The size of the window for each dimension of the input tensor. + /// The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The stride of the sliding window for each dimension of the input + /// tensor. The length must be 4 to match the number of dimensions of the input. + /// + /// + /// + /// + /// The type of padding algorithm to use. 
+ /// + /// + /// + public static Tensor[] quantized_max_pool(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMaxPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_max_pool_eager_fallback(input, min_input, max_input, ksize: ksize, strides: strides, padding: padding, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["min_input"] = min_input; + keywords["max_input"] = max_input; + keywords["ksize"] = ksize; + keywords["strides"] = strides; + keywords["padding"] = padding; + var _op = tf.OpDefLib._apply_op_helper("QuantizedMaxPool", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), "strides", _op.get_attr("strides"), "padding", _op.get_attr("padding") }; + _execute.record_gradient("QuantizedMaxPool", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_max_pool_eager_fallback(Tensor input, Tensor min_input, Tensor max_input, int[] ksize, int[] strides, string padding, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, min_input, max_input }; + object[] _attrs = new object[] { "T", input.dtype, "ksize", ksize, "strides", strides, "padding", padding }; + var _result = _execute.execute("QuantizedMaxPool", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedMaxPool", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes Quantized Rectified Linear: `max(features, 0)` + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_relu(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
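+        // Usage sketch (illustrative, not emitted by the generator): quantized_relu applies
+        // max(features, 0) on quantized data; alongside `features`, the scalar min_features /
+        // max_features tensors describe the input's float range, and the call returns three tensors
+        // (the activations plus the min/max of the activation range), matching the output count of 3
+        // used in the eager fallback below.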
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_relu_eager_fallback(features, min_features, max_features, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["min_features"] = min_features; + keywords["max_features"] = max_features; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedRelu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedRelu", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_relu_eager_fallback(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, min_features, max_features }; + object[] _attrs = new object[] { "Tinput", features.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedRelu", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedRelu", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_relu6(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu6", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_relu6_eager_fallback(features, min_features, max_features, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["min_features"] = min_features; + keywords["max_features"] = max_features; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedRelu6", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedRelu6", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_relu6_eager_fallback(Tensor features, Tensor min_features, Tensor max_features, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, min_features, max_features }; + object[] _attrs = new object[] { "Tinput", features.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedRelu6", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedRelu6", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + /// + /// + /// + /// + /// + /// + /// + public static Tensor[] quantized_relu_x(Tensor features, Tensor max_value, Tensor min_features, Tensor max_features, TF_DataType out_type = TF_DataType.TF_QUINT8, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReluX", name) { args = new object[] { features, max_value, min_features, max_features }, attrs = new Dictionary() { ["out_type"] = out_type } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return quantized_relu_x_eager_fallback(features, max_value, min_features, max_features, out_type: out_type, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["max_value"] = max_value; + keywords["min_features"] = min_features; + keywords["max_features"] = max_features; + keywords["out_type"] = out_type; + var _op = tf.OpDefLib._apply_op_helper("QuantizedReluX", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "Tinput", _op._get_attr_type("Tinput"), "out_type", _op._get_attr_type("out_type") }; + _execute.record_gradient("QuantizedReluX", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] quantized_relu_x_eager_fallback(Tensor features, Tensor max_value, Tensor min_features, Tensor max_features, TF_DataType out_type, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, max_value, min_features, max_features }; + object[] _attrs = new object[] { "Tinput", features.dtype, "out_type", out_type }; + var _result = _execute.execute("QuantizedReluX", 3, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("QuantizedReluX", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Computes rectified linear: `max(features, 0)`. + /// + /// + /// + /// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) + /// Example usage: + /// >>> tf.nn.relu([-2., 0., 3.]).numpy() + /// array([0., 0., 3.], dtype=float32) + /// + /// + /// + /// + public static Tensor relu(Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return relu_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Relu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Relu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor relu_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Relu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Relu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear 6: `min(max(features, 0), 6)`. + /// + /// + /// + public static Tensor relu6(Tensor features, string? 
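+        // Worked example for Relu6 (illustrative only, assuming the standard tf.constant helper):
+        //   var y = relu6(tf.constant(new[] { -1f, 3f, 8f }));
+        //   // min(max(x, 0), 6) element-wise => [0, 3, 6]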
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu6", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return relu6_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Relu6", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Relu6", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor relu6_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Relu6", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Relu6", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes rectified linear gradients for a Relu operation. + /// + /// + /// + /// + public static Tensor relu_grad(Tensor gradients, Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return relu_grad_eager_fallback(gradients, features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("ReluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("ReluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor relu_grad_eager_fallback(Tensor gradients, Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("ReluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("ReluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` + /// + /// + /// + /// if < 0, `scale * features` otherwise. + /// + /// To be used together with + /// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. + /// For correct dropout, use `tf.contrib.nn.alpha_dropout`. + /// + /// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + /// + /// + /// + /// + public static Tensor selu(Tensor features, string? 
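+        // Illustrative note (not generator output): Selu computes scale * features for positive inputs
+        // and scale * alpha * (exp(features) - 1) otherwise, as summarized above; the commonly cited
+        // constants from the linked paper are approximately alpha = 1.6733 and scale = 1.0507.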
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Selu", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return selu_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Selu", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Selu", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor selu_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Selu", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Selu", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes gradients for the scaled exponential linear (Selu) operation. + /// + /// + /// + /// + public static Tensor selu_grad(Tensor gradients, Tensor outputs, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SeluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return selu_grad_eager_fallback(gradients, outputs, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["outputs"] = outputs; + var _op = tf.OpDefLib._apply_op_helper("SeluGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SeluGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor selu_grad_eager_fallback(Tensor gradients, Tensor outputs, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, outputs }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("SeluGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SeluGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softmax activations. + /// + /// + /// + /// For each batch `i` and class `j` we have + /// + /// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + /// + /// + /// + /// + public static Tensor softmax(Tensor logits, string? 
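+        // Worked example (illustrative, assuming the standard tf.constant helper; the raw Softmax op
+        // expects 2-D [batch, classes] logits):
+        //   var p = softmax(tf.constant(new float[,] { { 1f, 1f, 1f } }));
+        //   // exp(1) / (3 * exp(1)) per entry => [[0.333..., 0.333..., 0.333...]]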
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softmax", name) { args = new object[] { logits }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softmax_eager_fallback(logits, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["logits"] = logits; + var _op = tf.OpDefLib._apply_op_helper("Softmax", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Softmax", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softmax_eager_fallback(Tensor logits, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { logits }; + object[] _attrs = new object[] { "T", logits.dtype }; + var _result = _execute.execute("Softmax", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Softmax", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// + /// + /// Inputs are the logits, not probabilities. + /// + /// + /// + /// + /// + public static Tensor[] softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return softmax_cross_entropy_with_logits_eager_fallback(features, labels, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["labels"] = labels; + var _op = tf.OpDefLib._apply_op_helper("SoftmaxCrossEntropyWithLogits", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SoftmaxCrossEntropyWithLogits", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] softmax_cross_entropy_with_logits_eager_fallback(Tensor features, Tensor labels, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, labels }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("SoftmaxCrossEntropyWithLogits", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// + /// + /// + /// + public static Tensor softplus(Tensor features, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softplus", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softplus_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Softplus", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Softplus", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softplus_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Softplus", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Softplus", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softplus gradients for a softplus operation. + /// + /// + /// + /// + public static Tensor softplus_grad(Tensor gradients, Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftplusGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softplus_grad_eager_fallback(gradients, features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("SoftplusGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SoftplusGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softplus_grad_eager_fallback(Tensor gradients, Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("SoftplusGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SoftplusGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softsign: `features / (abs(features) + 1)`. + /// + /// + /// + public static Tensor softsign(Tensor features, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softsign", name) { args = new object[] { features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softsign_eager_fallback(features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("Softsign", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("Softsign", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softsign_eager_fallback(Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features }; + object[] _attrs = new object[] { "T", features.dtype }; + var _result = _execute.execute("Softsign", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("Softsign", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softsign gradients for a softsign operation. + /// + /// + /// + /// + public static Tensor softsign_grad(Tensor gradients, Tensor features, string? name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftsignGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary() { } }); + return _fast_path_result[0]; + } + catch (Exception) + { + } + try + { + return softsign_grad_eager_fallback(gradients, features, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["gradients"] = gradients; + keywords["features"] = features; + var _op = tf.OpDefLib._apply_op_helper("SoftsignGrad", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T") }; + _execute.record_gradient("SoftsignGrad", _op.inputs, _attrs, _result); + } + return _result[0]; + } + + public static Tensor softsign_grad_eager_fallback(Tensor gradients, Tensor features, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { gradients, features }; + object[] _attrs = new object[] { "T", gradients.dtype }; + var _result = _execute.execute("SoftsignGrad", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SoftsignGrad", _inputs_flat, _attrs, _result); + } + return _result[0]; + } + /// + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// + /// + /// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + /// a matrix of label probabilities, but rather a single label per row + /// of features. This label is considered to have probability 1.0 for the + /// given row. + /// + /// Inputs are the logits, not probabilities. + /// + /// + /// + /// + /// + public static Tensor[] sparse_softmax_cross_entropy_with_logits(Tensor features, Tensor labels, string? 
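+        // Shape sketch (illustrative assumption): `features` is a [batch_size, num_classes] logits
+        // matrix and `labels` is a [batch_size] vector of class indices, one label per row as described
+        // above; the two returned tensors are the per-example loss and the backprop gradient, matching
+        // the output count of 2 used in the eager fallback below.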
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary() { } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return sparse_softmax_cross_entropy_with_logits_eager_fallback(features, labels, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["features"] = features; + keywords["labels"] = labels; + var _op = tf.OpDefLib._apply_op_helper("SparseSoftmaxCrossEntropyWithLogits", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "T", _op._get_attr_type("T"), "Tlabels", _op._get_attr_type("Tlabels") }; + _execute.record_gradient("SparseSoftmaxCrossEntropyWithLogits", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] sparse_softmax_cross_entropy_with_logits_eager_fallback(Tensor features, Tensor labels, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { features, labels }; + object[] _attrs = new object[] { "T", features.dtype, "Tlabels", labels.dtype }; + var _result = _execute.execute("SparseSoftmaxCrossEntropyWithLogits", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds values and indices of the `k` largest elements for the last dimension. + /// + /// + /// + /// If the input is a vector (rank-1), finds the `k` largest entries in the vector + /// and outputs their values and indices as vectors. Thus `values[j]` is the + /// `j`-th largest entry in `input`, and its index is `indices[j]`. + /// + /// For matrices (resp. higher rank input), computes the top `k` entries in each + /// row (resp. vector along the last dimension). Thus, + /// + /// values.shape = indices.shape = input.shape[:-1] + [k] + /// + /// If two elements are equal, the lower-index element appears first. + /// + /// If `k` varies dynamically, use `TopKV2` below. + /// + /// + /// + /// + /// + /// Number of top elements to look for along the last dimension (along each + /// row for matrices). + /// + /// + /// + /// + /// If true the resulting `k` elements will be sorted by the values in + /// descending order. + /// + /// + /// + public static Tensor[] top_k(Tensor input, int k = 0, bool sorted = true, string? 
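+        // Worked example for TopK (illustrative, assuming the standard tf.constant helper):
+        //   var res = top_k(tf.constant(new[] { 1f, 3f, 2f, 4f }), k: 2);
+        //   // res[0] (values)  => [4, 3]
+        //   // res[1] (indices) => [3, 1]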
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopK", name) { args = new object[] { input }, attrs = new Dictionary() { ["k"] = k, ["sorted"] = sorted } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return top_k_eager_fallback(input, k: k, sorted: sorted, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["sorted"] = sorted; + var _op = tf.OpDefLib._apply_op_helper("TopK", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "k", _op._get_attr_int("k"), "sorted", _op._get_attr_bool("sorted"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("TopK", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] top_k_eager_fallback(Tensor input, int k, bool sorted, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input }; + object[] _attrs = new object[] { "k", k, "sorted", sorted, "T", input.dtype }; + var _result = _execute.execute("TopK", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TopK", _inputs_flat, _attrs, _result); + } + return _result; + } + /// + /// Finds values and indices of the `k` largest elements for the last dimension. + /// + /// + /// + /// If the input is a vector (rank-1), finds the `k` largest entries in the vector + /// and outputs their values and indices as vectors. Thus `values[j]` is the + /// `j`-th largest entry in `input`, and its index is `indices[j]`. + /// + /// For matrices (resp. higher rank input), computes the top `k` entries in each + /// row (resp. vector along the last dimension). Thus, + /// + /// values.shape = indices.shape = input.shape[:-1] + [k] + /// + /// If two elements are equal, the lower-index element appears first. + /// + /// + /// + /// + /// + /// + /// If true the resulting `k` elements will be sorted by the values in + /// descending order. + /// + /// + /// + public static Tensor[] top_kv2(Tensor input, Tensor k, bool sorted = true, string? 
name = null) + { + var _ctx = tf.Context; + if (_ctx.executing_eagerly()) + { + try + { + var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopKV2", name) { args = new object[] { input, k }, attrs = new Dictionary() { ["sorted"] = sorted } }); + return _fast_path_result; + } + catch (Exception) + { + } + try + { + return top_kv2_eager_fallback(input, k, sorted: sorted, name: name, ctx: _ctx); + } + catch (Exception) + { + } + } + Dictionary keywords = new(); + keywords["input"] = input; + keywords["k"] = k; + keywords["sorted"] = sorted; + var _op = tf.OpDefLib._apply_op_helper("TopKV2", name, keywords); + var _result = _op.outputs; + if (_execute.must_record_gradient()) + { + object[] _attrs = new object[] { "sorted", _op._get_attr_bool("sorted"), "T", _op._get_attr_type("T") }; + _execute.record_gradient("TopKV2", _op.inputs, _attrs, _result); + } + return _result; + } + + public static Tensor[] top_kv2_eager_fallback(Tensor input, Tensor k, bool sorted, string name, Context ctx) + { + Tensor[] _inputs_flat = new Tensor[] { input, k }; + object[] _attrs = new object[] { "sorted", sorted, "T", input.dtype }; + var _result = _execute.execute("TopKV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name); + if (_execute.must_record_gradient()) + { + _execute.record_gradient("TopKV2", _inputs_flat, _attrs, _result); + } + return _result; + } +} diff --git a/src/TensorFlowNET.Core/Operations/gen_ops.cs b/src/TensorFlowNET.Core/Operations/gen_ops.cs index fe67c2b84..5fa4c97dd 100644 --- a/src/TensorFlowNET.Core/Operations/gen_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_ops.cs @@ -10055,7 +10055,7 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string name = "Ensu { try { - var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("EnsureShape", name, input, shape)); + var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "EnsureShape", name, input, shape)); return _result[0]; } catch (Exception) @@ -10076,7 +10076,7 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string name = "Ensu dict["input"] = input; dict["shape"] = shape; var op = tf.OpDefLib._apply_op_helper("EnsureShape", name: name, keywords: dict); - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -10086,9 +10086,9 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string name = "Ensu public static Tensor ensure_shape_eager_fallback(Tensor input, Shape shape, string name, Context ctx) { object[] attrs = new object[4] { "shape", shape, "T", input.dtype.as_datatype_enum() }; - var _result = execute.executes("EnsureShape", 1, new Tensor[] { input }, + var _result = _execute.execute("EnsureShape", 1, new Tensor[] { input }, attrs, ctx, name); - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { throw new NotImplementedException(); } @@ -17194,7 +17194,7 @@ public static Operation merge_v2_checkpoints(Tensor[] checkpoint_prefixes, Tenso var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("MergeV2Checkpoints", name, + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "MergeV2Checkpoints", name, checkpoint_prefixes, destination_prefix, "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files)); result = null; return null; @@ -24297,7 +24297,7 @@ public static Tensor regex_full_match(Tensor input, 
Tensor pattern, string name var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("RegexFullMatch", name, input, pattern)); + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "RegexFullMatch", name, input, pattern)); return result[0]; } var dict = new Dictionary(); @@ -27201,7 +27201,7 @@ public static Tensor[] restore_v2(Tensor prefix, string[] tensor_names, string[] Dictionary attrs = new(); attrs["dtypes"] = dtypes; var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "RestoreV2", name, prefix, tensor_names, shape_and_slices + tf.Context, "RestoreV2", name, prefix, tensor_names, shape_and_slices ) { attrs = attrs }); return result; @@ -27236,9 +27236,9 @@ public static Tensor[] restore_v2_eager_fallback(Tensor prefix, string[] tensor_ var shape_and_slices_tensor = ops.convert_to_tensor(shape_and_slices, TF_DataType.TF_STRING); object[] attrs = new object[] { "dtypes", dtypes }; Tensor[] inputs_flat = new Tensor[] { prefix, tensor_names_tensor, shape_and_slices_tensor }; - var result = execute.quick_execute("RestoreV2", dtypes.Length, inputs_flat, attrs, ctx, name); + var result = _execute.quick_execute("RestoreV2", dtypes.Length, inputs_flat, attrs, ctx, name); - if (execute.must_record_gradient()) + if (_execute.must_record_gradient()) { // TODO(Rinne); record the gradient } @@ -29829,7 +29829,7 @@ public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_ var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("ShardedFilename", name, basename, shard, num_shards)); + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "ShardedFilename", name, basename, shard, num_shards)); return result[0]; } var dict = new Dictionary(); @@ -34759,7 +34759,7 @@ public static Tensor string_join(Tensor[] inputs, string separator = null, strin var ctx = tf.Context; if (ctx.executing_eagerly()) { - var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("StringJoin", name, inputs, "separator", separator)); + var result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "StringJoin", name, inputs, "separator", separator)); return result[0]; } var dict = new Dictionary(); diff --git a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs index 330903252..c4e8f8c41 100644 --- a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs +++ b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs @@ -25,7 +25,7 @@ public static Operation assign_sub_variable_op(Tensor resource, Tensor value, st if (tf.Context.executing_eagerly()) { tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo( - "AssignSubVariableOp", name, resource, value)); + tf.Context, "AssignSubVariableOp", name, resource, value)); return null; } @@ -44,7 +44,7 @@ public static Operation assign_add_variable_op(Tensor resource, Tensor value, st { if (tf.Context.executing_eagerly()) { - tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("AssignAddVariableOp", name, + tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignAddVariableOp", name, resource, value)); return null; @@ -59,7 +59,7 @@ public static Operation assign_variable_op(Tensor resource, Tensor value, string { if (tf.Context.executing_eagerly()) { - tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("AssignVariableOp", name, + 
tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignVariableOp", name, resource, value)); return null; @@ -74,7 +74,7 @@ public static Tensor var_is_initialized_op(Tensor resource, string name = null) { if (tf.Context.executing_eagerly()) { - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("VarIsInitializedOp", name, + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarIsInitializedOp", name, resource)); return results[0]; @@ -99,7 +99,7 @@ public static Tensor var_handle_op(TF_DataType dtype, Shape shape, { if (tf.Context.executing_eagerly()) { - var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo("VarHandleOp", name) + var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarHandleOp", name) { attrs = ConvertToDict(new { diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs index e0bc037d2..9d52f5161 100644 --- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs +++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs @@ -177,11 +177,11 @@ internal static Tensor _random_flip(Tensor image, int flip_index, int seed, stri if (shape.ndim == 3 || shape.ndim == Unknown) { Tensor uniform_random = random_ops.random_uniform(new int[] { }, 0f, 1.0f, seed: seed); - var mirror_cond = gen_math_ops.less(uniform_random, .5); + var mirror_cond = gen_math_ops.less(uniform_random, ops.convert_to_tensor(.5)); var result = control_flow_ops.cond( pred: mirror_cond, - true_fn: () => gen_array_ops.reverse(image, new { flip_index }), + true_fn: () => gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index })), false_fn: () => image, name: scope ); @@ -197,7 +197,7 @@ internal static Tensor _random_flip(Tensor image, int flip_index, int seed, stri var flips = math_ops.round( array_ops.reshape(uniform_random, shape: array_ops.constant(value: new object[] { batch_size[0], 1, 1, 1 }))); flips = math_ops.cast(flips, image.dtype); - var flipped_input = gen_array_ops.reverse(image, new int[] { flip_index + 1 }); + var flipped_input = gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index + 1 })); return flips * flipped_input + (1 - flips) * image; } else @@ -222,11 +222,11 @@ internal static Tensor _flip(Tensor image, int flip_index, string scope_name) Shape shape = image.shape; if (shape.ndim == 3 || shape.ndim == Unknown) { - return fix_image_flip_shape(image, gen_array_ops.reverse(image, new { flip_index })); + return fix_image_flip_shape(image, gen_array_ops.reverse(image, ops.convert_to_tensor(new int[] { flip_index }))); } else if (shape.ndim == 4) { - return gen_array_ops.reverse(image, new[] { flip_index + 1 }); + return gen_array_ops.reverse(image, ops.convert_to_tensor(new[] { flip_index + 1 })); } else { @@ -268,15 +268,15 @@ internal static Tensor _rot90_3D(Tensor image, int k, string name_scope) { Tensor _rot90() { - return array_ops.transpose(gen_array_ops.reverse(image, new[] { 1, 0, 2 }), new int[] { 1 }); + return array_ops.transpose(gen_array_ops.reverse(image, ops.convert_to_tensor(new[] { 1, 0, 2 })), new int[] { 1 }); }; Tensor _rot180() { - return gen_array_ops.reverse(image, new[] { 0, 1 }); + return gen_array_ops.reverse(image, ops.convert_to_tensor(new[] { 0, 1 })); }; Tensor _rot270() { - return gen_array_ops.reverse(array_ops.transpose(image, new[] { 1, 0, 2 }), new[] { 1 }); + return gen_array_ops.reverse(array_ops.transpose(image, new[] { 1, 0, 2 }), 
ops.convert_to_tensor(new[] { 1 })); }; var cases = new[] {math_ops.equal(k, 1), _rot90(), @@ -1389,7 +1389,7 @@ internal static (Tensor, Tensor, Operation[]) _verify_compatible_image_shapes(Te Operation[] checks = new Operation[] { }; checks.append( control_flow_ops.Assert( - gen_math_ops.greater_equal(array_ops.size(shape1_tensor), 3), new[] { shape1, shape2 }, + gen_math_ops.greater_equal(array_ops.size(shape1_tensor), ops.convert_to_tensor(3)), new[] { shape1, shape2 }, summarize: 10)); checks.append( control_flow_ops.Assert( @@ -1762,8 +1762,8 @@ internal static (Tensor, Tensor, Tensor, Tensor) _cross_suppression(Tensor boxes { var batch_size = array_ops.shape(boxes)[0]; var new_slice = array_ops.slice( - boxes, new object[] { 0, inner_idx * tile_size, 0 }, - new object[] { batch_size, tile_size, 4 }); + boxes, new Tensor[] { ops.convert_to_tensor(0), ops.convert_to_tensor(inner_idx * tile_size), ops.convert_to_tensor(0) }, + new Tensor[] { ops.convert_to_tensor(batch_size), ops.convert_to_tensor(tile_size), ops.convert_to_tensor(4) }); var iou = _bbox_overlap(new_slice, box_slice); var box_slice_after_suppression = array_ops.expand_dims( math_ops.cast(math_ops.reduce_all(iou < iou_threshold, new(1)), @@ -1816,8 +1816,8 @@ internal static (Tensor, float, Tensor, int) _suppression_loop_body(Tensor boxes (Tensor, Tensor, Tensor, Tensor) cross_suppression_func(Tensor boxes, Tensor box_slice, Tensor iou_threshold, Tensor inner_idx, int tile_size) => _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size); - var box_slice = array_ops.slice(boxes, new[] { 0, idx * tile_size, 0 }, - new[] { batch_size, tile_size, 4 }); + var box_slice = array_ops.slice(boxes, new Tensor[]{ ops.convert_to_tensor(0), ops.convert_to_tensor(idx * tile_size), ops.convert_to_tensor(0) }, + new Tensor[] { ops.convert_to_tensor(batch_size), ops.convert_to_tensor(tile_size), ops.convert_to_tensor(4) }); var iou = _bbox_overlap(box_slice, box_slice); var mask = array_ops.expand_dims( diff --git a/src/TensorFlowNET.Core/Operations/io_ops.cs b/src/TensorFlowNET.Core/Operations/io_ops.cs index 16e1bac47..0b77689d5 100644 --- a/src/TensorFlowNET.Core/Operations/io_ops.cs +++ b/src/TensorFlowNET.Core/Operations/io_ops.cs @@ -31,7 +31,7 @@ public Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_an try { var result = tf.Runner.TFE_FastPathExecute( - new FastPathOpExecInfo("SaveV2", name, new object[] { prefix, tensor_names, shape_and_slices, tensors })); + new FastPathOpExecInfo(tf.Context, "SaveV2", name, new object[] { prefix, tensor_names, shape_and_slices, tensors })); result = null; return null; } @@ -48,14 +48,14 @@ public Operation save_v2(Tensor prefix, string[] tensor_names, string[] shape_an public Operation save_v2_eager_fallback(Tensor prefix, string[] tensor_names, string[] shape_and_slices, Tensor[] tensors, string name, Context ctx) { DataType[] attr_dtypes; - (attr_dtypes, tensors) = execute.onvert_to_mixed_eager_tensors(tensors, ctx); + (attr_dtypes, tensors) = _execute.onvert_to_mixed_eager_tensors(tensors, ctx); prefix = ops.convert_to_tensor(prefix, TF_DataType.TF_STRING); var tensor_names_tensor = ops.convert_to_tensor(tensor_names, TF_DataType.TF_STRING); var shape_and_slices_tensor = ops.convert_to_tensor(shape_and_slices, TF_DataType.TF_STRING); var inputs_flat = tensors.Concat(new Tensor[] { prefix, tensor_names_tensor, shape_and_slices_tensor }).ToArray(); var attrs = new object[] { "dtypes", attr_dtypes }; - var result = execute.quick_execute("SaveV2", 0, 
inputs_flat, attrs, ctx, name); + var result = _execute.quick_execute("SaveV2", 0, inputs_flat, attrs, ctx, name); result = null; return null; } diff --git a/src/TensorFlowNET.Core/Operations/math_ops.cs b/src/TensorFlowNET.Core/Operations/math_ops.cs index f7b428bb4..5ded448ac 100644 --- a/src/TensorFlowNET.Core/Operations/math_ops.cs +++ b/src/TensorFlowNET.Core/Operations/math_ops.cs @@ -21,6 +21,7 @@ limitations under the License. using Tensorflow.Framework; using static Tensorflow.Binding; using Tensorflow.Operations; +using System.Runtime.CompilerServices; namespace Tensorflow { @@ -39,18 +40,18 @@ public static Tensor abs(Tensor x, string name = null) { return gen_ops.complex_abs(x, Tout: x.dtype.real_dtype(), name: name); } - return gen_math_ops._abs(x, name: name); + return gen_math_ops.abs(x, name: name); }); } public static Tensor add(Tx x, Ty y, string name = null) - => gen_math_ops.add(x, y, name); + => gen_math_ops.add(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); public static Tensor add_v2(Tensor x, Tensor y, string name = null) => tf.Context.ExecuteOp("AddV2", name, new ExecuteOpArgs(x, y)); public static Tensor add_v2(Tx x, Ty y, string name = null) - => gen_math_ops.add_v2(x, y, name); + => gen_math_ops.add_v2(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); /// /// Adds all input tensors element-wise. @@ -254,9 +255,9 @@ public static Tensor einsum(string equation, Tensors inputs, string name = null) } public static Tensor greater_equal(Tx x, Ty y, string name = null) - => gen_math_ops.greater_equal(x, y, name: name); + => gen_math_ops.greater_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor equal(Tx x, Ty y, string name = null) - => gen_math_ops.equal(x, y, name: name); + => gen_math_ops.equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); /// /// Computes the Gauss error function of `x` element-wise. 
@@ -274,13 +275,13 @@ public static Tensor multiply(Tensor x, Tensor y, string name = null) => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(x, y)); public static Tensor multiply(Tx x, Ty y, string name = null) - => gen_math_ops.mul(x, y, name: name); + => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor not_equal(Tx x, Ty y, string name = null) - => gen_math_ops.not_equal(x, y, name: name); + => gen_math_ops.not_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor mul_no_nan(Tx x, Ty y, string name = null) - => gen_math_ops.mul_no_nan(x, y, name: name); + => gen_math_ops.mul_no_nan(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor scalar_mul(Tscale scale, Tx x, string name = null) => tf.Context.ExecuteOp("Mul", name, new ExecuteOpArgs(scale, x)); @@ -396,7 +397,7 @@ public static Tensor sigmoid(T x, string name = null) }); public static Tensor sign(T x, string name = null) - => gen_math_ops.sign(x, name: name); + => gen_math_ops.sign(ops.convert_to_tensor(x), name: name); public static Tensor sin(Tensor x, string name = null) => tf.Context.ExecuteOp("Sin", name, new ExecuteOpArgs(x)); @@ -421,7 +422,7 @@ public static Tensor square(Tensor x, string name = null) public static Tensor subtract(Tx x, Ty y, string name = null) { - return gen_math_ops.sub(x, y, name); + return gen_math_ops.sub(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name); } public static Tensor log(Tensor x, string name = null) @@ -455,8 +456,8 @@ public static Tensor linspace(Tensor start, Tensor stop, int num = 50, string na var axis_tensor = array_ops.where_v2(constant_op.constant(axis >= 0), x: axis, y: ndims + axis); // The purpose is to avoid having negative values when repeating. - var num_fill = gen_math_ops.maximum(num_int_tensor - 2, 0); - var n_steps = gen_math_ops.maximum(num_int_tensor - 1, 1); + var num_fill = gen_math_ops.maximum(num_int_tensor - 2, ops.convert_to_tensor(0)); + var n_steps = gen_math_ops.maximum(num_int_tensor - 1, ops.convert_to_tensor(1)); var delta = (expanded_stop - expanded_start) / cast(n_steps, expanded_stop.dtype); var range_end = array_ops.where_v2(num_int_tensor >= 0, n_steps, -1); @@ -503,7 +504,7 @@ public static Tensor reduced_shape(Tensor input_shape, Tensor axes) var axes_shape = array_ops.shape(axes); var rng = math_ops.range(input_rank); var a1 = new Tensor[] { rng, axes }; - var fill = gen_array_ops.fill(axes_shape, 1); + var fill = gen_array_ops.fill(axes_shape, ops.convert_to_tensor(1)); var a2 = new Tensor[] { input_shape, fill }; return gen_data_flow_ops.dynamic_stitch(a1, a2); @@ -528,7 +529,7 @@ public static Tensor reciprocal(Tensor x, string name = null) /// public static Tensor reduce_all(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null) { - var all = gen_math_ops._all(input_tensor, + var all = gen_math_ops.all(input_tensor, _ReductionDims(input_tensor, axis), keepdims, name: name); @@ -581,23 +582,23 @@ public static Tensor reduce_logsumexp(Tensor input_tensor, Axis axis = null, boo public static Tensor reduce_any(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var max = (axis != null) ? gen_math_ops._any(input_tensor, axis, keepdims, name) : - gen_math_ops._any(input_tensor, r, keepdims, name); + var max = (axis != null) ? 
gen_math_ops.any(input_tensor, axis, keepdims, name) : + gen_math_ops.any(input_tensor, r, keepdims, name); return _may_reduce_to_scalar(keepdims, axis, max); } public static Tensor reduce_max(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var max = (axis != null) ? gen_math_ops._max(input_tensor, axis, keepdims, name) : - gen_math_ops._max(input_tensor, r, keepdims, name); + var max = (axis != null) ? gen_math_ops.max(input_tensor, axis, keepdims, name) : + gen_math_ops.max(input_tensor, r, keepdims, name); return _may_reduce_to_scalar(keepdims, axis, max); } public static Tensor reduce_min(Tensor input_tensor, Axis axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var min = gen_math_ops._min(input_tensor, r, keepdims, name); + var min = gen_math_ops.min(input_tensor, r, keepdims, name); return _may_reduce_to_scalar(keepdims, axis, min); } @@ -643,7 +644,7 @@ public static Tensor __case__(Tensor x, TF_DataType dtype, string name = null) public static Tensor reduce_sum(Tensor input_tensor, Tensor axis = null, bool keepdims = false, string name = null) { var r = _ReductionDims(input_tensor, axis); - var m = gen_math_ops._sum(input_tensor, r, keep_dims: keepdims, name: name); + var m = gen_math_ops.sum(input_tensor, r, keep_dims: keepdims, name: name); return _may_reduce_to_scalar(keepdims, axis, m); } @@ -752,10 +753,10 @@ public static Tensor floordiv(Tensor x, Tensor y, string name = null) } public static Tensor minimum(Tx x, Ty y, string name = null) - => gen_math_ops.minimum(x, y, name: name); + => gen_math_ops.minimum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); public static Tensor maximum(Tx x, Ty y, string name = null) - => gen_math_ops.maximum(x, y, name: name); + => gen_math_ops.maximum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name); /// /// Multiplies matrix `a` by matrix `b`, producing `a` * `b`. 
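Note: the recurring change in the math_ops.cs hunks above is that the regenerated gen_math_ops entry points take Tensor parameters, so scalar operands are wrapped explicitly with ops.convert_to_tensor at the call site. A minimal sketch of the resulting call pattern, assuming the gen_math_ops.maximum/minimum signatures shown in the hunks; the input values and bounds are illustrative placeholders, not taken from the patch:

    // Illustrative sketch (values are placeholders); assumes
    // using Tensorflow; using static Tensorflow.Binding;
    // Scalars are converted explicitly, since the regenerated ops take Tensor arguments.
    var x = ops.convert_to_tensor(new int[] { -3, 0, 5 });
    var clipped = gen_math_ops.maximum(x, ops.convert_to_tensor(0));   // was: gen_math_ops.maximum(x, 0)
    var capped = gen_math_ops.minimum(clipped, ops.convert_to_tensor(4));
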
diff --git a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs index d24e81ef4..ca4b885f7 100644 --- a/src/TensorFlowNET.Core/Operations/nn_impl.py.cs +++ b/src/TensorFlowNET.Core/Operations/nn_impl.py.cs @@ -236,7 +236,7 @@ public static Tensor zero_fraction(Tensor value, string name = null) Tensor size = array_ops.size(value, out_type: dtypes.int64); Tensor zero_fraction_float32 = null; - size = gen_math_ops.less_equal(size, dtypes.int32.max()); + size = gen_math_ops.less_equal(size, ops.convert_to_tensor(dtypes.int32.max())); Tensor num_nonzero = control_flow_ops.cond( size, () => math_ops.cast(_count_nonzero(value, dtype: dtypes.int32), TF_DataType.TF_INT64), diff --git a/src/TensorFlowNET.Core/Operations/nn_ops.cs b/src/TensorFlowNET.Core/Operations/nn_ops.cs index b8d5103c4..00d7d316b 100644 --- a/src/TensorFlowNET.Core/Operations/nn_ops.cs +++ b/src/TensorFlowNET.Core/Operations/nn_ops.cs @@ -55,7 +55,7 @@ public static Tensor bias_add(Tensor value, return tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => { name = scope; - return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name); + return gen_nn_ops.bias_add(value, ops.convert_to_tensor(bias), data_format: data_format, name: name); }); } @@ -117,7 +117,7 @@ public static Tensor in_top_k(Tensor predictions, Tensor targets, int k, string { return tf_with(ops.name_scope(name, "in_top_k"), delegate { - return gen_nn_ops.in_top_kv2(predictions, targets, k, name: name); + return gen_nn_ops.in_top_kv2(predictions, targets, ops.convert_to_tensor(k), name: name); }); } @@ -222,8 +222,8 @@ public static Tensor sparse_softmax_cross_entropy_with_logits(Tensor labels = nu // Check if no reshapes are required. if (logits.shape.ndim == 2) { - var (cost, _) = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( - precise_logits, labels, name: name); + var cost = gen_nn_ops.sparse_softmax_cross_entropy_with_logits( + precise_logits, labels, name: name)[0]; if (logits.dtype == dtypes.float16) return math_ops.cast(cost, dtypes.float32); else @@ -261,7 +261,8 @@ public static Tensor softmax_cross_entropy_with_logits_v2_helper(Tensor labels, // The second output tensor contains the gradients. We use it in // _CrossEntropyGrad() in nn_grad but not here. - var (cost, unused_backprop) = gen_nn_ops.softmax_cross_entropy_with_logits(precise_logits, labels, name: name); + var entropy = gen_nn_ops.softmax_cross_entropy_with_logits(precise_logits, labels, name: name); + var (cost, unused_backprop) = (entropy[0], entropy[1]); // The output cost shape should be the input minus axis. 
var output_shape = array_ops.slice(input_shape, diff --git a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs index b1dbf5864..29dc525df 100644 --- a/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs +++ b/src/TensorFlowNET.Core/Tensors/Ragged/RowPartition.cs @@ -78,7 +78,7 @@ public static RowPartition from_value_rowids(Tensor value_rowids, minlength: nrows_int32, maxlength: nrows_int32, dtype: value_rowids.dtype); - var row_splits = array_ops.concat(new object[] + var row_splits = array_ops.concat(new Tensor[] { ops.convert_to_tensor(new long[] { 0 }), tf.cumsum(row_lengths) diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs index ef71be2c0..c7a631d8b 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensor.Operators.cs @@ -154,103 +154,103 @@ public partial class Tensor public static Tensor operator >(Tensor lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); public static Tensor operator >(Tensor lhs, NDArray rhs) => gen_math_ops.greater(lhs, rhs); public static Tensor operator >(NDArray lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, sbyte rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(sbyte lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, byte rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(byte lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, short rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(short lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, ushort rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(ushort lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, int rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(int lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, uint rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(uint lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, ulong rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(ulong lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, long rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(long lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, float rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(float lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, double rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(double lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Tensor lhs, Complex rhs) => gen_math_ops.greater(lhs, rhs); - public static Tensor operator >(Complex lhs, Tensor rhs) => gen_math_ops.greater(lhs, rhs); + public static Tensor operator >(Tensor lhs, sbyte rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(sbyte lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, byte rhs) => 
gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(byte lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, short rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(short lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, ushort rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(ushort lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, int rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(int lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, uint rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(uint lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, ulong rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(ulong lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), ops.convert_to_tensor(rhs)); + public static Tensor operator >(Tensor lhs, long rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(long lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, float rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(float lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, double rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(double lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >(Tensor lhs, Complex rhs) => gen_math_ops.greater(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >(Complex lhs, Tensor rhs) => gen_math_ops.greater(ops.convert_to_tensor(lhs), rhs); public static Tensor operator <(Tensor lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); public static Tensor operator <(Tensor lhs, NDArray rhs) => gen_math_ops.less(lhs, rhs); public static Tensor operator <(NDArray lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, sbyte rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(sbyte lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, byte rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(byte lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, short rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(short lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, ushort rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(ushort lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, int rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(int lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, uint rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(uint lhs, Tensor 
rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, ulong rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(ulong lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, long rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(long lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, float rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(float lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, double rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(double lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Tensor lhs, Complex rhs) => gen_math_ops.less(lhs, rhs); - public static Tensor operator <(Complex lhs, Tensor rhs) => gen_math_ops.less(lhs, rhs); + public static Tensor operator <(Tensor lhs, sbyte rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(sbyte lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, byte rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(byte lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, short rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(short lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, ushort rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(ushort lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, int rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(int lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, uint rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(uint lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, ulong rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(ulong lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, long rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(long lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, float rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(float lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, double rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(double lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <(Tensor lhs, Complex rhs) => gen_math_ops.less(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <(Complex lhs, Tensor rhs) => gen_math_ops.less(ops.convert_to_tensor(lhs), rhs); public static Tensor operator >=(Tensor lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); public static Tensor operator >=(Tensor lhs, NDArray rhs) => 
gen_math_ops.greater_equal(lhs, rhs); public static Tensor operator >=(NDArray lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, sbyte rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(sbyte lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, byte rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(byte lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, short rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(short lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, ushort rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(ushort lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, int rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(int lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, uint rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(uint lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, ulong rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(ulong lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, long rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(long lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, float rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(float lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, double rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(double lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Tensor lhs, Complex rhs) => gen_math_ops.greater_equal(lhs, rhs); - public static Tensor operator >=(Complex lhs, Tensor rhs) => gen_math_ops.greater_equal(lhs, rhs); + public static Tensor operator >=(Tensor lhs, sbyte rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(sbyte lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, byte rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(byte lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, short rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(short lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, ushort rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(ushort lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, int rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(int lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor 
operator >=(Tensor lhs, uint rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(uint lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, ulong rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(ulong lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, long rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(long lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, float rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(float lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, double rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(double lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator >=(Tensor lhs, Complex rhs) => gen_math_ops.greater_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator >=(Complex lhs, Tensor rhs) => gen_math_ops.greater_equal(ops.convert_to_tensor(lhs), rhs); public static Tensor operator <=(Tensor lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); public static Tensor operator <=(Tensor lhs, NDArray rhs) => gen_math_ops.less_equal(lhs, rhs); public static Tensor operator <=(NDArray lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, sbyte rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(sbyte lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, byte rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(byte lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, short rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(short lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, ushort rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(ushort lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, int rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(int lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, uint rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(uint lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, ulong rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(ulong lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, long rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(long lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, float rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(float lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, double rhs) => gen_math_ops.less_equal(lhs, rhs); - 
public static Tensor operator <=(double lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Tensor lhs, Complex rhs) => gen_math_ops.less_equal(lhs, rhs); - public static Tensor operator <=(Complex lhs, Tensor rhs) => gen_math_ops.less_equal(lhs, rhs); + public static Tensor operator <=(Tensor lhs, sbyte rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(sbyte lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, byte rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(byte lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, short rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(short lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, ushort rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(ushort lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, int rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(int lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, uint rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(uint lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, ulong rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(ulong lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, long rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(long lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, float rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(float lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, double rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(double lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); + public static Tensor operator <=(Tensor lhs, Complex rhs) => gen_math_ops.less_equal(lhs, ops.convert_to_tensor(rhs)); + public static Tensor operator <=(Complex lhs, Tensor rhs) => gen_math_ops.less_equal(ops.convert_to_tensor(lhs), rhs); public static Tensor operator -(Tensor x) => gen_math_ops.neg(x); diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs index b98495a32..d063ee39f 100644 --- a/src/TensorFlowNET.Core/Tensors/Tensors.cs +++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs @@ -161,6 +161,9 @@ public unsafe static explicit operator string(Tensors tensor) EnsureSingleTensor(tensor, "explicit conversion to string"); return (string)tensor[0]; } + + public static explicit operator object[](Tensors tensors) + => tensors.items.ToArray(); #endregion #region Implicit Conversions diff --git 
a/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs b/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs index 10a85d9d9..e16f82c05 100644 --- a/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs +++ b/src/TensorFlowNET.Core/Training/Saving/BaseSaverBuilder.cs @@ -106,7 +106,7 @@ public virtual SaverDef _build_internal(IVariableV1[] names_to_saveables, name = scope; // Add a placeholder string tensor for the filename. - var filename_tensor = array_ops.placeholder_with_default(string.IsNullOrEmpty(filename) ? "model" : filename, shape: new int[0], name: "filename"); + var filename_tensor = array_ops.placeholder_with_default(tf.convert_to_tensor(string.IsNullOrEmpty(filename) ? "model" : filename), shape: new int[0], name: "filename"); // Keep the name "Const" for backwards compatibility. filename_tensor = gen_array_ops.placeholder_with_default(filename_tensor, shape: new int[0], name: "Const"); diff --git a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs index a7e1d7e34..b93c6aed7 100644 --- a/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs +++ b/src/TensorFlowNET.Keras/Engine/DataAdapters/TensorLikeDataAdapter.cs @@ -57,7 +57,8 @@ Tensors permutation(Tensors tensor) IDatasetV2 slice_batch_indices(Tensor indices) { var num_in_full_batch = num_full_batches * _batch_size; - var first_k_indices = array_ops.slice(indices, new int[] { 0 }, new int[] { num_in_full_batch }); + var first_k_indices = array_ops.slice(indices, new Tensor[] { ops.convert_to_tensor(0) }, + new Tensor[] { ops.convert_to_tensor(num_in_full_batch) }); first_k_indices = array_ops.reshape(first_k_indices, new int[] { num_full_batches, _batch_size }); var flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices); if (_partial_batch_size > 0) @@ -81,7 +82,7 @@ IDatasetV2 slice_inputs(IDatasetV2 indices_dataset, Tensors elements) { var indices = inputs[0]; var results = inputs.Skip(1) - .Select(x => gen_array_ops.gather_v2(x, indices, 0)) + .Select(x => array_ops.gather(x, indices, axis: 0)) .ToArray(); return new Tensors(results); }, -1); diff --git a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs index b1cc2446c..aa6617ddc 100644 --- a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs +++ b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs @@ -79,7 +79,7 @@ protected override Tensors Call(Tensors inputs, Tensor state = null, bool? 
train } else { - outputs = gen_math_ops.mat_mul(inputs, kernel.AsTensor()); + outputs = math_ops.matmul(inputs, kernel.AsTensor()); } if (args.UseBias) diff --git a/src/TensorFlowNET.Keras/Losses/Huber.cs b/src/TensorFlowNET.Keras/Losses/Huber.cs index a256786f1..7169ba461 100644 --- a/src/TensorFlowNET.Keras/Losses/Huber.cs +++ b/src/TensorFlowNET.Keras/Losses/Huber.cs @@ -30,7 +30,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro return gen_math_ops.mean(array_ops.where_v2(abs_error <= delta, half * math_ops.pow(error, 2), half * math_ops.pow(delta, 2) + delta * (abs_error - delta)), - axis: -1); + ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/LogCosh.cs b/src/TensorFlowNET.Keras/Losses/LogCosh.cs index 8acbbe9d2..7cfd4f67b 100644 --- a/src/TensorFlowNET.Keras/Losses/LogCosh.cs +++ b/src/TensorFlowNET.Keras/Losses/LogCosh.cs @@ -20,7 +20,8 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); Tensor x = y_pred_dispatch - y_true_cast; - return gen_math_ops.mean(x + gen_math_ops.softplus(-2.0 * x) - math_ops.cast(math_ops.log(tf.Variable(2.0)), x.dtype), axis: -1); + return gen_math_ops.mean(x + gen_nn_ops.softplus(-2.0 * x) - math_ops.cast(math_ops.log(tf.Variable(2.0)), x.dtype), + ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs index 5d0f83d43..c203bc5ad 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsoluteError.cs @@ -17,7 +17,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro { Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - return gen_math_ops.mean(math_ops.abs(y_pred_dispatch - y_true_cast), axis: -1); + return gen_math_ops.mean(math_ops.abs(y_pred_dispatch - y_true_cast), ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs index 3295b12b1..8dcaa1bcc 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanAbsolutePercentageError.cs @@ -18,7 +18,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); Tensor diff = math_ops.abs(y_true_cast - y_pred_dispatch) / gen_math_ops.maximum(math_ops.abs(y_true_cast), gen_math_ops.cast(tf.constant(1e-7), y_pred_dispatch.dtype)); - return gen_math_ops.cast(tf.constant(100), y_pred_dispatch.dtype) * gen_math_ops.mean(diff, axis: -1); + return gen_math_ops.cast(tf.constant(100), y_pred_dispatch.dtype) * gen_math_ops.mean(diff, ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs index 6ae7d86d4..73cddef14 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredError.cs @@ -17,7 +17,7 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro { Tensor y_pred_dispatch = ops.convert_to_tensor(y_pred); Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); - return 
gen_math_ops.mean(gen_math_ops.squared_difference(y_pred_dispatch, y_true_cast), axis: -1); + return gen_math_ops.mean(gen_math_ops.squared_difference(y_pred_dispatch, y_true_cast), ops.convert_to_tensor(-1)); } } } diff --git a/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs index 22b5a6ff9..e29659218 100644 --- a/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs +++ b/src/TensorFlowNET.Keras/Losses/MeanSquaredLogarithmicError.cs @@ -20,14 +20,14 @@ public override Tensor Apply(Tensor y_true = null, Tensor y_pred =null, bool fro Tensor y_true_cast = gen_math_ops.cast(y_true, y_pred_dispatch.dtype); Tensor first_log=null, second_log=null; if (y_pred_dispatch.dtype == TF_DataType.TF_DOUBLE) { - first_log = math_ops.log(gen_math_ops.maximum(y_pred_dispatch, 1e-7) + 1.0); - second_log = math_ops.log(gen_math_ops.maximum(y_true_cast, 1e-7) + 1.0); + first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7) + 1.0); + second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7) + 1.0); } else { - first_log = math_ops.log(gen_math_ops.maximum(y_pred_dispatch, 1e-7f) + 1.0f); - second_log = math_ops.log(gen_math_ops.maximum(y_true_cast, 1e-7f) + 1.0f); + first_log = math_ops.log(math_ops.maximum(y_pred_dispatch, 1e-7f) + 1.0f); + second_log = math_ops.log(math_ops.maximum(y_true_cast, 1e-7f) + 1.0f); } - return gen_math_ops.mean(gen_math_ops.squared_difference(first_log, second_log), axis: -1); + return gen_math_ops.mean(gen_math_ops.squared_difference(first_log, second_log), ops.convert_to_tensor(-1)); } } } diff --git a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs index a31dea7d2..c637cf858 100644 --- a/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs +++ b/test/TensorFlowNET.Graph.UnitTest/ControlFlowTest/WhileContextTestCase.cs @@ -25,8 +25,8 @@ private void _testWhileContextHelper(int maximum_iterations) // TODO: implement missing code dependencies var sess = this.cached_session(); var i = constant_op.constant(0, name: "i"); - var c = new Func(x => gen_math_ops.less(x, 10, name: "c")); - var b = new Func(x => gen_math_ops.add(x, 1, name: "c")); + var c = new Func(x => gen_math_ops.less(x, ops.convert_to_tensor(10), name: "c")); + var b = new Func(x => math_ops.add(x, 1, name: "c")); //control_flow_ops.while_loop( // c, b, i , maximum_iterations: tf.constant(maximum_iterations)); foreach (Operation op in sess.graph.get_operations()) diff --git a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs index 92afd6a3f..f240817b4 100644 --- a/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs +++ b/test/TensorFlowNET.Graph.UnitTest/GradientTest/GradientTest.cs @@ -260,7 +260,7 @@ public void testConcatGrad() public void testStopGradientFunction() { var ap = tf.constant(1f); - var b = tf.tanh(ap) + gen_array_ops.stop_gradient(ap); + var b = tf.tanh(ap) + array_ops.stop_gradient(ap); var g = tf.gradients(b, ap); var sess = tf.Session(); var result = sess.run(g); diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs index 6a12ed20b..72f598e46 100644 --- a/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs +++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ArrayOpsTest.cs @@ -18,7 +18,7 @@ public void Slice() var input_array = 
tf.constant(np.array(new int[] { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }).reshape((3,2,3))); var indices = tf.constant(np.array(new int[] { 0, 2 })); - var r1 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 1, 1, 3 }); + var r1 = array_ops.slice(input_array, ops.convert_n_to_tensor(new object[] { 1, 0, 0 }), ops.convert_n_to_tensor(new object[] { 1, 1, 3 })); Assert.AreEqual(new Shape(1,1,3), r1.shape); var r1np = r1.numpy(); Assert.AreEqual(r1np[0, 0, 0], 3); @@ -26,7 +26,7 @@ public void Slice() Assert.AreEqual(r1np[0, 0, 2], 3); - var r2 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 1, 2, 3 }); + var r2 = array_ops.slice(input_array, ops.convert_n_to_tensor(new object[] { 1, 0, 0 }), ops.convert_n_to_tensor(new object[] { 1, 2, 3 })); Assert.AreEqual(new Shape(1, 2, 3), r2.shape); var r2np = r2.numpy(); Assert.AreEqual(r2np[0, 0, 0], 3); @@ -36,7 +36,7 @@ public void Slice() Assert.AreEqual(r2np[0, 1, 1], 4); Assert.AreEqual(r2np[0, 1, 2], 4); - var r3 = array_ops.slice(input_array, new int[] { 1, 0, 0 }, new int[] { 2, 1, 3 }); + var r3 = array_ops.slice(input_array, ops.convert_n_to_tensor(new object[] { 1, 0, 0 }), ops.convert_n_to_tensor(new object[] { 2, 1, 3 })); Assert.AreEqual(new Shape(2, 1, 3), r3.shape); var r3np = r3.numpy(); Assert.AreEqual(r3np[0, 0, 0], 3); From 2ae1dc21cbf0b894b4f1cac09a9af1ad5360dc42 Mon Sep 17 00:00:00 2001 From: Yaohui Liu Date: Mon, 8 May 2023 02:07:56 +0800 Subject: [PATCH 5/5] build: revise package dependencies. --- TensorFlow.NET.sln | 20 -------------------- Tensorflow.CodeGen/Tensorflow.CodeGen.csproj | 2 +- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln index 8d5488146..2950c5d23 100644 --- a/TensorFlow.NET.sln +++ b/TensorFlow.NET.sln @@ -37,8 +37,6 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistH EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{BADBB104-2F03-4824-A249-803A871D8122}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "protobuf.Text", "..\protobuf.Text\src\protobuf.Text\protobuf.Text.csproj", "{151B3A8A-8576-4190-BD58-F42944A49718}" -EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -304,24 +302,6 @@ Global {BADBB104-2F03-4824-A249-803A871D8122}.Release|x64.Build.0 = Release|Any CPU {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.ActiveCfg = Release|Any CPU {BADBB104-2F03-4824-A249-803A871D8122}.Release|x86.Build.0 = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|Any CPU.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x64.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Debug|x86.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|Any CPU.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x64.Build.0 = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.ActiveCfg = Debug|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.GPU|x86.Build.0 = Debug|Any 
CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.ActiveCfg = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|Any CPU.Build.0 = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.ActiveCfg = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x64.Build.0 = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.ActiveCfg = Release|Any CPU - {151B3A8A-8576-4190-BD58-F42944A49718}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj index 865db126b..5948fb2c3 100644 --- a/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj +++ b/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj @@ -9,10 +9,10 @@ + -
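
Note: throughout this series every FastPathOpExecInfo is constructed with the eager Context as its first argument. A minimal sketch of the eager fast-path call shape after the change, assuming the constructor argument order shown in the hunks above; the op name "AddV2", the inputs, and the name variable are illustrative placeholders only:

    // Illustrative sketch (op name, inputs, and name are placeholders); assumes
    // using Tensorflow; using static Tensorflow.Binding;
    // The Context is now passed as the first FastPathOpExecInfo constructor argument.
    var x = tf.constant(1f);
    var y = tf.constant(2f);
    string name = null;
    if (tf.Context.executing_eagerly())
    {
        var result = tf.Runner.TFE_FastPathExecute(
            new FastPathOpExecInfo(tf.Context, "AddV2", name, x, y));
        var output = result[0];
    }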