,name,file,line,type,comment
0,UserInputError,tensorflow/configure.py,74,class,
1,is_windows,tensorflow/configure.py,78,function,
2,is_linux,tensorflow/configure.py,82,function,
3,is_macos,tensorflow/configure.py,86,function,
4,is_ppc64le,tensorflow/configure.py,90,function,
5,is_cygwin,tensorflow/configure.py,94,function,
6,get_input,tensorflow/configure.py,98,function,
7,symlink_force,tensorflow/configure.py,109,function,"Force symlink, equivalent of 'ln -sf'. Args: target: items to link to. link_name: name of the link."
8,sed_in_place,tensorflow/configure.py,126,function,"Replace old string with new string in file. Args: filename: string for filename. old: string to replace. new: new string to replace with."
9,write_to_bazelrc,tensorflow/configure.py,141,function,
10,write_action_env_to_bazelrc,tensorflow/configure.py,146,function,
11,run_shell,tensorflow/configure.py,150,function,
12,cygpath,tensorflow/configure.py,163,function,Convert path from posix to windows.
13,get_python_path,tensorflow/configure.py,168,function,Get the python site package paths.
14,get_python_major_version,tensorflow/configure.py,198,function,Get the python major version.
15,setup_python,tensorflow/configure.py,203,function,Set up python related env variables.
16,reset_tf_configure_bazelrc,tensorflow/configure.py,273,function,Reset file that contains customized config settings.
17,cleanup_makefile,tensorflow/configure.py,278,function,"Delete any leftover BUILD files from the Makefile build. These files could interfere with Bazel parsing."
18,get_var,tensorflow/configure.py,292,function,"Get boolean input from user. If var_name is not set in env, ask user to enable query_item or not. If the response is empty, use the default. Args: environ_cp: copy of the os.environ. var_name: string for name of environment variable, e.g. ""TF_NEED_CUDA"". query_item: string for feature related to the variable, e.g. ""CUDA for Nvidia GPUs"". enabled_by_default: boolean for default behavior. question: optional string for how to ask for user input. yes_reply: optional string for reply when feature is enabled. no_reply: optional string for reply when feature is disabled. Returns: boolean value of the variable. Raises: UserInputError: if an environment variable is set, but it cannot be interpreted as a boolean indicator, assume that the user has made a scripting error and will continue to provide invalid input. Raise the error to avoid infinitely looping."
19,set_build_var,tensorflow/configure.py,377,function,"Set if query_item will be enabled for the build. Ask user if query_item will be enabled. Default is used if no input is given. Set subprocess environment variable and write to .bazelrc if enabled. Args: environ_cp: copy of the os.environ. var_name: string for name of environment variable, e.g. ""TF_NEED_CUDA"". query_item: string for feature related to the variable, e.g. ""CUDA for Nvidia GPUs"". option_name: string for option to define in .bazelrc. enabled_by_default: boolean for default behavior. bazel_config_name: Name for Bazel --config argument to enable build feature."
20,set_action_env_var,tensorflow/configure.py,411,function,"Set boolean action_env variable. Ask user if query_item will be enabled. Default is used if no input is given. Set environment variable and write to .bazelrc. Args: environ_cp: copy of the os.environ. var_name: string for name of environment variable, e.g. ""TF_NEED_CUDA"". query_item: string for feature related to the variable, e.g. ""CUDA for Nvidia GPUs"". enabled_by_default: boolean for default behavior. question: optional string for how to ask for user input. yes_reply: optional string for reply when feature is enabled. no_reply: optional string for reply when feature is disabled. bazel_config_name: adding config to .bazelrc instead of action_env."
21,convert_version_to_int,tensorflow/configure.py,446,function,"Convert a version number to an integer that can be used to compare. Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The 'xxxxx' part, for instance 'homebrew' on OS/X, is ignored. Args: version: a version to be converted. Returns: An integer if converted successfully, otherwise return None."
22,check_bazel_version,tensorflow/configure.py,471,function,"Check that the installed bazel version is between min_version and max_version. Args: min_version: string for minimum bazel version (must exist!). max_version: string for maximum bazel version (must exist!). Returns: The bazel version detected."
23,set_cc_opt_flags,tensorflow/configure.py,518,function,"Set up architecture-dependent optimization flags. Also append CC optimization flags to bazel.rc. Args: environ_cp: copy of the os.environ."
24,set_tf_cuda_clang,tensorflow/configure.py,546,function,"Set TF_CUDA_CLANG action_env. Args: environ_cp: copy of the os.environ."
25,set_tf_download_clang,tensorflow/configure.py,566,function,Set TF_DOWNLOAD_CLANG action_env.
26,get_from_env_or_user_or_default,tensorflow/configure.py,582,function,"Get var_name either from env, or user, or default. If var_name has been set as environment variable, use the preset value, else ask for user input. If no input is provided, the default is used. Args: environ_cp: copy of the os.environ. var_name: string for name of environment variable, e.g. ""TF_NEED_CUDA"". ask_for_var: string for how to ask for user input. var_default: default value string. Returns: string value for var_name."
27,set_clang_cuda_compiler_path,tensorflow/configure.py,607,function,Set CLANG_CUDA_COMPILER_PATH.
28,prompt_loop_or_load_from_env,tensorflow/configure.py,630,function,"Loop over user prompts for an ENV param until receiving a valid response. For the env param var_name, read from the environment or verify user input until receiving valid input. When done, set var_name in the environ_cp to its new value. Args: environ_cp: (Dict) copy of the os.environ. var_name: (String) string for name of environment variable, e.g. ""TF_MYVAR"". var_default: (String) default value string. ask_for_var: (String) string for how to ask for user input. check_success: (Function) function that takes one argument and returns a boolean. Should return True if the value provided is considered valid. May contain a complex error message if error_msg does not provide enough information. In that case, set suppress_default_error to True. error_msg: (String) String with one and only one '%s'. Formatted with each invalid response upon check_success(input) failure. suppress_default_error: (Bool) Suppress the above error message in favor of one from the check_success function. resolve_symlinks: (Bool) Translate symbolic links into the real filepath. n_ask_attempts: (Integer) Number of times to query for valid input before raising an error and quitting. Returns: [String] The value of var_name after querying for input. Raises: UserInputError: if a query has been attempted n_ask_attempts times without success, assume that the user has made a scripting error and will continue to provide invalid input. Raise the error to avoid infinitely looping."
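The version comparison described for convert_version_to_int (record 21) is easiest to see as code. A minimal sketch under an assumed zero-padding scheme, not the actual configure.py implementation:

import re

def version_to_int(version):
  # Drop suffixes such as '-homebrew', then zero-pad each numeric
  # component so that string concatenation yields a comparable integer.
  parts = version.split('-')[0].split('.')
  if not all(re.match(r'^\d+$', p) for p in parts):
    return None  # per the docstring: return None on failed conversion
  parts += ['0'] * (3 - len(parts))  # accept short 'X.YZ' style strings
  return int(''.join(p.zfill(3) for p in parts))

assert version_to_int('0.26.1') < version_to_int('0.27.0')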
29,create_android_ndk_rule,tensorflow/configure.py,696,function,Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule.
30,create_android_sdk_rule,tensorflow/configure.py,724,function,Set Android variables and write Android SDK WORKSPACE rule.
31,get_ndk_api_level,tensorflow/configure.py,788,function,Gets the appropriate NDK API level to use for the provided Android NDK path.
32,set_gcc_host_compiler_path,tensorflow/configure.py,836,function,Set GCC_HOST_COMPILER_PATH.
33,reformat_version_sequence,tensorflow/configure.py,858,function,"Reformat the version string to have the given number of sequences. For example: Given (7, 2) -> 7.0 (7.0.1, 2) -> 7.0 (5, 1) -> 5 (5.0.3.2, 1) -> 5 Args: version_str: String, the version string. sequence_count: int, the number of version sequences to keep. Returns: string, reformatted version string."
34,set_tf_cuda_paths,tensorflow/configure.py,881,function,Set TF_CUDA_PATHS.
35,set_tf_cuda_version,tensorflow/configure.py,892,function,Set TF_CUDA_VERSION.
36,set_tf_cudnn_version,tensorflow/configure.py,904,function,Set TF_CUDNN_VERSION.
37,is_cuda_compatible,tensorflow/configure.py,916,function,Check compatibility between given library and cudnn/cudart libraries.
38,set_tf_tensorrt_version,tensorflow/configure.py,945,function,Set TF_TENSORRT_VERSION.
39,set_tf_nccl_version,tensorflow/configure.py,962,function,Set TF_NCCL_VERSION.
40,get_native_cuda_compute_capabilities,tensorflow/configure.py,979,function,"Get native cuda compute capabilities. Args: environ_cp: copy of the os.environ. Returns: string of native cuda compute capabilities, separated by comma."
41,set_tf_cuda_compute_capabilities,tensorflow/configure.py,1003,function,Set TF_CUDA_COMPUTE_CAPABILITIES.
42,set_other_cuda_vars,tensorflow/configure.py,1074,function,Set other CUDA related variables.
43,set_host_cxx_compiler,tensorflow/configure.py,1083,function,Set HOST_CXX_COMPILER.
44,set_host_c_compiler,tensorflow/configure.py,1100,function,Set HOST_C_COMPILER.
45,set_computecpp_toolkit_path,tensorflow/configure.py,1117,function,Set COMPUTECPP_TOOLKIT_PATH.
46,set_trisycl_include_dir,tensorflow/configure.py,1149,function,Set TRISYCL_INCLUDE_DIR.
47,system_specific_test_config,tensorflow/configure.py,1173,function,Add default build and test flags required for TF tests to bazelrc.
48,set_system_libs_flag,tensorflow/configure.py,1216,function,
49,is_reduced_optimize_huge_functions_available,tensorflow/configure.py,1233,function,"Check to see if the system supports /d2ReducedOptimizeHugeFunctions. The above compiler flag is a new compiler flag introduced to the Visual Studio compiler in version 16.4 (available in Visual Studio 2019, Preview edition only, as of 2019-11-19). TensorFlow needs this flag to massively reduce compile times, but until 16.4 is officially released, we can't depend on it. See also https://groups.google.com/a/tensorflow.org/d/topic/build/SsW98Eo7l3o/discussion Because it's very annoying to check this manually (to check the MSVC installed versions, you need to use the registry, and it's not clear if Bazel will be using that install version anyway), we expect environments that know they may use this flag to export TF_VC_VERSION=16.4 TODO(angerson, gunan): Remove this function when TensorFlow's minimum VS version is upgraded to 16.4. Arguments: environ_cp: Environment of the current execution Returns: boolean, whether or not /d2ReducedOptimizeHugeFunctions is available on this machine."
50,set_windows_build_flags,tensorflow/configure.py,1262,function,Set Windows specific build options.
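The worked examples in the reformat_version_sequence docstring (record 33) pin down its behavior exactly; a sketch matching them (an illustration, not necessarily the exact configure.py code):

def reformat_version_sequence(version_str, sequence_count):
  # Truncate to sequence_count components, padding with '0' if short.
  parts = version_str.split('.')[:sequence_count]
  parts += ['0'] * (sequence_count - len(parts))
  return '.'.join(parts)

assert reformat_version_sequence('7', 2) == '7.0'
assert reformat_version_sequence('7.0.1', 2) == '7.0'
assert reformat_version_sequence('5', 1) == '5'
assert reformat_version_sequence('5.0.3.2', 1) == '5'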
51,config_info_line,tensorflow/configure.py,1283,function,Helper function to print formatted help text for Bazel config options.
52,configure_ios,tensorflow/configure.py,1288,function,"Configures TensorFlow for iOS builds. This function will only be executed if `is_macos()` is true."
53,validate_cuda_config,tensorflow/configure.py,1305,function,"Run find_cuda_config.py and return cuda_toolkit_path, or None."
54,main,tensorflow/configure.py,1365,function,
55,_running_from_pip_package,tensorflow/tensorflow/api_template.__init__.py,132,function,
56,_running_from_pip_package,tensorflow/tensorflow/api_template_v1.__init__.py,142,function,
57,_LazyLoader,tensorflow/tensorflow/virtual_root_template_v1.__init__.py,33,class,Lazily import a module so that we can forward it.
58,_forward_module,tensorflow/tensorflow/virtual_root_template_v1.__init__.py,63,function,
59,_LazyLoader,tensorflow/tensorflow/virtual_root_template_v2.__init__.py,33,class,Lazily import a module so that we can forward it.
60,_forward_module,tensorflow/tensorflow/virtual_root_template_v2.__init__.py,63,function,
61,VarsAndArithmeticObjectGraph,tensorflow/tensorflow/cc/saved_model/testdata/generate_saved_models.py,37,class,Three vars (one in a sub-module) and compute method.
62,ReferencesParent,tensorflow/tensorflow/cc/saved_model/testdata/generate_saved_models.py,55,class,
63,CyclicModule,tensorflow/tensorflow/cc/saved_model/testdata/generate_saved_models.py,64,class,
64,main,tensorflow/tensorflow/cc/saved_model/testdata/generate_saved_models.py,77,function,
65,tfadd,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,48,function,
66,tfadd_with_ckpt,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,54,function,
67,tfadd_with_ckpt_saver,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,69,function,
68,tfassert_eq,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,88,function,
69,tfcond,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,96,function,
70,tfgather,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,104,function,
71,tfmatmul,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,110,function,
72,tfmatmulandadd,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,116,function,
73,tffunction,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,124,function,
74,tfsplits,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,135,function,"A more complex graph, including splits."
75,tftop_k,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,152,function,
76,tfvariable_readonly,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,158,function,
77,tfvariable,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,169,function,
78,tfvariable_sequential_updates,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,177,function,
79,export_debug_info,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,189,function,"Exports debug information from a graph. Args: exported_graph: A Graph that has been created by tracing a saveable view. Returns: Corresponding GraphDebugInfo with traces for all ops in exported_graph."
80,write_graph,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,204,function,Build a graph using build_graph and write it out.
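The make_test_graphs.py entries above (records 65-80) all follow one pattern: each tf* function builds a tiny graph, and write_graph serializes it for the AOT tests. A hedged sketch of that pattern; the bodies and file naming here are illustrative assumptions:

import tensorflow.compat.v1 as tf

def tfadd(_):
  # Build a trivial add graph with named nodes for the AOT test to fetch.
  x = tf.constant([1, 1], name='x_const')
  y = tf.constant([2, 2], name='y_const')
  tf.add(x, y, name='x_y_sum')

def write_graph(build_graph, out_dir):
  """Builds a graph via build_graph and writes its GraphDef to out_dir."""
  g = tf.Graph()
  with g.as_default():
    build_graph(out_dir)
    filename = '%s/test_graph_%s.pb' % (out_dir, build_graph.__name__)
    with open(filename, 'wb') as f:
      f.write(g.as_graph_def().SerializeToString())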
81,main,tensorflow/tensorflow/compiler/aot/tests/make_test_graphs.py,223,function,
82,_XlaClusterOutputGrad,tensorflow/tensorflow/compiler/jit/ops/xla_ops_grad.py,25,function,
83,TestGraphDebugInfo,tensorflow/tensorflow/compiler/mlir/lite/tests/debuginfo/concrete_function_error.py,32,class,Test that the stack trace can be displayed.
84,main,tensorflow/tensorflow/compiler/mlir/lite/tests/debuginfo/concrete_function_error.py,64,function,
85,TestModule,tensorflow/tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py,32,class,The test model has an unsupported op.
86,TestGraphDebugInfo,tensorflow/tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py,41,class,Test that the stack trace can be displayed.
87,main,tensorflow/tensorflow/compiler/mlir/lite/tests/debuginfo/saved_model_error.py,73,function,Test driver method that writes the error message to stdout.
88,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic.py,38,class,
89,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic_v1.py,49,function,
90,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/call_to_exported.py,27,class,
91,do_test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common.py,43,function,"Runs test. 1. Performs absl and tf ""main""-like initialization that must run before almost anything else. 2. Converts `tf.Module` to SavedModel. 3. Converts SavedModel to MLIR. 4. Prints the textual MLIR to stdout (it is expected that the caller will have FileCheck checks in its file to check this output). This is only for use by the MLIR SavedModel importer tests. Args: create_module_fn: A callable taking no arguments, which returns the `tf.Module` to be converted and printed. exported_names: A set of exported names for the MLIR converter (default is ""export all""). show_debug_info: If true, shows debug locations in the resulting MLIR."
92,set_tf_options,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common_v1.py,38,function,
93,do_test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common_v1.py,49,function,"Runs test. 1. Performs absl and tf ""main""-like initialization that must run before almost anything else. 2. Converts signature_def_map to SavedModel V1. 3. Converts SavedModel V1 to MLIR. 4. Prints the textual MLIR to stdout (it is expected that the caller will have FileCheck checks in its file to check this output). This is only for use by the MLIR SavedModel importer tests. Args: create_signature: A functor that returns signature_def_map, init_op and assets_collection. signature_def_map is a map from string key to signature_def. The key will be used as function name in the resulting MLIR. canonicalize: If true, canonicalizer will be run on the resulting MLIR. show_debug_info: If true, shows debug locations in the resulting MLIR."
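A hedged usage sketch for the do_test harness documented above (record 91): a test script defines a tf.Module factory and hands it to common.do_test, which saves it and prints the imported MLIR for FileCheck. The module body and import path are assumptions for illustration:

import tensorflow as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common

class TestModule(tf.Module):

  @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
  def plus_one(self, x):
    return x + 1.0

if __name__ == '__main__':
  # create_module_fn takes no arguments and returns a tf.Module;
  # passing the class works because calling it constructs an instance.
  common.do_test(TestModule)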
94,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/control_flow_duplicate_v1.py,42,function,
95,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/control_flow_upgrade_legacy_v1.py,34,function,
96,ReferencesParent,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/cyclic_object_graph.py,27,class,
97,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/cyclic_object_graph.py,38,class,
98,Child,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/dag_object_graph.py,27,class,
99,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/dag_object_graph.py,37,class,
100,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/debug_info.py,27,class,
101,plus,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/defun_export.py,29,function,
102,test_defun,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/defun_export.py,33,function,
103,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/duplicate_method_names_v1.py,37,function,
104,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py,27,class,
105,write_vocabulary_file,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/hash_table_asset_v1.py,39,function,Write temporary vocab file for module construction.
106,test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/hash_table_asset_v1.py,49,function,
107,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/hash_table_v1.py,60,function,
108,mnist_model,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py,27,function,Creates a MNIST model.
109,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py,36,class,
110,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_arguments_results_v1.py,52,function,
111,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_variables_v1.py,39,function,
112,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/partially_shaped_variables.py,27,class,
113,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/remove_init_variable_v1.py,50,function,
114,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shapes_for_arguments.py,27,class,
115,Test,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shared_variable_v1.py,41,function,
116,TestModule,tensorflow/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/structured_input.py,27,class,
117,AdadeltaOptimizerTest,tensorflow/tensorflow/compiler/tests/adadelta_test.py,31,class,
118,AdagradDAOptimizerTest,tensorflow/tensorflow/compiler/tests/adagrad_da_test.py,32,class,
119,AdagradOptimizerTest,tensorflow/tensorflow/compiler/tests/adagrad_test.py,31,class,
120,adam_update_numpy,tensorflow/tensorflow/compiler/tests/adam_test.py,34,function,
121,AdamOptimizerTest,tensorflow/tensorflow/compiler/tests/adam_test.py,52,class,
122,XlaAddNTest,tensorflow/tensorflow/compiler/tests/add_n_test.py,30,class,
123,ArgMinMaxTest,tensorflow/tensorflow/compiler/tests/argminmax_test.py,30,class,
124,BinaryOpsTest,tensorflow/tensorflow/compiler/tests/binary_ops_test.py,39,class,Test cases for binary operators.
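For the mnist_model factory indexed above (record 108), a minimal sketch of what such a Keras model builder might look like; the layer sizes are illustrative assumptions, not the test's actual architecture:

import tensorflow as tf

def mnist_model():
  """Creates a small MNIST classifier as a Keras Sequential model."""
  return tf.keras.Sequential([
      tf.keras.layers.Flatten(input_shape=(28, 28)),   # 28x28 gray images
      tf.keras.layers.Dense(128, activation='relu'),
      tf.keras.layers.Dense(10, activation='softmax'),  # 10 digit classes
  ])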
125,BucketizationOpTest,tensorflow/tensorflow/compiler/tests/bucketize_op_test.py,30,class,
126,CaseTest,tensorflow/tensorflow/compiler/tests/case_test.py,31,class,
127,CategoricalTest,tensorflow/tensorflow/compiler/tests/categorical_op_test.py,36,class,Test cases for random-number generating operators.
128,CholeskyOpTest,tensorflow/tensorflow/compiler/tests/cholesky_op_test.py,35,class,
129,ClusteringTest,tensorflow/tensorflow/compiler/tests/clustering_test.py,35,class,
130,ComplexNumbersDivisionTest,tensorflow/tensorflow/compiler/tests/complex_div_test.py,35,class,Test cases for complex numbers division operators.
131,ConcatTest,tensorflow/tensorflow/compiler/tests/concat_ops_test.py,34,class,
132,ConcatOffsetTest,tensorflow/tensorflow/compiler/tests/concat_ops_test.py,335,class,
133,PackTest,tensorflow/tensorflow/compiler/tests/concat_ops_test.py,349,class,
134,CondTest,tensorflow/tensorflow/compiler/tests/cond_test.py,39,class,
135,Conv2DTest,tensorflow/tensorflow/compiler/tests/conv2d_test.py,42,class,
136,Conv2DBackpropInputTest,tensorflow/tensorflow/compiler/tests/conv2d_test.py,236,class,
137,Conv2DBackpropFilterTest,tensorflow/tensorflow/compiler/tests/conv2d_test.py,534,class,
138,Conv3DBackpropFilterV2GradTest,tensorflow/tensorflow/compiler/tests/conv3d_test.py,36,class,
139,Conv3DTransposeTest,tensorflow/tensorflow/compiler/tests/conv3d_test.py,69,class,
140,ConvolutionNodeNameTest,tensorflow/tensorflow/compiler/tests/conv_node_name_test.py,35,class,"Verify convolution node names match. Verify convolution node names on TPU and CPU match with dilation > 1."
141,XlaDataFormatDimMapTest,tensorflow/tensorflow/compiler/tests/data_format_ops_test.py,30,class,
142,XlaPermuteOpTest,tensorflow/tensorflow/compiler/tests/data_format_ops_test.py,67,class,
143,GetRunMetadataLabels,tensorflow/tensorflow/compiler/tests/dense_layer_test.py,36,function,Returns all labels in run_metadata.
144,InLabels,tensorflow/tensorflow/compiler/tests/dense_layer_test.py,45,function,Returns true iff one of the labels contains substr.
145,DenseLayerTest,tensorflow/tensorflow/compiler/tests/dense_layer_test.py,50,class,
146,ReferenceDepthwiseConv2D,tensorflow/tensorflow/compiler/tests/depthwise_conv_op_test.py,35,function,
147,ConfigsToTest,tensorflow/tensorflow/compiler/tests/depthwise_conv_op_test.py,64,function,"Iterator for different convolution shapes, strides and paddings. Yields: Tuple (input_size, filter_size, out_size, stride, padding), the depthwise convolution parameters."
148,ConfigsWithDilationsToTest,tensorflow/tensorflow/compiler/tests/depthwise_conv_op_test.py,91,function,"Iterator for different convolution shapes, strides and paddings. Yields: Tuple (input_size, filter_size, out_size, stride, dilation, padding), the depthwise convolution parameters."
149,CheckGradConfigsToTest,tensorflow/tensorflow/compiler/tests/depthwise_conv_op_test.py,117,function,"Iterator for different convolution shapes, strides and paddings. compute_gradient_error() is very expensive, so the configs should be relatively small. Yields: Tuple (input_size, filter_size, out_size, stride, padding), the depthwise convolution parameters."
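The ConfigsToTest family above (records 147-149) are generators that yield one tuple of depthwise-convolution parameters per configuration. A sketch of that generator style; the concrete shapes below are illustrative, not the test's real list:

def ConfigsToTest():
  """Yields (input_size, filter_size, out_size, stride, padding) tuples."""
  input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84]]
  filter_sizes = [[1, 1, 48, 2], [3, 1, 84, 1]]
  out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84]]
  strides = [1, 1]
  paddings = ['SAME', 'SAME']
  for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes,
                           strides, paddings):
    yield i, f, o, s, p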
150,DepthwiseConv2DTest,tensorflow/tensorflow/compiler/tests/depthwise_conv_op_test.py,144,class,
151,DynamicUpdateSliceOpsTest,tensorflow/tensorflow/compiler/tests/dynamic_slice_ops_test.py,30,class,
152,DynamicStitchTest,tensorflow/tensorflow/compiler/tests/dynamic_stitch_test.py,30,class,
153,EagerTest,tensorflow/tensorflow/compiler/tests/eager_test.py,47,class,
154,EagerFunctionTest,tensorflow/tensorflow/compiler/tests/eager_test.py,301,class,
155,ExcessivePaddingTest,tensorflow/tensorflow/compiler/tests/eager_test.py,721,class,"Test that eager execution works with TPU flattened tensors. Tensors that would normally be excessively padded when written to TPU memory are reshaped to 1-D flat tensors. This test case verifies that such tensors work with eager execution. The flattening currently only happens on TPU, but tests should work fine with all backends as flattening is transparent."
156,multiple_tpus,tensorflow/tensorflow/compiler/tests/eager_test.py,772,function,
157,MultiDeviceTest,tensorflow/tensorflow/compiler/tests/eager_test.py,777,class,Test running TPU computation on more than one core.
158,EinsumOpTest,tensorflow/tensorflow/compiler/tests/einsum_op_test.py,30,class,Test cases for einsum op.
159,EnsureShapeOpTest,tensorflow/tensorflow/compiler/tests/ensure_shape_op_test.py,29,class,
160,ExtractImagePatches,tensorflow/tensorflow/compiler/tests/extract_image_patches_op_test.py,29,class,Functional tests for ExtractImagePatches op.
161,FakeQuantWithMinMaxArgsTest,tensorflow/tensorflow/compiler/tests/fake_quant_ops_test.py,27,class,Test cases for FakeQuantWithMinMaxArgs operation.
162,FakeQuantWithMinMaxArgsGradientTest,tensorflow/tensorflow/compiler/tests/fake_quant_ops_test.py,125,class,Test cases for FakeQuantWithMinMaxArgsGradient operation.
163,FakeQuantWithMinMaxVarsTest,tensorflow/tensorflow/compiler/tests/fake_quant_ops_test.py,226,class,Test cases for FakeQuantWithMinMaxVars operation.
164,FakeQuantWithMinMaxVarsGradientTest,tensorflow/tensorflow/compiler/tests/fake_quant_ops_test.py,331,class,Test cases for FakeQuantWithMinMaxVarsGradient operation.
165,pick_10,tensorflow/tensorflow/compiler/tests/fft_test.py,38,function,
166,to_32bit,tensorflow/tensorflow/compiler/tests/fft_test.py,45,function,
167,FFTTest,tensorflow/tensorflow/compiler/tests/fft_test.py,60,class,
168,FIFOQueueTest,tensorflow/tensorflow/compiler/tests/fifo_queue_test.py,31,class,
169,FtrlOptimizerTest,tensorflow/tensorflow/compiler/tests/ftrl_test.py,32,class,
170,FunctionTest,tensorflow/tensorflow/compiler/tests/function_test.py,31,class,
171,FusedBatchNormTest,tensorflow/tensorflow/compiler/tests/fused_batchnorm_test.py,45,class,
172,GatherNdTest,tensorflow/tensorflow/compiler/tests/gather_nd_op_test.py,30,class,
173,GatherTest,tensorflow/tensorflow/compiler/tests/gather_test.py,34,class,
174,GatherBenchmark,tensorflow/tensorflow/compiler/tests/gather_test.py,158,class,Microbenchmarks for the gather op.
175,_generate_numpy_random_rgb,tensorflow/tensorflow/compiler/tests/image_ops_test.py,40,function,
176,RGBToHSVTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,47,class,
177,AdjustContrastTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,110,class,
178,AdjustHueTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,174,class,
179,AdjustSaturationTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,309,class,
180,ResizeNearestNeighborTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,409,class,
181,ResizeBilinearTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,548,class,
182,ResizeBilinearGradTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,631,class,
183,ResizeBilinearNonAlignCornersTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,713,class,
184,NonMaxSuppressionTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,776,class,
185,BatchedNonMaxSuppressionCorrectnessTest,tensorflow/tensorflow/compiler/tests/image_ops_test.py,985,class,
186,NoRewriteSessionConfig,tensorflow/tensorflow/compiler/tests/jit_test.py,46,function,
187,CompiledKernel,tensorflow/tensorflow/compiler/tests/jit_test.py,56,function,"Execute 'fn' as a compiled XLA kernel, with 'inputs'."
188,RunMetadataLabels,tensorflow/tensorflow/compiler/tests/jit_test.py,68,function,Returns all labels in run_metadata.
189,InLabels,tensorflow/tensorflow/compiler/tests/jit_test.py,77,function,Returns true iff one of the labels contains substr.
190,MetadataHasXlaRunOp,tensorflow/tensorflow/compiler/tests/jit_test.py,82,function,Returns true if there are XlaRun kernels in run_metadata's timeline.
191,JitLaunchTest,tensorflow/tensorflow/compiler/tests/jit_test.py,89,class,
192,XlaCompilationTest,tensorflow/tensorflow/compiler/tests/jit_test.py,279,class,Tests for auto-compilation on CPU/GPU devices.
193,ElementWiseFusionTest,tensorflow/tensorflow/compiler/tests/jit_test.py,480,class,
194,LazyCompilationTest,tensorflow/tensorflow/compiler/tests/jit_test.py,520,class,
195,ListDiffTest,tensorflow/tensorflow/compiler/tests/listdiff_op_test.py,31,class,
196,LRNTest,tensorflow/tensorflow/compiler/tests/lrn_ops_test.py,39,class,
197,Clip,tensorflow/tensorflow/compiler/tests/lstm.py,38,function,"Clips x to the range [-1., 1.]."
198,LSTMCellWeightsShape,tensorflow/tensorflow/compiler/tests/lstm.py,43,function,Returns the shape of the weights for a single LSTM cell.
199,LSTMCell,tensorflow/tensorflow/compiler/tests/lstm.py,50,function,"Unrolls a single LSTM cell with clipped activations forward by one step. Args: weights: Weight matrix with shape LSTMCellWeightsShape. m_prev: Previous m states with shape [batch_size, num_nodes]. c_prev: Previous c states with shape [batch_size, num_nodes]. x: Input with shape [batch_size, num_inputs]. pad: Padding with shape [batch_size, 1]. Each padding value is either 0 or 1, where 1 indicates padding; i.e. the input is shorter than the sequence length, and the (m, c) states should simply be passed through from the previous states. Returns: The next (m, c) states, each with shape [batch_size, num_nodes]."
200,LSTMLayer,tensorflow/tensorflow/compiler/tests/lstm.py,88,function,"Unrolls a layer of LSTM cells forward by the sequence length. The sequence length is determined by the length of x_seq and pad_seq, which must be the same. Args: cell_name: Base name of each cell. weights: Weight matrix with shape LSTMCellWeightsShape. m: Initial m states with shape [batch_size, num_nodes]. c: Initial c states with shape [batch_size, num_nodes]. x_seq: List of inputs, each with shape [batch_size, num_inputs]. The length of the list is the sequence length. pad_seq: List of paddings, each with shape [batch_size, 1]. The length of the list is the sequence length. Each padding value is either 0 or 1, where 1 indicates padding; i.e. the input is shorter than the sequence length. Returns: List of per-sequence-step outputs, each with shape [batch_size, num_nodes]. Raises: ValueError: If len(x_seq) != len(pad_seq)."
201,RandomVar,tensorflow/tensorflow/compiler/tests/lstm.py,121,function,Returns a variable of the given shape initialized to random values.
202,RandomInputs,tensorflow/tensorflow/compiler/tests/lstm.py,127,function,"Returns randomly initialized (x_seq, pad_seq) sequences."
203,BuildLSTMLayer,tensorflow/tensorflow/compiler/tests/lstm.py,140,function,"Builds a single LSTM layer with random weights and inputs. Args: batch_size: Inputs are fed in batches of this size. seq_length: The sequence length to unroll the LSTM layer. num_inputs: Dimension of inputs that are fed into each LSTM cell. num_nodes: The number of nodes in each LSTM cell. Returns: (out_seq, weights) pair. The out_seq is a list of per-sequence-step outputs, each with shape [batch_size, num_nodes]. The weights are a list of weight variables that may be trained."
204,_DumpGraph,tensorflow/tensorflow/compiler/tests/lstm_test.py,40,function,
205,_Sigmoid,tensorflow/tensorflow/compiler/tests/lstm_test.py,47,function,
206,_Clip,tensorflow/tensorflow/compiler/tests/lstm_test.py,51,function,
207,LSTMTest,tensorflow/tensorflow/compiler/tests/lstm_test.py,55,class,
208,LSTMBenchmark,tensorflow/tensorflow/compiler/tests/lstm_test.py,238,class,Micro-benchmarks for a single layer of LSTM cells.
209,ManipOpsTest,tensorflow/tensorflow/compiler/tests/manip_ops_test.py,30,class,Test cases for manip ops.
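The LSTMCell/LSTMLayer docstrings above (records 199-200) describe the unroll contract precisely: the layer threads (m, c) through each step, and the cell itself passes the states through unchanged wherever pad == 1. A sketch of just that layer-level logic; cell_fn stands in for the real LSTMCell and is an assumption:

def lstm_layer(cell_fn, weights, m, c, x_seq, pad_seq):
  """Unrolls cell_fn over x_seq/pad_seq, returning per-step m outputs."""
  if len(x_seq) != len(pad_seq):
    raise ValueError('x_seq and pad_seq must have the same length')
  out_seq = []
  for x, pad in zip(x_seq, pad_seq):
    # The cell applies the documented pass-through for padded steps.
    m, c = cell_fn(weights, m, c, x, pad)
    out_seq.append(m)
  return out_seq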
210,MatrixBandPartTest,tensorflow/tensorflow/compiler/tests/matrix_band_part_test.py,30,class,
211,zip_to_first_list_length,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,32,function,
212,repack_diagonals,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,40,function,
213,repack_diagonals_in_tests,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,77,function,
214,square_cases,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,95,function,
215,tall_cases,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,173,function,
216,fat_cases,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,261,function,
217,all_tests,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,329,function,
218,MatrixDiagTest,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,333,class,
219,MatrixSetDiagTest,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,519,class,
220,MatrixDiagPartTest,tensorflow/tensorflow/compiler/tests/matrix_diag_ops_test.py,652,class,
221,InverseOpTest,tensorflow/tensorflow/compiler/tests/matrix_inverse_op_test.py,31,class,
222,MatrixSolveOpTest,tensorflow/tensorflow/compiler/tests/matrix_solve_op_test.py,30,class,
223,MakePlaceholder,tensorflow/tensorflow/compiler/tests/matrix_triangular_solve_op_test.py,36,function,
224,MatrixTriangularSolveOpTest,tensorflow/tensorflow/compiler/tests/matrix_triangular_solve_op_test.py,40,class,
225,MomentumOptimizerTest,tensorflow/tensorflow/compiler/tests/momentum_test.py,33,class,
226,NAryOpsTest,tensorflow/tensorflow/compiler/tests/nary_ops_test.py,32,class,
227,NullaryOpsTest,tensorflow/tensorflow/compiler/tests/nullary_ops_test.py,29,class,
228,PlaceholderTest,tensorflow/tensorflow/compiler/tests/placeholder_test.py,28,class,
229,_AvgPoolGrad,tensorflow/tensorflow/compiler/tests/pooling_ops_3d_test.py,35,function,
230,Pooling3DTest,tensorflow/tensorflow/compiler/tests/pooling_ops_3d_test.py,45,class,
231,NHWCToNCHW,tensorflow/tensorflow/compiler/tests/pooling_ops_test.py,33,function,"Convert the input from NHWC format to NCHW. Args: input_tensor: a 4-D tensor, or a 4-element array representing the same. Returns: the converted tensor or a shape array"
232,NCHWToNHWC,tensorflow/tensorflow/compiler/tests/pooling_ops_test.py,48,function,"Convert the input from NCHW format to NHWC. Args: input_tensor: a 4-D tensor, or a 4-element array representing the same. Returns: the converted tensor or a shape array"
233,GetTestConfigs,tensorflow/tensorflow/compiler/tests/pooling_ops_test.py,63,function,"Get all the valid test configs to run. Returns: all the valid test configs"
234,PoolingTest,tensorflow/tensorflow/compiler/tests/pooling_ops_test.py,73,class,
235,PoolGradTest,tensorflow/tensorflow/compiler/tests/pooling_ops_test.py,292,class,
236,ProximalAdagradOptimizerTest,tensorflow/tensorflow/compiler/tests/proximal_adagrad_test.py,32,class,
237,ProximalGradientDescentOptimizerTest,tensorflow/tensorflow/compiler/tests/proximal_gradient_descent_test.py,32,class,
238,QrOpTest,tensorflow/tensorflow/compiler/tests/qr_op_test.py,33,class,
239,QuantizedOpsTest,tensorflow/tensorflow/compiler/tests/quantized_ops_test.py,36,class,
240,DequantizedOpsTest,tensorflow/tensorflow/compiler/tests/quantized_ops_test.py,53,class,
241,RandomOpsTest,tensorflow/tensorflow/compiler/tests/random_ops_test.py,34,class,Test cases for random-number generating operators.
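The NHWCToNCHW/NCHWToNHWC helpers above (records 231-232) accept either a 4-D tensor or a 4-element shape array. A minimal numpy sketch of the NHWC-to-NCHW direction, assuming a pure axis permutation:

import numpy as np

def nhwc_to_nchw(input_tensor):
  """Transposes a 4-D array, or permutes a 4-element shape list."""
  if isinstance(input_tensor, (list, tuple)) and len(input_tensor) == 4:
    n, h, w, c = input_tensor
    return [n, c, h, w]
  return np.transpose(input_tensor, [0, 3, 1, 2])

x = np.zeros([2, 5, 7, 3])                 # batch, height, width, channels
assert nhwc_to_nchw(x).shape == (2, 3, 5, 7)
assert nhwc_to_nchw([2, 5, 7, 3]) == [2, 3, 5, 7]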
242,ReduceOpsTest,tensorflow/tensorflow/compiler/tests/reduce_ops_test.py,37,class,
243,ReduceOpPrecisionTest,tensorflow/tensorflow/compiler/tests/reduce_ops_test.py,183,class,
244,ReduceWindowTest,tensorflow/tensorflow/compiler/tests/reduce_window_test.py,31,class,Test cases for xla.reduce_window.
245,ReshapeTest,tensorflow/tensorflow/compiler/tests/reshape_op_test.py,30,class,
246,ReverseOpsTest,tensorflow/tensorflow/compiler/tests/reverse_ops_test.py,32,class,
247,ReverseSequenceTest,tensorflow/tensorflow/compiler/tests/reverse_sequence_op_test.py,29,class,
248,RmspropTest,tensorflow/tensorflow/compiler/tests/rmsprop_test.py,31,class,
249,numpy_reverse,tensorflow/tensorflow/compiler/tests/scan_ops_test.py,32,function,
250,handle_options,tensorflow/tensorflow/compiler/tests/scan_ops_test.py,43,function,Adds tf options to numpy scan ops.
251,CumsumTest,tensorflow/tensorflow/compiler/tests/scan_ops_test.py,72,class,
252,CumprodTest,tensorflow/tensorflow/compiler/tests/scan_ops_test.py,150,class,
253,_AsType,tensorflow/tensorflow/compiler/tests/scatter_nd_op_test.py,31,function,
254,_FlatInnerDims,tensorflow/tensorflow/compiler/tests/scatter_nd_op_test.py,35,function,
255,_FlatOuterDims,tensorflow/tensorflow/compiler/tests/scatter_nd_op_test.py,42,function,
256,_NumpyScatterNd,tensorflow/tensorflow/compiler/tests/scatter_nd_op_test.py,49,function,
257,_NumpyUpdate,tensorflow/tensorflow/compiler/tests/scatter_nd_op_test.py,66,function,
258,ScatterNdTest,tensorflow/tensorflow/compiler/tests/scatter_nd_op_test.py,71,class,
259,ScatterNdTensorTest,tensorflow/tensorflow/compiler/tests/scatter_nd_op_test.py,193,class,
260,SearchSorteddOpTest,tensorflow/tensorflow/compiler/tests/searchsorted_op_test.py,28,class,
261,SegmentReductionOpsTest,tensorflow/tensorflow/compiler/tests/segment_reduction_ops_test.py,32,class,Test cases for segment reduction ops.
262,SelfAdjointEigOpTest,tensorflow/tensorflow/compiler/tests/self_adjoint_eig_op_test.py,32,class,
263,SliceTest,tensorflow/tensorflow/compiler/tests/slice_ops_test.py,29,class,
264,StridedSliceTest,tensorflow/tensorflow/compiler/tests/slice_ops_test.py,127,class,
265,XlaSortOpTest,tensorflow/tensorflow/compiler/tests/sort_ops_test.py,32,class,
266,space_to_batch_direct,tensorflow/tensorflow/compiler/tests/spacetobatch_op_test.py,30,function,"Direct Python implementation of space-to-batch conversion. This is used for tests only. Args: input_array: N-D array. block_shape: 1-D array of shape [num_block_dims]. paddings: 2-D array of shape [num_block_dims, 2]. Returns: Converted tensor."
267,SpaceToBatchTest,tensorflow/tensorflow/compiler/tests/spacetobatch_op_test.py,71,class,Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
268,SpaceToBatchNDTest,tensorflow/tensorflow/compiler/tests/spacetobatch_op_test.py,152,class,Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops.
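The handle_options helper above (record 250) mirrors TF's exclusive/reverse scan options on top of numpy ops. A sketch of that option handling, shown 1-D for clarity (the identity value 0 suits cumsum; cumprod would use 1):

import numpy as np

def scan_with_options(func, x, exclusive=False, reverse=False, identity=0):
  """Applies a numpy scan op with tf-style exclusive/reverse options."""
  if reverse:
    x = x[::-1]
  out = func(x)
  if exclusive:
    # Shift right by one so out[i] accumulates only elements before i.
    out = np.concatenate([[identity], out[:-1]])
  if reverse:
    out = out[::-1]
  return out

x = np.array([1, 2, 3, 4])
assert list(scan_with_options(np.cumsum, x, exclusive=True)) == [0, 1, 3, 6]
assert list(scan_with_options(np.cumsum, x, reverse=True)) == [10, 9, 7, 4]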
269,_SparseToDense,tensorflow/tensorflow/compiler/tests/sparse_to_dense_op_test.py,31,function,
270,SparseToDenseTest,tensorflow/tensorflow/compiler/tests/sparse_to_dense_op_test.py,46,class,
271,_igamma,tensorflow/tensorflow/compiler/tests/special_math_test.py,48,function,
272,_igammac,tensorflow/tensorflow/compiler/tests/special_math_test.py,53,function,
273,implicit_reparameterization_grad,tensorflow/tensorflow/compiler/tests/special_math_test.py,58,function,
274,_log1p,tensorflow/tensorflow/compiler/tests/special_math_test.py,65,function,
275,Log1pTest,tensorflow/tensorflow/compiler/tests/special_math_test.py,69,class,
276,IgammaTest,tensorflow/tensorflow/compiler/tests/special_math_test.py,139,class,
277,IgammacTest,tensorflow/tensorflow/compiler/tests/special_math_test.py,324,class,
278,StackOpTest,tensorflow/tensorflow/compiler/tests/stack_ops_test.py,32,class,
279,xla_device,tensorflow/tensorflow/compiler/tests/stateful_random_ops_test.py,41,function,
280,xla_device_name,tensorflow/tensorflow/compiler/tests/stateful_random_ops_test.py,55,function,
281,StatefulRandomOpsTest,tensorflow/tensorflow/compiler/tests/stateful_random_ops_test.py,64,class,Test cases for stateful random-number generator operators.
282,StatelessRandomOpsTest,tensorflow/tensorflow/compiler/tests/stateless_random_ops_test.py,33,class,Test cases for stateless random-number generator operators.
283,StatelessRandomOpsBenchmark,tensorflow/tensorflow/compiler/tests/stateless_random_ops_test.py,136,class,Microbenchmarks for the stateless random ops.
284,SvdOpTest,tensorflow/tensorflow/compiler/tests/svd_op_test.py,33,class,
285,_make_converter,tensorflow/tensorflow/compiler/tests/tensor_array_ops_test.py,42,function,
286,TensorArrayTest,tensorflow/tensorflow/compiler/tests/tensor_array_ops_test.py,53,class,
287,ListOpsTest,tensorflow/tensorflow/compiler/tests/tensor_list_ops_test.py,34,class,
288,TernaryOpsTest,tensorflow/tensorflow/compiler/tests/ternary_ops_test.py,34,class,
289,ConvertBetweenDataFormats,tensorflow/tensorflow/compiler/tests/test_utils.py,26,function,Converts 4D tensor between data formats.
290,PermuteDimsBetweenDataFormats,tensorflow/tensorflow/compiler/tests/test_utils.py,47,function,Get new shape for converting between data formats.
291,RunWithWarmup,tensorflow/tensorflow/compiler/tests/test_utils.py,71,function,Runs a graph a few times to ensure that its clusters are compiled.
292,_tfconst,tensorflow/tensorflow/compiler/tests/tridiagonal_solve_ops_test.py,39,function,
293,_tf_ones,tensorflow/tensorflow/compiler/tests/tridiagonal_solve_ops_test.py,43,function,
294,TridiagonalSolveOpsTest,tensorflow/tensorflow/compiler/tests/tridiagonal_solve_ops_test.py,47,class,Test for tri-diagonal matrix related ops.
295,nhwc_to_format,tensorflow/tensorflow/compiler/tests/unary_ops_test.py,37,function,Converts a numpy array from NHWC format to `data_format`.
296,UnaryOpsTest,tensorflow/tensorflow/compiler/tests/unary_ops_test.py,48,class,Test cases for unary operators.
297,UnstackOpTest,tensorflow/tensorflow/compiler/tests/unstack_test.py,29,class,
298,VariableOpsTest,tensorflow/tensorflow/compiler/tests/variable_ops_test.py,40,class,Test cases for resource variable operators.
299,StridedSliceAssignChecker,tensorflow/tensorflow/compiler/tests/variable_ops_test.py,422,class,Compares the results of a slice assignment using TensorFlow and numpy.
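Why the Log1pTest entries above (records 274-275) exercise very small inputs: for |x| far below float32 epsilon, naive log(1 + x) rounds 1 + x to exactly 1 and returns 0, while log1p stays accurate. A worked illustration:

import numpy as np

x = np.float32(1e-10)
naive = np.log(np.float32(1) + x)   # 1 + 1e-10 rounds to 1.0 in float32
accurate = np.log1p(x)              # approximately 1e-10, as expected
assert naive == 0.0 and accurate > 0.0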
300,SliceAssignTest,tensorflow/tensorflow/compiler/tests/variable_ops_test.py,451,class,
301,WhileTest,tensorflow/tensorflow/compiler/tests/while_test.py,39,class,
302,is_compile_on_demand,tensorflow/tensorflow/compiler/tests/while_test.py,260,function,
303,XlaDeviceGpuTest,tensorflow/tensorflow/compiler/tests/xla_device_gpu_test.py,28,class,
304,XlaDeviceTest,tensorflow/tensorflow/compiler/tests/xla_device_test.py,32,class,
305,XlaOpsNumericalTest,tensorflow/tensorflow/compiler/tests/xla_ops_test.py,37,class,
306,XlaOpsShapeInferenceTest,tensorflow/tensorflow/compiler/tests/xla_ops_test.py,366,class,
307,parse_disabled_manifest,tensorflow/tensorflow/compiler/tests/xla_test.py,55,function,
308,XLATestCase,tensorflow/tensorflow/compiler/tests/xla_test.py,81,class,XLA test cases are parameterized test cases.
309,Benchmark,tensorflow/tensorflow/compiler/tests/xla_test.py,250,function,"Build a graph and run benchmarks against it, with or without XLA. Args: tf_bench: An instance of tf.test.Benchmark, used to run the benchmark. builder_fn: A function that builds a graph when invoked, and returns (name, fetches), where name is the name of the test, and fetches is a list of tensors to fetch as output. use_xla_jit: If true compile with the XLA JIT, otherwise use regular TF. device: The tensorflow device to run on, e.g. ""cpu"", ""gpu"". separate_compiled_gradients: If true put each gradient subgraph into a separate compilation scope. This gives fine-grained control over which portions of the graph will be compiled as a single unit. Compiling gradients separately may yield better performance for some graphs. The scope is named based on the scope of the forward computation as well as the name of the gradients. As a result, the gradients will be compiled in a scope that is separate from both the forward computation, and from other gradients."
310,XlaTestCaseTestCase,tensorflow/tensorflow/compiler/tests/xla_test_test.py,25,class,
311,_unary_op,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,70,function,Wrapper that restricts `fn` to have the correct signature.
312,_broadcasting_binary_op,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,119,function,Wraps a binary TensorFlow operator and performs XLA-style broadcasting.
313,_shift_right_logical_helper,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,152,function,Performs an integer right logical shift irrespective of input type.
314,_shift_right_arithmetic_helper,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,167,function,Performs an integer right arithmetic shift irrespective of input type.
315,_binary_op,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,211,function,Wrapper that restricts `fn` to have the correct signature.
316,broadcast,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,226,function,
317,clamp,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,234,function,
318,conv,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,241,function,"Wraps the XLA ConvGeneralDilated operator. ConvGeneralDilated is the most general form of XLA convolution and is documented at https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution Args: lhs: the input tensor. rhs: the kernel tensor. window_strides: the inter-window strides. padding: the padding to apply at the start and end of each input dimension. lhs_dilation: dilation to apply between input elements. rhs_dilation: dilation to apply between kernel elements. dimension_numbers: a `ConvolutionDimensionNumbers` proto. feature_group_count: number of feature groups for grouped convolution. precision_config: a `xla.PrecisionConfig` proto. name: an optional name for the operator. Returns: A tensor representing the output of the convolution."
319,dot,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,291,function,
320,dot_general,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,295,function,
321,self_adjoint_eig,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,307,function,
322,svd,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,311,function,
323,random_normal,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,327,function,
324,random_uniform,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,333,function,
325,reduce_window,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,343,function,"Wraps the XLA ReduceWindow operator. ReduceWindow is documented at https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow . Args: operand: the input tensor. init: a scalar tensor representing the initial value for the reduction. reducer: a reduction function that combines a pair of scalars. window_dimensions: shape of the window, as a list of integers. window_strides: inter-window strides, as a list of integers. Optional; if omitted, defaults to strides of 1. padding: padding to apply to 'operand'. List of (low, high) pairs of integers that specify the padding to apply before and after each dimension. Optional; if omitted, defaults to no padding. name: the operator name, or None. Returns: A tensor that represents the output of the reduce_window operator."
326,reshape,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,391,function,
327,select,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,398,function,
328,slice,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,406,function,
329,_sharding_grad,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,418,function,
330,_spmd_full_to_shard_shape_grad,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,431,function,
331,_spmd_shard_to_full_shape_grad,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,440,function,
332,gather,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,452,function,
333,scatter,tensorflow/tensorflow/compiler/tf2xla/python/xla.py,463,function,
334,Sharding,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,28,class,"A class to support adding sharding attributes to Ops. Use the factory constructors and then call apply_to_tensor: Sharding.replicate().apply_to_tensor(tensor)"
335,replicate,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,179,function,
336,assign_device,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,188,function,Returns a tensor that has AssignDevice sharding attribute.
337,tile,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,202,function,"Returns a tensor that has tiled sharding. Args: tensor: A tf.Tensor to shard. tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: If true, adds a sharding op to set the sharding."
338,split,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,224,function,"Returns a tensor that is split along the given dimension. Args: tensor: A tf.Tensor to split. split_dimension: The dimension to split. num_devices: The number of devices to partition the dimension. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: If true, adds a sharding op to set the sharding. input_shape: The full shape of the input tensor."
339,get_op_sharding,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,248,function,"Returns sharding attribute of an op. Args: op: a TensorFlow op. Returns: The attribute representing XLA sharding on this op."
340,auto_to_manual_spmd_partition,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,260,function,"Switches from automatic SPMD partitioning to manual partitioning. Converts a full-shaped tensor (to be automatically partitioned by SPMD partitioner) to a shard-shaped tensor to be consumed by manually partitioned ops. Args: tensor: A tf.Tensor in full shape. manual_sharding: a serialized string of OpSharding to be used in manual partitioning. Returns: A shard-shaped tensor to be consumed by manually partitioned ops."
341,manual_to_auto_spmd_partition,tensorflow/tensorflow/compiler/xla/experimental/xla_sharding/xla_sharding.py,279,function,"Switches from manual partitioning to automatic SPMD partitioning. Converts a shard-shaped tensor (manually partitioned in SPMD-style) to a full-shaped tensor to be partitioned automatically by the SPMD partitioner. Args: tensor: A tf.Tensor in shard shape. manual_sharding: a serialized string of OpSharding to be used in manual partitioning. full_shape: the shape of tensor before partitioning. Returns: A full-shaped tensor to be partitioned automatically by the SPMD partitioner."
342,numpy_assert_allclose,tensorflow/tensorflow/compiler/xla/python/bfloat16_test.py,35,function,
343,Bfloat16Test,tensorflow/tensorflow/compiler/xla/python/bfloat16_test.py,53,class,Tests the non-numpy Python methods of the bfloat16 type.
344,Bfloat16NumPyTest,tensorflow/tensorflow/compiler/xla/python/bfloat16_test.py,251,class,Tests the NumPy integration of the bfloat16 type.
345,_interpreter_backend_factory,tensorflow/tensorflow/compiler/xla/python/xla_client.py,58,function,
346,_cpu_backend_factory,tensorflow/tensorflow/compiler/xla/python/xla_client.py,62,function,
347,_gpu_backend_factory,tensorflow/tensorflow/compiler/xla/python/xla_client.py,66,function,Returns a GPU backend. BFC allocator is used by default.
348,register_local_backend_factory,tensorflow/tensorflow/compiler/xla/python/xla_client.py,101,function,
349,_get_local_backends,tensorflow/tensorflow/compiler/xla/python/xla_client.py,108,function,Instantiates all known local backends.
350,get_local_backend,tensorflow/tensorflow/compiler/xla/python/xla_client.py,131,function,"Returns a local backend. Args: name: the backend name. If `None`, a default local backend is returned, typically `gpu` if one is present, or `cpu` if not. If a string, the named backend is returned or an exception raised. Returns: A LocalBackend object."
351,OpMetadata,tensorflow/tensorflow/compiler/xla/python/xla_client.py,152,class,Python representation of a xla.OpMetadata protobuf.
352,CurrentSourceInfoMetadata,tensorflow/tensorflow/compiler/xla/python/xla_client.py,163,function,Helper for use in source mapping that returns an OpMetadata object.
353,dtype_to_etype,tensorflow/tensorflow/compiler/xla/python/xla_client.py,206,function,Convenience function for reading DTYPE_TO_XLA_ELEMENT_TYPE.
354,shape_from_pyval,tensorflow/tensorflow/compiler/xla/python/xla_client.py,272,function,Returns a Shape that describes a tuple-tree of Numpy arrays.
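A hedged usage sketch for the xla_sharding helpers indexed above (records 334-338), following the documented factory-then-apply pattern (Sharding.replicate().apply_to_tensor(tensor)) and the split() wrapper; exact keyword names follow the docstrings but are assumptions here:

import tensorflow as tf
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding

x = tf.ones([8, 16])
x = xla_sharding.replicate(x)  # replicate x across all cores

y = tf.ones([8, 16])
# Shard dimension 0 of y across two devices, per the split() docstring.
y = xla_sharding.split(y, split_dimension=0, num_devices=2)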
355,execute_with_python_values,tensorflow/tensorflow/compiler/xla/python/xla_client.py,334,function,Execute on one replica with Python values as arguments and output. 356,execute_with_python_values_replicated,tensorflow/tensorflow/compiler/xla/python/xla_client.py,345,function,"Execute on many replicas with Python values as arguments and output. Arguments: executable: the program to run. arguments: a list of lists of Python values indexed by `[replica][arg_num]` to pass as inputs. backend: the backend we are targeting. Returns: A list of python values, one per replica." 357,PaddingType,tensorflow/tensorflow/compiler/xla/python/xla_client.py,374,class, 358,window_padding_type_to_pad_values,tensorflow/tensorflow/compiler/xla/python/xla_client.py,379,function,Maps PaddingType or string to pad values (list of pairs of ints). 359,register_custom_call_target,tensorflow/tensorflow/compiler/xla/python/xla_client.py,418,function,"Registers a custom call target. Args: name: bytes containing the name of the function. fn: a PyCapsule object containing the function pointer. platform: the target platform." 360,PaddingConfigDimension,tensorflow/tensorflow/compiler/xla/python/xla_client.py,433,class,Python representation of a xla.PaddingConfigDimension protobuf. 361,PaddingConfig,tensorflow/tensorflow/compiler/xla/python/xla_client.py,443,class,Python representation of a xla.PaddingConfig protobuf. 362,make_padding_config,tensorflow/tensorflow/compiler/xla/python/xla_client.py,451,function,"Create PaddingConfig proto from list of triples of integers. Args: padding_config: either a PaddingConfig or a list of integer triples (edge_padding_low, edge_padding_high, interior_padding) representing the configuration of the padding operation. Returns: A `PaddingConfig` object." 363,DotDimensionNumbers,tensorflow/tensorflow/compiler/xla/python/xla_client.py,476,class,Python representation of a xla.DotDimensionNumbers protobuf. 364,make_dot_dimension_numbers,tensorflow/tensorflow/compiler/xla/python/xla_client.py,488,function,"Builds a DotDimensionNumbers object from a specification. Args: dimension_numbers: either a `DotDimensionNumbers` or a nested tuple `((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))` of lists of integers representing the dimensions to treat as contracting dimensions and batch dimensions on each input operand. Returns: A `DotDimensionNumbers` object." 365,ConvolutionDimensionNumbers,tensorflow/tensorflow/compiler/xla/python/xla_client.py,516,class,Python representation of a xla.ConvolutionDimensionNumbers protobuf. 366,make_convolution_dimension_numbers,tensorflow/tensorflow/compiler/xla/python/xla_client.py,536,function,"Builds a ConvolutionDimensionNumbers object from a specification. Args: dimension_numbers: optional, either a ConvolutionDimensionNumbers object or a tuple (lhs_spec, rhs_spec, out_spec). Each element is a string of length N+2 identifying by position: (1) batch dimensions in lhs, rhs, and the output with the character 'N', (2) feature dimensions in lhs and the output with the character 'C', (3) input and output feature dimensions in rhs with the characters 'I' and 'O' respectively, and (4) spatial dimension correspondences between lhs, rhs, and the output using any distinct characters. For example, to indicate dimension numbers consistent with the Conv operation with two spatial dimensions, one could use ('NCHW', 'OIHW', 'NCHW'). As another example, to indicate dimension numbers consistent with the TensorFlow Conv2D operation, one could use ('NHWC', 'HWIO', 'NHWC'). 
When using the latter form of convolution dimension specification, window strides are associated with spatial dimension character labels according to the order in which the labels appear in the rhs_spec string, so that window_strides[0] is matched with the dimension corresponding to the first character appearing in rhs_spec that is not 'I' or 'O'. By default, use the same dimension numbering as Conv and ConvWithGeneralPadding. num_spatial_dimensions: the number of spatial dimensions. Returns: A `ConvolutionDimensionNumbers` object." 367,OpSharding,tensorflow/tensorflow/compiler/xla/python/xla_client.py,600,class,Python representation of a xla.OpSharding protobuf. 368,PrecisionConfig,tensorflow/tensorflow/compiler/xla/python/xla_client.py,614,class,Python representation of a xla.PrecisionConfig protobuf. 369,GatherDimensionNumbers,tensorflow/tensorflow/compiler/xla/python/xla_client.py,624,class,Python representation of a xla.GatherDimensionNumbers protobuf. 370,ScatterDimensionNumbers,tensorflow/tensorflow/compiler/xla/python/xla_client.py,636,class,Python representation of a xla.ScatterDimensionNumbers protobuf. 371,ReplicaGroup,tensorflow/tensorflow/compiler/xla/python/xla_client.py,648,class,Python representation of a xla.ReplicaGroup protobuf. 372,_make_replica_group_proto,tensorflow/tensorflow/compiler/xla/python/xla_client.py,656,function, 373,make_replica_groups,tensorflow/tensorflow/compiler/xla/python/xla_client.py,662,function, 374,tracebacks,tensorflow/tensorflow/compiler/xla/python/xla_client.py,677,function,Context manager that enables or disables traceback collection. 375,heap_profile,tensorflow/tensorflow/compiler/xla/python/xla_client.py,687,function,Returns a gzipped pprof protocol buffer containing a heap profile. 376,TestFactory,tensorflow/tensorflow/compiler/xla/python/xla_client_test.py,56,function, 377,InstantiateTests,tensorflow/tensorflow/compiler/xla/python/xla_client_test.py,2103,function, 378,TpuBackend,tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py,29,class,XLA backend implemented using the Tpu driver API. 379,ConvertLiteralToNumpyArray,tensorflow/tensorflow/compiler/xla/python_api/xla_literal.py,28,function,Converts a XLA literal to a Numpy array. 380,_ConvertNumpyArrayToLiteral,tensorflow/tensorflow/compiler/xla/python_api/xla_literal.py,64,function,Converts a Numpy array to a XLA literal. 381,ConvertNumpyArrayToLiteral,tensorflow/tensorflow/compiler/xla/python_api/xla_literal.py,85,function,Converts a Numpy array or a nested tuple thereof to an XLA literal. 382,Shape,tensorflow/tensorflow/compiler/xla/python_api/xla_shape.py,29,class,"Wraps a xla_data_pb2.ShapeProto message with a convenient Python type. Provides direct access to the underlying xla_data_pb2.ShapeProto message in the message attribute, along with accessor wrappers to the message's fields. Avoid direct access to .message unless interacting directly with protobuf APIs like CopyFrom. In other words, prefer hauling the shape around in a Shape, and only access .message when strictly required by the protobuf API." 383,_CreateShapeFromNumpy,tensorflow/tensorflow/compiler/xla/python_api/xla_shape.py,103,function,"Create a Shape from a given Numpy array. Args: ndarray: Numpy array. Returns: A Shape object." 384,CreateShapeFromNumpy,tensorflow/tensorflow/compiler/xla/python_api/xla_shape.py,129,function,"Create a Shape from a Numpy array or a nested tuple structure thereof. Args: value: Numpy array or (possibly nested) tuple structure that bottoms out in Numpy arrays. 
Returns: A Shape object." 385,CreateShapeFromDtypeAndTuple,tensorflow/tensorflow/compiler/xla/python_api/xla_shape.py,147,function,"Create a shape from a Numpy dtype and a sequence of nonnegative integers. Args: dtype: a numpy dtype, e.g. np.dtype('int32'). shape_tuple: a sequence of nonnegative integers. Returns: A Shape object." 386,RamFilesystemTest,tensorflow/tensorflow/core/platform/ram_file_system_test.py,38,class, 387,AddOneTest,tensorflow/tensorflow/examples/adding_an_op/cuda_op_test.py,25,class, 388,FactTest,tensorflow/tensorflow/examples/adding_an_op/fact_test.py,25,class, 389,ZeroOut1Test,tensorflow/tensorflow/examples/adding_an_op/zero_out_1_test.py,29,class, 390,ZeroOut2Test,tensorflow/tensorflow/examples/adding_an_op/zero_out_2_test.py,30,class, 391,ZeroOut3Test,tensorflow/tensorflow/examples/adding_an_op/zero_out_3_test.py,27,class, 392,_zero_out_grad,tensorflow/tensorflow/examples/adding_an_op/zero_out_grad_2.py,28,function,"The gradients for `zero_out`. Args: op: The `zero_out` `Operation` that we are differentiating, which we can use to find the inputs and outputs of the original op. grad: Gradient with respect to the output of the `zero_out` op. Returns: Gradients with respect to the input of `zero_out`." 393,load_graph,tensorflow/tensorflow/examples/label_image/label_image.py,26,function, 394,read_tensor_from_image_file,tensorflow/tensorflow/examples/label_image/label_image.py,38,function, 395,load_labels,tensorflow/tensorflow/examples/label_image/label_image.py,65,function, 396,main,tensorflow/tensorflow/examples/saved_model/integration_tests/deploy_mnist_cnn.py,47,function, 397,MaybeDistributionScope,tensorflow/tensorflow/examples/saved_model/integration_tests/distribution_strategy_utils.py,48,class,Provides a context allowing no distribution strategy. 398,make_feature_extractor,tensorflow/tensorflow/examples/saved_model/integration_tests/export_mnist_cnn.py,56,function,Returns a Keras Model to compute a feature vector from MNIST images. 399,set_feature_extractor_hparams,tensorflow/tensorflow/examples/saved_model/integration_tests/export_mnist_cnn.py,72,function, 400,make_classifier,tensorflow/tensorflow/examples/saved_model/integration_tests/export_mnist_cnn.py,76,function,Returns a Keras Model to classify MNIST using feature_extractor. 401,wrap_keras_model_for_export,tensorflow/tensorflow/examples/saved_model/integration_tests/export_mnist_cnn.py,87,function,Wraps `model` for saving and loading as SavedModel. 402,_get_traced_loss,tensorflow/tensorflow/examples/saved_model/integration_tests/export_mnist_cnn.py,144,function,"Returns tf.function for model.losses[i] with a trace for zero args. The intended usage is [_get_traced_loss(model, i) for i in range(len(model.losses))] This is better than [tf.function(lambda: model.losses[i], input_signature=[]) for i ...] because it avoids capturing a loop index in a lambda, and removes any chance of deferring the trace. Args: model: a Keras Model. i: an integer from 0 up to, but not including, len(model.losses)." 403,main,tensorflow/tensorflow/examples/saved_model/integration_tests/export_mnist_cnn.py,163,function, 404,main,tensorflow/tensorflow/examples/saved_model/integration_tests/export_rnn_cell.py,32,function, 405,write_vocabulary_file,tensorflow/tensorflow/examples/saved_model/integration_tests/export_simple_text_embedding.py,34,function,Write temporary vocab file for module construction.
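Entry 392 above documents the gradient registered for the custom `zero_out` op from the adding_an_op examples. A minimal sketch of that registration pattern, assuming the op kernel is registered under the name "ZeroOut" (the body here is illustrative, not the exact code in zero_out_grad_2.py):

```python
import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterGradient("ZeroOut")
def _zero_out_grad(op, grad):
  # zero_out keeps only the first element of its input, so all of the
  # incoming gradient flows to index [0, 0, ...] and the rest is zero.
  to_zero = op.inputs[0]
  shape = tf.shape(to_zero)
  index = tf.expand_dims(tf.zeros_like(shape), 0)   # [[0, 0, ...]]
  first_grad = tf.reshape(grad, [-1])[:1]           # gradient for element 0
  return [tf.scatter_nd(index, first_grad, shape)]  # one gradient per input
```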
406,TextEmbeddingModel,tensorflow/tensorflow/examples/saved_model/integration_tests/export_simple_text_embedding.py,44,class,"Text embedding model. A text embedding model that takes sentences as input and outputs sentence embeddings." 407,main,tensorflow/tensorflow/examples/saved_model/integration_tests/export_simple_text_embedding.py,96,function, 408,TextRnnModel,tensorflow/tensorflow/examples/saved_model/integration_tests/export_text_rnn_model.py,31,class,"Text RNN model. A full generative text RNN model that can train and decode sentences from a starting word." 409,main,tensorflow/tensorflow/examples/saved_model/integration_tests/export_text_rnn_model.py,170,function, 410,TestCase,tensorflow/tensorflow/examples/saved_model/integration_tests/integration_scripts.py,42,class,Base class to write SavedModel integration tests. 411,MaybeRunScriptInstead,tensorflow/tensorflow/examples/saved_model/integration_tests/integration_scripts.py,62,function, 412,_load_random_data,tensorflow/tensorflow/examples/saved_model/integration_tests/mnist_util.py,28,function, 413,load_reshaped_data,tensorflow/tensorflow/examples/saved_model/integration_tests/mnist_util.py,34,function,Returns MNIST or Fashion MNIST or fake train and test data. 414,_prepare_image,tensorflow/tensorflow/examples/saved_model/integration_tests/mnist_util.py,44,function,"Converts images to [n,h,w,c] format in range [0,1]." 415,_prepare_label,tensorflow/tensorflow/examples/saved_model/integration_tests/mnist_util.py,49,function,Converts labels to one-hot encoding. 416,SavedModelTest,tensorflow/tensorflow/examples/saved_model/integration_tests/saved_model_test.py,32,class, 417,make_feature_extractor,tensorflow/tensorflow/examples/saved_model/integration_tests/use_mnist_cnn.py,72,function,Load a pre-trained feature extractor and wrap it for use in Keras. 418,make_classifier,tensorflow/tensorflow/examples/saved_model/integration_tests/use_mnist_cnn.py,100,function,Returns a Keras Model to classify MNIST using feature_extractor. 419,main,tensorflow/tensorflow/examples/saved_model/integration_tests/use_mnist_cnn.py,112,function, 420,train,tensorflow/tensorflow/examples/saved_model/integration_tests/use_model_in_sequential_keras.py,35,function,Build a Keras model and train with mock data. 421,main,tensorflow/tensorflow/examples/saved_model/integration_tests/use_model_in_sequential_keras.py,67,function, 422,main,tensorflow/tensorflow/examples/saved_model/integration_tests/use_rnn_cell.py,33,function, 423,train,tensorflow/tensorflow/examples/saved_model/integration_tests/use_text_embedding_in_dataset.py,34,function,Build a Keras model and train with mock data. 424,main,tensorflow/tensorflow/examples/saved_model/integration_tests/use_text_embedding_in_dataset.py,65,function, 425,main,tensorflow/tensorflow/examples/saved_model/integration_tests/use_text_rnn_model.py,32,function, 426,StreamingAccuracyStats,tensorflow/tensorflow/examples/speech_commands/accuracy_utils.py,24,class,"Get streaming accuracy statistics every time a new command is found. Attributes: _how_many_gt: How many ground truths. _how_many_gt_matched: How many ground truths have been matched. _how_many_fp: How many commands have been fired as false positives. _how_many_c: How many commands have been fired correctly. _how_many_w: How many commands have been fired wrongly. _gt_occurrence: A list recording which commands occur, and when, in the input audio stream. _previous_c: A variable to record the last status of _how_many_c. 
_previous_w: A variable to record the last status of _how_many_w. _previous_fp: A variable to record the last status of _how_many_fp." 427,create_inference_graph,tensorflow/tensorflow/examples/speech_commands/freeze.py,63,function,"Creates an audio model with the nodes needed for inference. Uses the supplied arguments to create a model, and inserts the input and output nodes that are needed to use the graph for inference. Args: wanted_words: Comma-separated list of the words we're trying to recognize. sample_rate: How many samples per second are in the input audio files. clip_duration_ms: How many samples to analyze for the audio pattern. clip_stride_ms: How often to run recognition. Useful for models with cache. window_size_ms: Time slice duration to estimate frequencies from. window_stride_ms: How far apart time slices should be. feature_bin_count: Number of frequency bands to analyze. model_architecture: Name of the kind of model to generate. preprocess: How the spectrogram is processed to produce features, for example 'mfcc', 'average', or 'micro'. Returns: Input and output tensor objects. Raises: Exception: If the preprocessing mode isn't recognized." 428,save_graph_def,tensorflow/tensorflow/examples/speech_commands/freeze.py,161,function,"Writes a graph def file out to disk. Args: file_name: Where to save the file. frozen_graph_def: GraphDef proto object to save." 429,save_saved_model,tensorflow/tensorflow/examples/speech_commands/freeze.py,176,function,"Writes a SavedModel out to disk. Args: file_name: Where to save the file. sess: TensorFlow session containing the graph. input_tensor: Tensor object defining the input's properties. output_tensor: Tensor object defining the output's properties." 430,main,tensorflow/tensorflow/examples/speech_commands/freeze.py,211,function, 431,FreezeTest,tensorflow/tensorflow/examples/speech_commands/freeze_test.py,30,class, 432,mix_in_audio_sample,tensorflow/tensorflow/examples/speech_commands/generate_streaming_test_wav.py,55,function,"Mixes the sample data into the main track at the specified offset. Args: track_data: Numpy array holding main audio data. Modified in-place. track_offset: Where to mix the sample into the main track. sample_data: Numpy array of audio data to mix into the main track. sample_offset: Where to start in the audio sample. clip_duration: How long the sample segment is. sample_volume: Loudness to mix the sample in at. ramp_in: Length in samples of volume increase stage. ramp_out: Length in samples of volume decrease stage." 433,main,tensorflow/tensorflow/examples/speech_commands/generate_streaming_test_wav.py,86,function, 434,GenerateStreamingTestWavTest,tensorflow/tensorflow/examples/speech_commands/generate_streaming_test_wav_test.py,27,class, 435,prepare_words_list,tensorflow/tensorflow/examples/speech_commands/input_data.py,58,function,"Prepends common tokens to the custom word list. Args: wanted_words: List of strings containing the custom words. Returns: List with the standard silence and unknown tokens added." 436,which_set,tensorflow/tensorflow/examples/speech_commands/input_data.py,70,function,"Determines which data partition the file should belong to. We want to keep files in the same training, validation, or testing sets even if new ones are added over time. This makes it less likely that testing samples will accidentally be reused in training when long runs are restarted for example. To keep this stability, a hash of the filename is taken and used to determine which set it should belong to. 
This determination only depends on the name and the set proportions, so it won't change as other files are added. It's also useful to associate particular files as related (for example words spoken by the same person), so anything after '_nohash_' in a filename is ignored for set determination. This ensures that 'bobby_nohash_0.wav' and 'bobby_nohash_1.wav' are always in the same set, for example. Args: filename: File path of the data sample. validation_percentage: How much of the data set to use for validation. testing_percentage: How much of the data set to use for testing. Returns: String, one of 'training', 'validation', or 'testing'." 437,load_wav_file,tensorflow/tensorflow/examples/speech_commands/input_data.py,118,function,"Loads an audio file and returns a float PCM-encoded array of samples. Args: filename: Path to the .wav file to load. Returns: Numpy array holding the sample data as floats between -1.0 and 1.0." 438,save_wav_file,tensorflow/tensorflow/examples/speech_commands/input_data.py,136,function,"Saves audio sample data to a .wav audio file. Args: filename: Path to save the file to. wav_data: 2D array of float PCM-encoded audio data. sample_rate: Samples per second to encode in the file." 439,get_features_range,tensorflow/tensorflow/examples/speech_commands/input_data.py,160,function,"Returns the expected min/max for generated features. Args: model_settings: Information about the current model being trained. Returns: Min/max float pair holding the range of features. Raises: Exception: If preprocessing mode isn't recognized." 440,AudioProcessor,tensorflow/tensorflow/examples/speech_commands/input_data.py,190,class,"Handles loading, partitioning, and preparing audio training data." 441,InputDataTest,tensorflow/tensorflow/examples/speech_commands/input_data_test.py,33,class, 442,load_graph,tensorflow/tensorflow/examples/speech_commands/label_wav.py,43,function,Unpersists graph from file as default graph. 443,load_labels,tensorflow/tensorflow/examples/speech_commands/label_wav.py,51,function,"Read in labels, one label per line." 444,run_graph,tensorflow/tensorflow/examples/speech_commands/label_wav.py,56,function,Runs the audio data through the graph and prints predictions. 445,label_wav,tensorflow/tensorflow/examples/speech_commands/label_wav.py,77,function,"Loads the model and labels, and runs the inference to print predictions." 446,main,tensorflow/tensorflow/examples/speech_commands/label_wav.py,98,function,"Entry point for script, converts flags to arguments." 447,load_graph,tensorflow/tensorflow/examples/speech_commands/label_wav_dir.py,44,function,Unpersists graph from file as default graph. 448,load_labels,tensorflow/tensorflow/examples/speech_commands/label_wav_dir.py,52,function,"Read in labels, one label per line." 449,run_graph,tensorflow/tensorflow/examples/speech_commands/label_wav_dir.py,57,function,Runs the audio data through the graph and prints predictions. 450,label_wav,tensorflow/tensorflow/examples/speech_commands/label_wav_dir.py,85,function,"Loads the model and labels, and runs the inference to print predictions." 451,main,tensorflow/tensorflow/examples/speech_commands/label_wav_dir.py,101,function,"Entry point for script, converts flags to arguments." 452,LabelWavTest,tensorflow/tensorflow/examples/speech_commands/label_wav_test.py,29,class, 453,_next_power_of_two,tensorflow/tensorflow/examples/speech_commands/models.py,27,function,"Calculates the smallest enclosing power of two for an input. Args: x: Positive float or integer number. 
Returns: Next largest power of two integer." 454,prepare_model_settings,tensorflow/tensorflow/examples/speech_commands/models.py,39,function,"Calculates common settings needed for all models. Args: label_count: How many classes are to be recognized. sample_rate: Number of audio samples per second. clip_duration_ms: Length of each audio clip to be analyzed. window_size_ms: Duration of frequency analysis window. window_stride_ms: How far to move in time between frequency windows. feature_bin_count: Number of frequency bins to use for analysis. preprocess: How the spectrogram is processed to produce features. Returns: Dictionary containing common settings. Raises: ValueError: If the preprocessing mode isn't recognized." 455,create_model,tensorflow/tensorflow/examples/speech_commands/models.py,95,function,"Builds a model of the requested architecture compatible with the settings. There are many possible ways of deriving predictions from a spectrogram input, so this function provides an abstract interface for creating different kinds of models in a black-box way. You need to pass in a TensorFlow node as the 'fingerprint' input, and this should output a batch of 1D features that describe the audio. Typically this will be derived from a spectrogram that's been run through an MFCC, but in theory it can be any feature vector of the size specified in model_settings['fingerprint_size']. The function will build the graph it needs in the current TensorFlow graph, and return the tensorflow output that will contain the 'logits' input to the softmax prediction process. If training flag is on, it will also return a placeholder node that can be used to control the dropout amount. See the implementations below for the possible model architectures that can be requested. Args: fingerprint_input: TensorFlow node that will output audio feature vectors. model_settings: Dictionary of information about the model. model_architecture: String specifying which kind of model to create. is_training: Whether the model is going to be used for training. runtime_settings: Dictionary of information about the runtime. Returns: TensorFlow node outputting logits results, and optionally a dropout placeholder. Raises: Exception: If the architecture type isn't recognized." 456,load_variables_from_checkpoint,tensorflow/tensorflow/examples/speech_commands/models.py,153,function,"Utility function to centralize checkpoint restoration. Args: sess: TensorFlow session. start_checkpoint: Path to saved checkpoint on disk." 457,create_single_fc_model,tensorflow/tensorflow/examples/speech_commands/models.py,164,function,"Builds a model with a single hidden fully-connected layer. This is a very simple model with just one matmul and bias layer. As you'd expect, it doesn't produce very accurate results, but it is very fast and simple, so it's useful for sanity testing. Here's the layout of the graph: (fingerprint_input) v [MatMul]<-(weights) v [BiasAdd]<-(bias) v Args: fingerprint_input: TensorFlow node that will output audio feature vectors. model_settings: Dictionary of information about the model. is_training: Whether the model is going to be used for training. Returns: TensorFlow node outputting logits results, and optionally a dropout placeholder." 458,create_conv_model,tensorflow/tensorflow/examples/speech_commands/models.py,207,function,"Builds a standard convolutional model. 
This is roughly the network labeled as 'cnn-trad-fpool3' in the 'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper: http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf Here's the layout of the graph: (fingerprint_input) v [Conv2D]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [MaxPool] v [Conv2D]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [MaxPool] v [MatMul]<-(weights) v [BiasAdd]<-(bias) v This produces fairly good quality results, but can involve a large number of weight parameters and computations. For a cheaper alternative from the same paper with slightly less accuracy, see 'low_latency_conv' below. During training, dropout nodes are introduced after each relu, controlled by a placeholder. Args: fingerprint_input: TensorFlow node that will output audio feature vectors. model_settings: Dictionary of information about the model. is_training: Whether the model is going to be used for training. Returns: TensorFlow node outputting logits results, and optionally a dropout placeholder." 459,create_low_latency_conv_model,tensorflow/tensorflow/examples/speech_commands/models.py,333,function,"Builds a convolutional model with low compute requirements. This is roughly the network labeled as 'cnn-one-fstride4' in the 'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper: http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf Here's the layout of the graph: (fingerprint_input) v [Conv2D]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [MatMul]<-(weights) v [BiasAdd]<-(bias) v [MatMul]<-(weights) v [BiasAdd]<-(bias) v [MatMul]<-(weights) v [BiasAdd]<-(bias) v This produces slightly lower quality results than the 'conv' model, but needs fewer weight parameters and computations. During training, dropout nodes are introduced after the relu, controlled by a placeholder. Args: fingerprint_input: TensorFlow node that will output audio feature vectors. model_settings: Dictionary of information about the model. is_training: Whether the model is going to be used for training. Returns: TensorFlow node outputting logits results, and optionally a dropout placeholder." 460,create_low_latency_svdf_model,tensorflow/tensorflow/examples/speech_commands/models.py,462,function,"Builds an SVDF model with low compute requirements. This is based on the topology presented in the 'Compressing Deep Neural Networks using a Rank-Constrained Topology' paper: https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43813.pdf Here's the layout of the graph: (fingerprint_input) v [SVDF]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [MatMul]<-(weights) v [BiasAdd]<-(bias) v [MatMul]<-(weights) v [BiasAdd]<-(bias) v [MatMul]<-(weights) v [BiasAdd]<-(bias) v This model produces lower recognition accuracy than the 'conv' model above, but requires fewer weight parameters and significantly fewer computations. During training, dropout nodes are introduced after the relu, controlled by a placeholder. Args: fingerprint_input: TensorFlow node that will output audio feature vectors. The node is expected to produce a 2D Tensor of shape: [batch, model_settings['fingerprint_width'] * model_settings['spectrogram_length']] with the features corresponding to the same time slot arranged contiguously, and the oldest slot at index [:, 0], and newest at [:, -1]. model_settings: Dictionary of information about the model. is_training: Whether the model is going to be used for training. runtime_settings: Dictionary of information about the runtime. 
Returns: TensorFlow node outputting logits results, and optionally a dropout placeholder. Raises: ValueError: If the inputs tensor is incorrectly shaped." 461,create_tiny_conv_model,tensorflow/tensorflow/examples/speech_commands/models.py,673,function,"Builds a convolutional model aimed at microcontrollers. Devices like DSPs and microcontrollers can have very small amounts of memory and limited processing power. This model is designed to use less than 20KB of working RAM, and fit within 32KB of read-only (flash) memory. Here's the layout of the graph: (fingerprint_input) v [Conv2D]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [MatMul]<-(weights) v [BiasAdd]<-(bias) v This doesn't produce particularly accurate results, but it's designed to be used as the first stage of a pipeline, running on a low-energy piece of hardware that can always be on, and then wake higher-power chips when a possible utterance has been found, so that more accurate analysis can be done. During training, a dropout node is introduced after the relu, controlled by a placeholder. Args: fingerprint_input: TensorFlow node that will output audio feature vectors. model_settings: Dictionary of information about the model. is_training: Whether the model is going to be used for training. Returns: TensorFlow node outputting logits results, and optionally a dropout placeholder." 462,create_tiny_embedding_conv_model,tensorflow/tensorflow/examples/speech_commands/models.py,765,function,"Builds a convolutional model aimed at microcontrollers. Devices like DSPs and microcontrollers can have very small amounts of memory and limited processing power. This model is designed to use less than 20KB of working RAM, and fit within 32KB of read-only (flash) memory. Here's the layout of the graph: (fingerprint_input) v [Conv2D]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [Conv2D]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [Conv2D]<-(weights) v [BiasAdd]<-(bias) v [Relu] v [MatMul]<-(weights) v [BiasAdd]<-(bias) v This doesn't produce particularly accurate results, but it's designed to be used as the first stage of a pipeline, running on a low-energy piece of hardware that can always be on, and then wake higher-power chips when a possible utterance has been found, so that more accurate analysis can be done. During training, a dropout node is introduced after the relu, controlled by a placeholder. Args: fingerprint_input: TensorFlow node that will output audio feature vectors. model_settings: Dictionary of information about the model. is_training: Whether the model is going to be used for training. Returns: TensorFlow node outputting logits results, and optionally a dropout placeholder." 463,ModelsTest,tensorflow/tensorflow/examples/speech_commands/models_test.py,28,class, 464,RecognizeResult,tensorflow/tensorflow/examples/speech_commands/recognize_commands.py,25,class,"Saves a recognition result temporarily. Attributes: founded_command: A string indicating the word just found. Default value is '_silence_'. score: A float representing the confidence of the found word. Default value is zero. is_new_command: A boolean indicating whether the found command is new compared to the last one. Default value is False." 465,RecognizeCommands,tensorflow/tensorflow/examples/speech_commands/recognize_commands.py,67,class,"Smooths the inference results by using an averaging window. 
Maintains a sliding window over the audio stream: a new result (a pair of 1. the confidences of all classes and 2. the start timestamp of the input audio clip) is added as soon as the inference produces one, and the oldest result and other abnormal values are removed. The results in the window are then smoothed to find the most reliable command for this period. Attributes: _label: A list containing commands at corresponding lines. _average_window_duration: The length of the averaging window. _detection_threshold: A confidence threshold for filtering out unreliable commands. _suppression_ms: The minimum number of milliseconds two reliably found commands should be apart. _minimum_count: An integer count indicating the minimum results the average window should cover. _previous_results: A deque to store previous results. _label_count: The length of the label list. _previous_top_label: Last found command. Initial value is '_silence_'. _previous_top_time: The timestamp of the previous results. Default is -np.inf." 466,load_graph,tensorflow/tensorflow/examples/speech_commands/test_streaming_accuracy.py,80,function,"Reads a TensorFlow model and creates a default graph object." 467,read_label_file,tensorflow/tensorflow/examples/speech_commands/test_streaming_accuracy.py,92,function,Load a list of labels. 468,read_wav_file,tensorflow/tensorflow/examples/speech_commands/test_streaming_accuracy.py,101,function,Load a wav file and return sample_rate and numpy data of float64 type. 469,main,tensorflow/tensorflow/examples/speech_commands/test_streaming_accuracy.py,111,function, 470,main,tensorflow/tensorflow/examples/speech_commands/train.py,88,function, 471,verbosity_arg,tensorflow/tensorflow/examples/speech_commands/train.py,480,function,"Parses verbosity argument. Args: value: A member of tf.logging. Raises: ArgumentTypeError: Not an expected value." 472,requires_contrib,tensorflow/tensorflow/examples/speech_commands/train_test.py,32,function, 473,DictStruct,tensorflow/tensorflow/examples/speech_commands/train_test.py,44,class, 474,TrainTest,tensorflow/tensorflow/examples/speech_commands/train_test.py,50,class, 475,wav_to_features,tensorflow/tensorflow/examples/speech_commands/wav_to_features.py,47,function,"Converts an audio file into its corresponding feature map. Args: sample_rate: Expected sample rate of the wavs. clip_duration_ms: Expected duration in milliseconds of the wavs. window_size_ms: How long each spectrogram timeslice is. window_stride_ms: How far to move in time between spectrogram timeslices. feature_bin_count: How many bins to use for the feature fingerprint. quantize: Whether to train the model for eight-bit deployment. preprocess: Spectrogram processing mode; ""mfcc"", ""average"" or ""micro"". input_wav: Path to the audio WAV file to read. output_c_file: Where to save the generated C source file." 476,main,tensorflow/tensorflow/examples/speech_commands/wav_to_features.py,125,function, 477,WavToFeaturesTest,tensorflow/tensorflow/examples/speech_commands/wav_to_features_test.py,30,class, 478,create_model,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,69,function,"Model to recognize digits in the MNIST dataset. Network structure is equivalent to: https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py and https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py But uses the tf.keras API. Returns: A tf.keras.Model." 
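Entry 478 above is the tf2_showcase Keras reimplementation of the classic MNIST convnet. A rough sketch of a model in that spirit (layer sizes here are illustrative assumptions, not necessarily the exact values in mnist.py):

```python
import tensorflow as tf

def create_model():
  """Small MNIST convnet, sketched after the tf2_showcase entry above."""
  return tf.keras.Sequential([
      tf.keras.layers.Reshape((28, 28, 1), input_shape=(28 * 28,)),
      tf.keras.layers.Conv2D(32, 5, padding='same', activation='relu'),
      tf.keras.layers.MaxPooling2D((2, 2), (2, 2), padding='same'),
      tf.keras.layers.Conv2D(64, 5, padding='same', activation='relu'),
      tf.keras.layers.MaxPooling2D((2, 2), (2, 2), padding='same'),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(1024, activation='relu'),
      tf.keras.layers.Dropout(0.4),
      tf.keras.layers.Dense(10),  # logits; apply softmax in the loss
  ])
```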
479,mnist_datasets,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,115,function, 480,loss,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,125,function, 481,compute_accuracy,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,131,function, 482,train,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,140,function,Trains model on `dataset` using `optimizer`. 483,test,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,166,function,Perform an evaluation of `model` on the examples from `dataset`. 484,train_and_export,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,184,function,"Run MNIST training and eval loop in eager mode. Args: flags_obj: An object containing parsed flag values." 485,import_and_eval,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,237,function, 486,apply_clean,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,247,function, 487,main,tensorflow/tensorflow/examples/tf2_showcase/mnist.py,254,function, 488,placeholder_inputs,tensorflow/tensorflow/examples/tutorials/mnist/fully_connected_feed.py,37,function,"Generate placeholder variables to represent the input tensors. These placeholders are used as inputs by the rest of the model building code and will be fed from the downloaded data in the .run() loop, below. Args: batch_size: The batch size will be baked into both placeholders. Returns: images_placeholder: Images placeholder. labels_placeholder: Labels placeholder." 489,fill_feed_dict,tensorflow/tensorflow/examples/tutorials/mnist/fully_connected_feed.py,59,function,"Fills the feed_dict for training the given step. A feed_dict takes the form of: feed_dict = { <placeholder>: <tensor of values to be passed for placeholder>, .... } Args: data_set: The set of images and labels, from input_data.read_data_sets() images_pl: The images placeholder, from placeholder_inputs(). labels_pl: The labels placeholder, from placeholder_inputs(). Returns: feed_dict: The feed dictionary mapping from placeholders to values." 490,do_eval,tensorflow/tensorflow/examples/tutorials/mnist/fully_connected_feed.py,87,function,"Runs one evaluation against the full epoch of data. Args: sess: The session in which the model has been trained. eval_correct: The Tensor that returns the number of correct predictions. images_placeholder: The images placeholder. labels_placeholder: The labels placeholder. data_set: The set of images and labels to evaluate, from input_data.read_data_sets()." 491,run_training,tensorflow/tensorflow/examples/tutorials/mnist/fully_connected_feed.py,116,function,Train MNIST for a number of steps. 492,main,tensorflow/tensorflow/examples/tutorials/mnist/fully_connected_feed.py,218,function, 493,_read32,tensorflow/tensorflow/examples/tutorials/mnist/input_data.py,43,function, 494,_extract_images,tensorflow/tensorflow/examples/tutorials/mnist/input_data.py,49,function,"Extract the images into a 4D uint8 numpy array [index, y, x, depth]. Args: f: A file object that can be passed into a gzip reader. Returns: data: A 4D uint8 numpy array [index, y, x, depth]. Raises: ValueError: If the bytestream does not start with 2051." 495,_dense_to_one_hot,tensorflow/tensorflow/examples/tutorials/mnist/input_data.py,78,function,Convert class labels from scalars to one-hot vectors. 496,_extract_labels,tensorflow/tensorflow/examples/tutorials/mnist/input_data.py,88,function,"Extract the labels into a 1D uint8 numpy array [index]. Args: f: A file object that can be passed into a gzip reader. one_hot: Does one hot encoding for the result. num_classes: Number of classes for the one hot encoding. Returns: labels: a 1D uint8 numpy array. 
Raises: ValueError: If the bytestream doesn't start with 2049." 497,_DataSet,tensorflow/tensorflow/examples/tutorials/mnist/input_data.py,116,class,"Container class for a _DataSet (deprecated). THIS CLASS IS DEPRECATED." 498,_maybe_download,tensorflow/tensorflow/examples/tutorials/mnist/input_data.py,242,function,"Download the data from source url, unless it's already here. Args: filename: string, name of the file in the directory. work_directory: string, path to working directory. source_url: url to download from if file doesn't exist. Returns: Path to resulting file." 499,read_data_sets,tensorflow/tensorflow/examples/tutorials/mnist/input_data.py,266,function, 500,inference,tensorflow/tensorflow/examples/tutorials/mnist/mnist.py,45,function,"Build the MNIST model up to where it may be used for inference. Args: images: Images placeholder, from inputs(). hidden1_units: Size of the first hidden layer. hidden2_units: Size of the second hidden layer. Returns: softmax_linear: Output tensor with the computed logits." 501,loss,tensorflow/tensorflow/examples/tutorials/mnist/mnist.py,86,function,"Calculates the loss from the logits and the labels. Args: logits: Logits tensor, float - [batch_size, NUM_CLASSES]. labels: Labels tensor, int32 - [batch_size]. Returns: loss: Loss tensor of type float." 502,training,tensorflow/tensorflow/examples/tutorials/mnist/mnist.py,101,function,"Sets up the training Ops. Creates a summarizer to track the loss over time in TensorBoard. Creates an optimizer and applies the gradients to all trainable variables. The Op returned by this function is what must be passed to the `sess.run()` call to cause the model to train. Args: loss: Loss tensor, from loss(). learning_rate: The learning rate to use for gradient descent. Returns: train_op: The Op for training." 503,evaluation,tensorflow/tensorflow/examples/tutorials/mnist/mnist.py,130,function,"Evaluate the quality of the logits at predicting the label. Args: logits: Logits tensor, float - [batch_size, NUM_CLASSES]. labels: Labels tensor, int32 - [batch_size], with values in the range [0, NUM_CLASSES). Returns: A scalar int32 tensor with the number of examples (out of batch_size) that were predicted correctly." 504,main,tensorflow/tensorflow/examples/tutorials/mnist/mnist_softmax_xla.py,34,function, 505,train,tensorflow/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py,38,function, 506,main,tensorflow/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py,185,function, 507,_hash_file,tensorflow/tensorflow/examples/tutorials/word2vec/word2vec_basic.py,41,function, 508,word2vec_basic,tensorflow/tensorflow/examples/tutorials/word2vec/word2vec_basic.py,49,function,"Example of building, training and visualizing a word2vec model." 509,main,tensorflow/tensorflow/examples/tutorials/word2vec/word2vec_basic.py,360,function, 510,suppress_exception,tensorflow/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py,37,function, 511,TestModule,tensorflow/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py,46,class,The test model has an unsupported op. 512,test_from_saved_model,tensorflow/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py,57,function,Displays the stack trace when converting a saved model. 513,test_from_concrete_function,tensorflow/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py,71,function,Displays the stack trace when converting a concrete function. 
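Entries 510-513 exercise the experimental converter's stack-trace reporting on a model with an unsupported op. A minimal sketch of the conversion path they wrap (the choice of tf.cosh as the unsupported op and the /tmp path are assumptions for illustration):

```python
import tensorflow as tf

class TestModule(tf.Module):
  @tf.function(input_signature=[tf.TensorSpec(shape=[3, 3], dtype=tf.float32)])
  def model(self, x):
    # An op without a TFLite builtin; conversion is expected to fail and
    # surface a stack trace pointing back at this line.
    return tf.cosh(x)

saved_model_dir = '/tmp/test_module'  # placeholder path
module = TestModule()
tf.saved_model.save(module, saved_model_dir, signatures=module.model)

converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
try:
  converter.convert()
except Exception as e:  # the ConverterError message carries op locations
  print(e)
```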
514,main,tensorflow/tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py,83,function, 515,load_labels,tensorflow/tensorflow/lite/examples/python/label_image.py,29,function, 516,_convert_bytes_to_cc_source,tensorflow/tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py,35,function,"Returns strings representing a C++ constant array containing `data`. Args: data: Byte array that will be converted into a C++ constant. array_name: String to use as the variable name for the constant array. max_line_width: The longest line length, for formatting purposes. include_guard: Name to use for the include guard macro definition. include_path: Optional path to include in the source file. use_tensorflow_license: Whether to include the standard TensorFlow Apache2 license in the generated files. Returns: Text that can be compiled as a C++ source file to link in the data as a literal array of values. Text that can be used as a C++ header file to reference the literal array." 517,main,tensorflow/tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py,155,function, 518,BidirectionalSequenceLstmTest,tensorflow/tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_lstm_test.py,36,class, 519,BidirectionalSequenceRnnTest,tensorflow/tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_rnn_test.py,38,class, 520,dynamic_rnn,tensorflow/tensorflow/lite/experimental/examples/lstm/rnn.py,42,function,"Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. Example: ```python # create a BasicRNNCell rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size) # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size] # defining initial state initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32) # 'state' is a tensor of shape [batch_size, cell_state_size] outputs, state = tf.compat.v1.nn.dynamic_rnn(rnn_cell, input_data, initial_state=initial_state, dtype=tf.float32) ``` ```python # create 2 LSTMCells rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]] # create a RNN cell composed sequentially of a number of RNNCells multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers) # 'outputs' is a tensor of shape [batch_size, max_time, 256] # 'state' is a N-tuple where N is the number of LSTMCells containing a # tf.nn.rnn_cell.LSTMStateTuple for each cell outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell, inputs=data, dtype=tf.float32) ``` Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. 
Used to copy-through state and zero-out outputs when past a batch element's sequence length. So it's more for performance than correctness. initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to ""rnn"". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. If cells are `LSTMCells` `state` will be a tuple containing a `LSTMStateTuple` for each cell. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list. RuntimeError: If not using control flow v2." 521,bidirectional_dynamic_rnn,tensorflow/tensorflow/lite/experimental/examples/lstm/rnn.py,279,function,"Creates a dynamic version of bidirectional recurrent neural network. Takes input and builds independent forward and backward RNNs. The input_size of forward and backward cell must match. The initial state for both directions is zero by default (but can be set optionally) and no intermediate states are ever returned -- the network is fully unrolled for the given (passed in) length(s) of the sequence(s) or completely unrolled if length(s) is not given. Args: cell_fw: An instance of RNNCell, to be used for forward direction. cell_bw: An instance of RNNCell, to be used for backward direction. inputs: The RNN inputs. 
If time_major == False (default), this must be a tensor of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If time_major == True, this must be a tensor of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences in the batch. If not provided, all batch entries are assumed to be full sequences; and time reversal is applied from time `0` to `max_time` for each sequence. initial_state_fw: (optional) An initial state for the forward RNN. This must be a tensor of appropriate type and shape `[batch_size, cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell_fw.state_size`. initial_state_bw: (optional) Same as for `initial_state_fw`, but using the corresponding properties of `cell_bw`. dtype: (optional) The data type for the initial states and expected output. Required if initial_states are not provided or RNN states have a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to ""bidirectional_rnn"" Returns: A tuple (outputs, output_states) where: outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`. If time_major == False (default), output_fw will be a `Tensor` shaped: `[batch_size, max_time, cell_fw.output_size]` and output_bw will be a `Tensor` shaped: `[batch_size, max_time, cell_bw.output_size]`. If time_major == True, output_fw will be a `Tensor` shaped: `[max_time, batch_size, cell_fw.output_size]` and output_bw will be a `Tensor` shaped: `[max_time, batch_size, cell_bw.output_size]`. It returns a tuple instead of a single concatenated `Tensor`, unlike in the `bidirectional_rnn`. If the concatenated one is preferred, the forward and backward outputs can be concatenated as `tf.concat(outputs, 2)`. output_states: A tuple (output_state_fw, output_state_bw) containing the forward and the backward final states of bidirectional rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`." 522,TfLiteRNNCell,tensorflow/tensorflow/lite/experimental/examples/lstm/rnn_cell.py,39,class,"The most basic RNN cell. This is used only for TfLite, it provides hints and it also makes the variables in the desired format for the tflite ops." 
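For entries 520-521, usage mirrors the `dynamic_rnn` examples quoted in the docstring above; a minimal graph-mode sketch of the bidirectional variant (the shapes and cell sizes are illustrative):

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Batch-major inputs: [batch_size, max_time, depth].
inputs = tf.placeholder(tf.float32, [None, 20, 8])
lengths = tf.placeholder(tf.int32, [None])

cell_fw = tf.nn.rnn_cell.LSTMCell(64)
cell_bw = tf.nn.rnn_cell.LSTMCell(64)
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
    cell_fw, cell_bw, inputs, sequence_length=lengths, dtype=tf.float32)

# The two directions come back separately; concatenate them for a single
# [batch_size, max_time, 128] tensor.
outputs = tf.concat([out_fw, out_bw], 2)
```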
523,TFLiteLSTMCell,tensorflow/tensorflow/lite/experimental/examples/lstm/rnn_cell.py,162,class,"Long short-term memory unit (LSTM) recurrent network cell. This is used only for TfLite, it provides hints and it also makes the variables in the desired format for the tflite ops (transposed and separated). The default non-peephole implementation is based on: https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf Felix Gers, Jurgen Schmidhuber, and Fred Cummins. ""Learning to forget: Continual prediction with LSTM."" IET, 850-855, 1999. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf Hasim Sak, Andrew Senior, and Francoise Beaufays. ""Long short-term memory recurrent neural network architectures for large scale acoustic modeling."" INTERSPEECH, 2014. The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU." 524,UnidirectionalSequenceLstmTest,tensorflow/tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_lstm_test.py,36,class, 525,UnidirectionalSequenceRnnTest,tensorflow/tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_rnn_test.py,37,class, 526,AudioFeatureGenerationTest,tensorflow/tensorflow/lite/experimental/microfrontend/python/kernel_tests/audio_microfrontend_op_test.py,35,class, 527,audio_microfrontend,tensorflow/tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py,34,function,"Audio Microfrontend Op. This Op converts a sequence of audio data into one or more feature vectors containing filterbanks of the input. The conversion process uses a lightweight library to perform: 1. A slicing window function 2. Short-time FFTs 3. Filterbank calculations 4. Noise reduction 5. PCAN Auto Gain Control 6. Logarithmic scaling Args: audio: 1D Tensor, int16 audio data in temporal ordering. sample_rate: Integer, the sample rate of the audio in Hz. window_size: Integer, length of desired time frames in ms. window_step: Integer, length of step size for the next frame in ms. num_channels: Integer, the number of filterbank channels to use. upper_band_limit: Float, the highest frequency included in the filterbanks. lower_band_limit: Float, the lowest frequency included in the filterbanks. smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction. even_smoothing: Float, smoothing coefficient for even-numbered channels. odd_smoothing: Float, smoothing coefficient for odd-numbered channels. min_signal_remaining: Float, fraction of signal to preserve in smoothing. enable_pcan: Bool, enable PCAN auto gain control. pcan_strength: Float, gain normalization exponent. pcan_offset: Float, positive value added in the normalization denominator. gain_bits: Int, number of fractional bits in the gain. enable_log: Bool, enable logarithmic scaling of filterbanks. scale_shift: Integer, scale filterbanks by 2^(scale_shift). left_context: Integer, number of preceding frames to attach to each frame. right_context: Integer, number of following frames to attach to each frame. frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M]. zero_padding: Bool, if left/right context is out-of-bounds, attach frame of zeroes. Otherwise, frame[0] or frame[size-1] will be copied. 
out_scale: Integer, divide all filterbanks by this number. out_type: DType, type of the output Tensor, defaults to UINT16. Returns: filterbanks: 2D Tensor, each row is a time frame, each column is a channel. Raises: ValueError: If the audio tensor is not explicitly a vector." 528,SupportedOp,tensorflow/tensorflow/lite/experimental/tensorboard/ops_util.py,26,class,"Spec of supported ops. Args: op: string of op name." 529,get_potentially_supported_ops,tensorflow/tensorflow/lite/experimental/tensorboard/ops_util.py,35,function,"Returns operations potentially supported by TensorFlow Lite. The potentially supported list contains a list of ops that are partially or fully supported, which is derived by simply scanning op names to check whether they can be handled without real conversion and specific parameters. Given that some ops may be partially supported, the optimal way to determine if a model's operations are supported is by converting using the TensorFlow Lite converter. Returns: A list of SupportedOp." 530,OpsUtilTest,tensorflow/tensorflow/lite/experimental/tensorboard/ops_util_test.py,24,class, 531,main,tensorflow/tensorflow/lite/g3doc/tools/build_java_api_docs.py,53,function, 532,main,tensorflow/tensorflow/lite/g3doc/tools/build_py_api_docs.py,55,function, 533,time_wrapping,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_augmentation.py,29,function,Generate (numerator/denominator)x speed data. 534,augment_data,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_augmentation.py,43,function,Perform data augmentation. 535,TestAugmentation,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_augmentation_test.py,32,class, 536,DataLoader,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_load.py,35,class,Loads data and prepares for training. 537,TestLoad,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_load_test.py,30,class, 538,prepare_original_data,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_prepare.py,46,function,Read collected data from files. 539,generate_negative_data,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_prepare.py,86,function,Generate negative data labeled as 'negative6~8'. 540,write_data,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_prepare.py,143,function, 541,TestPrepare,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_prepare_test.py,32,class, 542,read_data,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_split.py,40,function, 543,split_data,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_split.py,51,function,"Splits data into train, validation and test according to ratio." 544,person_split,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_split_person.py,41,function,Split data by person. 545,TestSplitPerson,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_split_person_test.py,28,class, 546,TestSplit,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/data_split_test.py,29,class, 547,reshape_function,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train.py,37,function, 548,calculate_model_size,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train.py,42,function, 549,build_cnn,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train.py,51,function,Builds a convolutional neural network in Keras. 550,build_lstm,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train.py,78,function,Builds an LSTM in Keras. 
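Entries 549-550 build the magic_wand gesture networks in Keras. A sketch of the LSTM variant (the input shape and layer widths are assumptions, not necessarily the values in train.py):

```python
import tensorflow as tf

def build_lstm(seq_length=128, num_classes=4):
  """Tiny gesture classifier over [seq_length, 3] accelerometer windows."""
  model = tf.keras.Sequential([
      tf.keras.layers.Bidirectional(
          tf.keras.layers.LSTM(22), input_shape=(seq_length, 3)),
      tf.keras.layers.Dense(num_classes, activation='softmax'),
  ])
  model.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
  return model
```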
551,load_data,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train.py,93,function, 552,build_net,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train.py,101,function, 553,train_net,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train.py,111,function,Trains the model. 554,TestTrain,tensorflow/tensorflow/lite/micro/examples/magic_wand/train/train_test.py,33,class, 555,to_cc,tensorflow/tensorflow/lite/micro/examples/micro_speech/CMSIS/create_constants.py,26,function,Writes table values to a C++ source file. 556,to_h,tensorflow/tensorflow/lite/micro/examples/micro_speech/CMSIS/create_constants.py,44,function,Writes a header file for the table values. 557,new_data_to_array,tensorflow/tensorflow/lite/micro/examples/micro_speech/apollo3/captured_data_to_wav.py,28,function, 558,new_data_to_array,tensorflow/tensorflow/lite/micro/examples/micro_speech/apollo3/compare_1k.py,29,function,Converts file information to an in-memory array. 559,to_float,tensorflow/tensorflow/lite/micro/examples/micro_speech/apollo3/compare_1k.py,63,function, 560,check_file_existence,tensorflow/tensorflow/lite/micro/examples/person_detection/utils/raw_to_bitmap.py,52,function, 561,show_and_save_bitmaps,tensorflow/tensorflow/lite/micro/examples/person_detection/utils/raw_to_bitmap.py,60,function,"Display and save a list of bitmaps. Args: input_file: input file name bitmap_list: list of numpy arrays to represent bitmap images channels: color channel count" 562,reshape_bitmaps,tensorflow/tensorflow/lite/micro/examples/person_detection/utils/raw_to_bitmap.py,87,function,"Reshape flat integer arrays. Args: frame_list: list of 1-D arrays to represent raw image data width: image width in pixels height: image height in pixels channels: color channel count Returns: list of numpy arrays to represent bitmap images" 563,parse_file,tensorflow/tensorflow/lite/micro/examples/person_detection/utils/raw_to_bitmap.py,109,function,"Convert log file to array of pixels. Args: inputfile: log file to parse width: image width in pixels height: image height in pixels channels: color channel count Returns: list 1-D arrays to represent raw image data." 564,main,tensorflow/tensorflow/lite/micro/examples/person_detection/utils/raw_to_bitmap.py,159,function, 565,RawToBitmapTest,tensorflow/tensorflow/lite/micro/examples/person_detection/utils/raw_to_bitmap_test.py,94,class, 566,generate_conv_model,tensorflow/tensorflow/lite/micro/testing/generate_test_models.py,34,function,"Creates a basic Keras model and converts to tflite. This model does not make any relevant classifications. It only exists to generate a model that is designed to run on embedded devices." 567,main,tensorflow/tensorflow/lite/micro/testing/generate_test_models.py,74,function, 568,rename_example_subfolder_files,tensorflow/tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py,29,function,Moves source files in example subfolders to equivalents at root. 569,move_person_data,tensorflow/tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py,41,function,Moves the downloaded person model into the examples folder. 570,move_person_data_experimental,tensorflow/tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py,61,function,Moves the downloaded person model into the examples folder. 571,move_image_data_experimental,tensorflow/tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py,83,function,Moves the downloaded image detection model into the examples folder. 
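The fix_arduino_subfolders entries above and the transform_*_source entries that follow are all small mechanical rewrites of paths and includes for the Arduino export. A sketch of the include-rewriting step described at entries 580 and 587 below (the regex and library layout here are assumptions, not the tool's exact behavior):

```python
import re

def replace_includes(line, library_name='tensorflow_lite'):
  """Points a quoted #include at the flattened Arduino library layout."""
  match = re.match(r'(\s*)#include "(.*)"', line)
  if match:
    indent, path = match.groups()
    return '%s#include "%s/%s"' % (indent, library_name, path)
  return line

# replace_includes('#include "tensorflow/lite/micro/micro_interpreter.h"')
# -> '#include "tensorflow_lite/tensorflow/lite/micro/micro_interpreter.h"'
```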
572,rename_example_main_inos,tensorflow/tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py,104,function,Makes sure the .ino sketch files match the example name. 573,main,tensorflow/tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py,114,function,Control the rewriting of source files. 574,parse_args,tensorflow/tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py,124,function,Converts the raw arguments into accessible flags. 575,sanitize_xml,tensorflow/tensorflow/lite/micro/tools/make/generate_keil_project.py,29,function,Uses an allowlist to avoid generating bad XML. 576,main,tensorflow/tensorflow/lite/micro/tools/make/generate_keil_project.py,34,function,Generates a Keil project file from a template source. 577,parse_args,tensorflow/tensorflow/lite/micro/tools/make/generate_keil_project.py,82,function,Converts the raw arguments into accessible flags. 578,main,tensorflow/tensorflow/lite/micro/tools/make/merge_arduino_zips.py,27,function,Merges multiple Arduino zipfiles into a single result. 579,parse_args,tensorflow/tensorflow/lite/micro/tools/make/merge_arduino_zips.py,39,function,Converts the raw arguments into accessible flags. 580,replace_includes,tensorflow/tensorflow/lite/micro/tools/make/transform_arduino_source.py,29,function,Updates any includes to reference the new Arduino library paths. 581,replace_main,tensorflow/tensorflow/lite/micro/tools/make/transform_arduino_source.py,43,function,Updates any occurrences of a bare main definition to the Arduino equivalent. 582,check_ino_functions,tensorflow/tensorflow/lite/micro/tools/make/transform_arduino_source.py,51,function,Ensures the required functions exist. 583,add_example_ino_library_include,tensorflow/tensorflow/lite/micro/tools/make/transform_arduino_source.py,65,function,Makes sure the example includes the header that loads the library. 584,replace_example_includes,tensorflow/tensorflow/lite/micro/tools/make/transform_arduino_source.py,71,function,Updates any includes for local example files. 585,main,tensorflow/tensorflow/lite/micro/tools/make/transform_arduino_source.py,85,function,Transforms the input source file to work when exported to Arduino. 586,parse_args,tensorflow/tensorflow/lite/micro/tools/make/transform_arduino_source.py,108,function,Converts the raw arguments into accessible flags. 587,replace_arduino_includes,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,36,function,Updates any includes to reference the new Arduino library paths. 588,replace_arduino_main,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,50,function,Updates any occurrences of a bare main definition to the Arduino equivalent. 589,check_ino_functions,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,58,function,Ensures the required functions exist. 590,add_example_ino_library_include,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,72,function,Makes sure the example includes the header that loads the library. 591,replace_arduino_example_includes,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,78,function,Updates any includes for local example files. 592,replace_esp_example_includes,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,92,function,Updates any includes for local example files. 593,transform_arduino_sources,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,109,function,"Transform sources for the Arduino platform. Args: input_lines: A sequence of lines from the input file to process. 
flags: Flags indicating which transformation(s) to apply. Returns: The transformed output as a string." 594,transform_esp_sources,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,138,function,"Transform sources for the ESP-IDF platform. Args: input_lines: A sequence of lines from the input file to process. flags: Flags indicating which transformation(s) to apply. Returns: The transformed output as a string." 595,main,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,158,function,Transforms the input source file to work when exported as example. 596,parse_args,tensorflow/tensorflow/lite/micro/tools/make/transform_source.py,171,function,Converts the raw arguments into accessible flags. 597,_requires_input_stats,tensorflow/tensorflow/lite/python/convert.py,49,function, 598,_try_convert_to_unicode,tensorflow/tensorflow/lite/python/convert.py,67,function, 599,OpsSet,tensorflow/tensorflow/lite/python/convert.py,80,class,"Enum class defining the sets of ops available to generate TFLite models. WARNING: Experimental interface, subject to change." 600,ConverterError,tensorflow/tensorflow/lite/python/convert.py,120,class,Raised when an error occurs during model conversion. 601,mlir_quantize,tensorflow/tensorflow/lite/python/convert.py,125,function,"Quantize `input_data_str` with calibration results. Args: input_data_str: Input data in serialized form (e.g. a TFLITE model with calibration results). disable_per_channel: Bool indicating whether to do per-channel or per-tensor quantization fully_quantize: Bool indicating whether to fully quantize the model. Besides model body, the input/output will be quantized as well. inference_type: Data type for the activations. The default value is int8. Returns: Quantized model in serialized form (e.g. a TFLITE model) with floating-point inputs and outputs." 602,mlir_sparsify,tensorflow/tensorflow/lite/python/convert.py,150,function,"Sparsify `input_data_str` to encode sparse tensor with proper format. Args: input_data_str: Input data in serialized form (e.g. a TFLITE model). Returns: Sparsified model in serialized form (e.g. a TFLITE model)." 603,toco_convert_protos,tensorflow/tensorflow/lite/python/convert.py,162,function,"Convert `input_data_str` according to model and toco parameters. Unless you know what you are doing, consider using the more friendly `tf.compat.v1.lite.toco_convert`. Args: model_flags_str: Serialized proto describing model properties, see `toco/model_flags.proto`. toco_flags_str: Serialized proto describing conversion properties, see `toco/toco_flags.proto`. input_data_str: Input data in serialized form (e.g. a graphdef is common) debug_info_str: Serialized `GraphDebugInfo` proto describing logging information. (default None) enable_mlir_converter: Enables MLIR-based conversion instead of the default TOCO conversion. (default False) Returns: Converted model in serialized form (e.g. a TFLITE model is common). Raises: ConverterError: When conversion fails in TFLiteConverter, usually due to ops not being supported. RuntimeError: When conversion fails, an exception is raised with the error message embedded." 604,build_toco_convert_protos,tensorflow/tensorflow/lite/python/convert.py,291,function,"Builds protocol buffers describing a conversion of a model using TOCO. Typically this is to convert from TensorFlow GraphDef to TFLite, in which case the default `input_format` and `output_format` are sufficient. Args: input_tensors: List of input tensors. Type and shape are computed using `foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this). inference_type: Target data type of real-number arrays in the output file. Must be `{tf.float32, tf.uint8, tf.int8}`. (default tf.float32) inference_input_type: Target data type of real-number input arrays. Allows for a different type for input arrays in the case of quantization. Must be `{tf.float32, tf.uint8, tf.int8}`. (default `inference_type`) input_format: Type of data to read. Currently must be `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF) input_shapes: Input array shape. It needs to be a list of the same length as `input_tensors`, or None. (default None) output_format: Output file format. Currently must be `{TFLITE, GRAPHVIZ_DOT}`. (default TFLITE) quantized_input_stats: List of tuples of floats representing the mean and standard deviation. Each tuple maps to the corresponding input tensor. Only needed if `inference_input_type` is `QUANTIZED_UINT8` or `INT8`. real_input_value = (quantized_input_value - mean_value) / std_dev_value. (default None) default_ranges_stats: Tuple of integers representing (min, max) range values for all arrays without a specified range. Intended for experimenting with quantization via ""dummy quantization"". (default None) drop_control_dependency: Boolean indicating whether to drop control dependencies silently. This is due to TFLite not supporting control dependencies. (default True) reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant nodes in unexpected locations. Used when the location of the FakeQuant nodes is preventing graph transformations necessary to convert the graph. Results in a graph that differs from the quantized training graph, potentially causing differing arithmetic behavior. (default False) allow_custom_ops: Boolean indicating whether to allow custom operations. When false any unknown operation is an error. When true, custom ops are created for any op that is unknown. The developer will need to provide these to the TensorFlow Lite runtime with a custom resolver. (default False) custom_opdefs: List of strings representing custom ops OpDefs that are included in the GraphDef. Required when using custom operations with the MLIR-based converter. (default None) change_concat_input_ranges: Boolean to change behavior of min/max ranges for inputs and outputs of the concat operator for quantized models. Changes the ranges of concat operator overlap when true. (default False) post_training_quantize: Boolean indicating whether to quantize the weights of the converted float model. Model size will be reduced and there will be latency improvements (at the cost of accuracy). (default False) quantize_to_float16: Boolean indicating whether to convert float buffers to float16. (default False) dump_graphviz_dir: Full filepath of folder to dump the graphs at various stages of processing GraphViz .dot files. Preferred over --output_format=GRAPHVIZ_DOT in order to keep the requirements of the output file. (default None) dump_graphviz_video: Boolean indicating whether to dump the graph after every graph transformation. (default False) target_ops: Experimental flag, subject to change. Set of OpsSet options indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS])) allow_nonexistent_arrays: Allow specifying array names that don't exist or are unused in the final graph. (default False) debug_info: `GraphDebugInfo` proto containing the stack traces for the original nodes referred by the converted graph.
conversion_summary_dir: A string, the path to the generated conversion logs. saved_model_dir: Filepath of the saved model to be converted. This value will be non-empty only when the saved model import path will be used. Otherwise, the graph def-based conversion will be processed. saved_model_version: SavedModel file format version of the saved model file to be converted. This value will be set only when the SavedModel import path will be used. saved_model_tags: Set of string saved model tags, formatted as comma-separated values. This value will be set only when the SavedModel import path will be used. saved_model_exported_names: Names to be exported (default: export all) when the saved model import path is on. This value will be set only when the SavedModel import path will be used. Returns: model_flags, toco_flags, debug_info: three protocol buffers describing the conversion process and debug information. Raises: ValueError: If the input tensor type is unknown Missing mean_values or std_dev_values RuntimeError: If TOCO fails to convert (in which case the runtime error's error text will contain the TOCO error log)" 605,toco_convert_graph_def,tensorflow/tensorflow/lite/python/convert.py,485,function,"Convert a model using TOCO. This function is used to convert GraphDefs that cannot be loaded into TensorFlow to TFLite. Conversion can be customized by providing arguments that are forwarded to `build_toco_convert_protos` (see documentation for details). Args: input_data: Input data (i.e. often `sess.graph_def`), input_arrays_with_shape: Tuple of strings representing input tensor names and list of integers representing input shapes (e.g., [(""foo"", [1, 16, 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when `input_tensors` is None. (default None) output_arrays: List of output tensors to freeze graph with. Use only when graph cannot be loaded into TensorFlow and when `output_tensors` is None. (default None) enable_mlir_converter: Enables MLIR-based conversion instead of TOCO conversion. *args: See `build_toco_convert_protos`, **kwargs: See `build_toco_convert_protos`. Returns: The converted data. For example if TFLite was the destination, then this will be a tflite flatbuffer in a bytes array. Raises: Defined in `build_toco_convert_protos`." 606,toco_convert_impl,tensorflow/tensorflow/lite/python/convert.py,541,function,"Convert a model using TOCO. Typically this function is used to convert from TensorFlow GraphDef to TFLite. Conversion can be customized by providing arguments that are forwarded to `build_toco_convert_protos` (see documentation for details). Args: input_data: Input data (i.e. often `sess.graph_def`), input_tensors: List of input tensors. Type and shape are computed using `foo.shape` and `foo.dtype`. output_tensors: List of output tensors (only .name is used from this). enable_mlir_converter: Enables MLIR-based conversion instead of TOCO conversion. *args: See `build_toco_convert_protos`, **kwargs: See `build_toco_convert_protos`. Returns: The converted data. For example if TFLite was the destination, then this will be a tflite flatbuffer in a bytes array. Raises: Defined in `build_toco_convert_protos`." 607,toco_convert,tensorflow/tensorflow/lite/python/convert.py,580,function,"Convert a model using TOCO. Typically this function is used to convert from TensorFlow GraphDef to TFLite. Conversion can be customized by providing arguments that are forwarded to `build_toco_convert_protos` (see documentation for details).
This function has been deprecated. Please use `lite.TFLiteConverter` instead. Args: input_data: Input data (i.e. often `sess.graph_def`), input_tensors: List of input tensors. Type and shape are computed using `foo.shape` and `foo.dtype`. output_tensors: List of output tensors (only .name is used from this). *args: See `build_toco_convert_protos`, **kwargs: See `build_toco_convert_protos`. Returns: The converted data. For example if TFLite was the destination, then this will be a tflite flatbuffer in a bytes array. Raises: Defined in `build_toco_convert_protos`." 608,run_main,tensorflow/tensorflow/lite/python/convert_file_to_c_source.py,29,function,Main in convert_file_to_c_source.py. 609,main,tensorflow/tensorflow/lite/python/convert_file_to_c_source.py,101,function, 610,_log_tensor_details,tensorflow/tensorflow/lite/python/convert_saved_model.py,30,function,"Log tensor details: name, shape, and type." 611,get_meta_graph_def,tensorflow/tensorflow/lite/python/convert_saved_model.py,46,function,"Validate saved_model and extract MetaGraphDef. Args: saved_model_dir: saved_model path to convert. tag_set: Set of tag(s) of the MetaGraphDef to load. Returns: The meta_graph_def used for tflite conversion. Raises: ValueError: No valid MetaGraphDef for given tag_set." 612,get_signature_def,tensorflow/tensorflow/lite/python/convert_saved_model.py,63,function,"Get the signature def from meta_graph with given signature_key. Args: meta_graph: meta_graph_def. signature_key: signature_def in the meta_graph_def. Returns: The signature_def used for tflite conversion. Raises: ValueError: Given signature_key is not valid for this meta_graph." 613,get_inputs_outputs,tensorflow/tensorflow/lite/python/convert_saved_model.py,88,function,"Get inputs and outputs from SignatureDef. Args: signature_def: SignatureDef in the meta_graph_def for conversion. Returns: The inputs and outputs in the graph for conversion." 614,_get_tensors,tensorflow/tensorflow/lite/python/convert_saved_model.py,112,function,"Gets the tensors associated with the tensor names. Either signature_def_tensor_names or user_tensor_names should be provided. If the user provides tensors, the tensors associated with the user provided tensor names are provided. Otherwise, the tensors associated with the names in the SignatureDef are provided. Args: graph: GraphDef representing graph. signature_def_tensor_names: Tensor names stored in either the inputs or outputs of a SignatureDef. (default None) user_tensor_names: Tensor names provided by the user. (default None) Returns: List of tensors. Raises: ValueError: signature_def_tensors and user_tensor_names are undefined or empty. user_tensor_names are not valid." 615,freeze_saved_model,tensorflow/tensorflow/lite/python/convert_saved_model.py,155,function,"Converts a SavedModel to a frozen graph. Args: saved_model_dir: SavedModel directory to convert. input_arrays: List of input tensors to freeze graph with. Uses input arrays from SignatureDef when none are provided. input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {""foo"" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {""foo"" : None}). output_arrays: List of output tensors to freeze graph with. Uses output arrays from SignatureDef when none are provided. tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns: frozen_graph_def: Frozen GraphDef. in_tensors: List of input tensors for the graph. out_tensors: List of output tensors for the graph. graph: `Graph` object. Raises: ValueError: SavedModel doesn't contain a MetaGraphDef identified by tag_set. signature_key is not in the MetaGraphDef. assets/ directory is in the MetaGraphDef. input_shapes does not match the length of input_arrays. input_arrays or output_arrays are not valid." 616,FreezeSavedModelTest,tensorflow/tensorflow/lite/python/convert_saved_model_test.py,40,class, 617,ConvertTest,tensorflow/tensorflow/lite/python/convert_test.py,38,class, 618,ConvertTestOpHint,tensorflow/tensorflow/lite/python/convert_test.py,168,class,Test the hint to stub functionality. 619,_tf_export,tensorflow/tensorflow/lite/python/interpreter.py,37,function, 620,Delegate,tensorflow/tensorflow/lite/python/interpreter.py,42,class,"Python wrapper class to manage TfLiteDelegate objects. The shared library is expected to have two functions: TfLiteDelegate* tflite_plugin_create_delegate( char**, char**, size_t, void (*report_error)(const char *)) void tflite_plugin_destroy_delegate(TfLiteDelegate*) The first one creates a delegate object. It may return NULL to indicate an error (with a suitable error message reported by calling report_error()). The second one destroys the delegate object and must be called for every created delegate object. Passing NULL as argument value is allowed, i.e. tflite_plugin_destroy_delegate(tflite_plugin_create_delegate(...)) always works." 621,load_delegate,tensorflow/tensorflow/lite/python/interpreter.py,132,function,"Returns loaded Delegate object. Args: library: Name of shared library containing the [TfLiteDelegate](https://www.tensorflow.org/lite/performance/delegates). options: Dictionary of options that are required to load the delegate. All keys and values in the dictionary should be convertible to str. Consult the documentation of the specific delegate for required and legal options. (default None) Returns: Delegate object. Raises: ValueError: Delegate failed to load. RuntimeError: If delegate loading is used on unsupported platform." 622,Interpreter,tensorflow/tensorflow/lite/python/interpreter.py,159,class,"Interpreter interface for TensorFlow Lite Models. This makes the TensorFlow Lite interpreter accessible in Python. It is possible to use this interpreter in a multithreaded Python environment, but you must be sure to call functions of a particular instance from only one thread at a time. So if you want to have 4 threads running different inferences simultaneously, create an interpreter for each one as thread-local data. Similarly, if you are calling invoke() in one thread on a single interpreter but you want to use tensor() on another thread once it is done, you must use a synchronization primitive between the threads to ensure invoke has returned before calling tensor()." 623,InterpreterWithCustomOps,tensorflow/tensorflow/lite/python/interpreter.py,552,class,"Interpreter interface for TensorFlow Lite Models that accepts custom ops. The interface provided by this class is experimental and therefore not exposed as part of the public API. Wraps the tf.lite.Interpreter class and adds the ability to load custom ops by providing the names of functions that take a pointer to a BuiltinOpResolver and add a custom op."
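Entry 622 documents the `tf.lite.Interpreter` workflow: construct, allocate tensors, set inputs, invoke, then read outputs. A typical single-threaded invocation looks like the following (the model path is hypothetical):

```python
import numpy as np
import tensorflow as tf

# Load a TFLite model and allocate its tensors; the model path is hypothetical.
interpreter = tf.lite.Interpreter(model_path="converted_model.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed random data matching the input shape, run inference, read the result.
input_shape = input_details[0]["shape"]
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
interpreter.set_tensor(input_details[0]["index"], input_data)
interpreter.invoke()
result = interpreter.get_tensor(output_details[0]["index"])
```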
624,InterpreterCustomOpsTest,tensorflow/tensorflow/lite/python/interpreter_test.py,43,class, 625,InterpreterTest,tensorflow/tensorflow/lite/python/interpreter_test.py,63,class, 626,InterpreterTestErrorPropagation,tensorflow/tensorflow/lite/python/interpreter_test.py,260,class, 627,InterpreterTensorAccessorTest,tensorflow/tensorflow/lite/python/interpreter_test.py,298,class, 628,InterpreterDelegateTest,tensorflow/tensorflow/lite/python/interpreter_test.py,353,class, 629,Optimize,tensorflow/tensorflow/lite/python/lite.py,88,class,"Enum defining the optimizations to apply when generating tflite graphs. Some optimizations may come at the cost of accuracy. DEFAULT Default optimization strategy. Converter will do its best to improve size and latency based on the information provided. Enhanced optimizations are gained by providing a representative_dataset. This is recommended, and is currently equivalent to the modes below. Currently, weights will be quantized and if representative_dataset is provided, activations for quantizable operations will also be quantized. OPTIMIZE_FOR_SIZE Deprecated. Does the same as DEFAULT. OPTIMIZE_FOR_LATENCY Deprecated. Does the same as DEFAULT." 630,RepresentativeDataset,tensorflow/tensorflow/lite/python/lite.py,131,class,"Representative dataset to evaluate optimizations. A representative dataset that can be used to evaluate optimizations by the converter. E.g. converter can use these examples to estimate (min, max) ranges by calibrating the model on inputs. This can allow converter to quantize a converted floating point model." 631,TargetSpec,tensorflow/tensorflow/lite/python/lite.py,153,class,"Specification of target device. Details about target device. Converter optimizes the generated model for a specific device. Attributes: supported_ops: Experimental flag, subject to change. Set of OpsSet options supported by the device. (default set([OpsSet.TFLITE_BUILTINS])) supported_types: List of types for constant values on the target device. Supported values are types exported by lite.constants. Frequently, an optimization choice is driven by the most compact (i.e. smallest) type in this list (default [constants.FLOAT])" 632,QuantizationMode,tensorflow/tensorflow/lite/python/lite.py,177,class,QuantizationMode determines the quantized conversion from user options. 633,TFLiteConverterBase,tensorflow/tensorflow/lite/python/lite.py,384,class,Converter subclass to share functionality between V1 and V2 converters. 634,TFLiteConverterBaseV2,tensorflow/tensorflow/lite/python/lite.py,522,class,"Converter subclass to share functionality between V2 converters. Attributes: allow_custom_ops: Boolean indicating whether to allow custom operations. When False, any unknown operation is an error. When True, custom ops are created for any op that is unknown. The developer needs to provide these to the TensorFlow Lite runtime with a custom resolver. (default False) optimizations: Experimental flag, subject to change. A list of optimizations to apply when converting the model. E.g. `[Optimize.DEFAULT]` representative_dataset: A representative dataset that can be used to generate input and output samples for the model. The converter can use the dataset to evaluate different optimizations. Note that this is an optional attribute but it is necessary if INT8 is the only supported builtin op in target ops. target_spec: Experimental flag, subject to change. Specification of target device. inference_input_type: Data type of the input layer.
Note that integer types (tf.int8 and tf.uint8) are currently only supported for post training integer quantization. (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) inference_output_type: Data type of the output layer. Note that integer types (tf.int8 and tf.uint8) are currently only supported for post training integer quantization. (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) experimental_new_converter: Experimental flag, subject to change. Enables MLIR-based conversion instead of TOCO conversion. (default True)" 635,TFLiteSavedModelConverterV2,tensorflow/tensorflow/lite/python/lite.py,652,class,"Converts the given SavedModel into TensorFlow Lite model. Attributes: saved_model_dir: Directory of the SavedModel." 636,TFLiteKerasModelConverterV2,tensorflow/tensorflow/lite/python/lite.py,719,class,Converts the given Keras model into TensorFlow Lite model. 637,TFLiteFrozenGraphConverterV2,tensorflow/tensorflow/lite/python/lite.py,840,class,Converts the given frozen graph into TensorFlow Lite model. 638,TFLiteConverterV2,tensorflow/tensorflow/lite/python/lite.py,910,class,"Converts a TensorFlow model into TensorFlow Lite model. Attributes: allow_custom_ops: Boolean indicating whether to allow custom operations. When False, any unknown operation is an error. When True, custom ops are created for any op that is unknown. The developer needs to provide these to the TensorFlow Lite runtime with a custom resolver. (default False) optimizations: Experimental flag, subject to change. A list of optimizations to apply when converting the model. E.g. `[Optimize.DEFAULT]` representative_dataset: A representative dataset that can be used to generate input and output samples for the model. The converter can use the dataset to evaluate different optimizations. Note that this is an optional attribute but it is necessary if INT8 is the only supported builtin op in target ops. target_spec: Experimental flag, subject to change. Specification of target device. inference_input_type: Data type of the input layer. Note that integer types (tf.int8 and tf.uint8) are currently only supported for post training integer quantization. (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) inference_output_type: Data type of the output layer. Note that integer types (tf.int8 and tf.uint8) are currently only supported for post training integer quantization. (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8}) experimental_new_converter: Experimental flag, subject to change. Enables MLIR-based conversion instead of TOCO conversion. (default True) Example usage: ```python # Converting a SavedModel to a TensorFlow Lite model. converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir) tflite_model = converter.convert() # Converting a tf.Keras model to a TensorFlow Lite model. converter = tf.lite.TFLiteConverter.from_keras_model(model) tflite_model = converter.convert() # Converting ConcreteFunctions to a TensorFlow Lite model. converter = tf.lite.TFLiteConverter.from_concrete_functions([func]) tflite_model = converter.convert() ```" 639,TFLiteConverterBaseV1,tensorflow/tensorflow/lite/python/lite.py,1085,class,"Converter subclass to share functionality between V1 converters. Attributes: inference_type: Target data type of real-number arrays in the output file. Must be `{tf.float32, tf.uint8}`. If `optimizations` are provided, this parameter is ignored. (default tf.float32) inference_input_type: Target data type of real-number input arrays.
Allows for a different type for input arrays. If an integer type is provided and `optimizations` are not used, `quantized_input_stats` must be provided. If `inference_type` is tf.uint8, signaling conversion to a fully quantized model from a quantization-aware trained input model, then `inference_input_type` defaults to tf.uint8. In all other cases, `inference_input_type` defaults to tf.float32. Must be `{tf.float32, tf.uint8, tf.int8}` inference_output_type: Target data type of real-number output arrays. Allows for a different type for output arrays. If `inference_type` is tf.uint8, signaling conversion to a fully quantized model from a quantization-aware trained output model, then `inference_output_type` defaults to tf.uint8. In all other cases, `inference_output_type` must be tf.float32; otherwise an error will be thrown. Must be `{tf.float32, tf.uint8, tf.int8}` output_format: Output file format. Currently must be `{TFLITE, GRAPHVIZ_DOT}`. (default TFLITE) quantized_input_stats: Dict of strings representing input tensor names mapped to tuple of floats representing the mean and standard deviation of the training data (e.g., {""foo"" : (0., 1.)}). Only needed if `inference_input_type` is `QUANTIZED_UINT8`. real_input_value = (quantized_input_value - mean_value) / std_dev_value. (default {}) default_ranges_stats: Tuple of integers representing (min, max) range values for all arrays without a specified range. Intended for experimenting with quantization via ""dummy quantization"". (default None) drop_control_dependency: Boolean indicating whether to drop control dependencies silently. This is due to TFLite not supporting control dependencies. (default True) reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant nodes in unexpected locations. Used when the location of the FakeQuant nodes is preventing graph transformations necessary to convert the graph. Results in a graph that differs from the quantized training graph, potentially causing differing arithmetic behavior. (default False) change_concat_input_ranges: Boolean to change behavior of min/max ranges for inputs and outputs of the concat operator for quantized models. Changes the ranges of concat operator overlap when true. (default False) allow_custom_ops: Boolean indicating whether to allow custom operations. When false any unknown operation is an error. When true, custom ops are created for any op that is unknown. The developer will need to provide these to the TensorFlow Lite runtime with a custom resolver. (default False) post_training_quantize: Deprecated. Please specify `[Optimize.DEFAULT]` for `optimizations` instead. Boolean indicating whether to quantize the weights of the converted float model. Model size will be reduced and there will be latency improvements (at the cost of accuracy). (default False) dump_graphviz_dir: Full filepath of folder to dump the graphs at various stages of processing GraphViz .dot files. Preferred over --output_format=GRAPHVIZ_DOT in order to keep the requirements of the output file. (default None) dump_graphviz_video: Boolean indicating whether to dump the graph after every graph transformation. (default False) conversion_summary_dir: A string indicating the path to the generated conversion logs. target_ops: Deprecated. Please specify `target_spec.supported_ops` instead. Set of OpsSet options indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS])) target_spec: Experimental flag, subject to change. Specification of target device.
optimizations: Experimental flag, subject to change. A list of optimizations to apply when converting the model. E.g. `[Optimize.DEFAULT]` representative_dataset: A representative dataset that can be used to generate input and output samples for the model. The converter can use the dataset to evaluate different optimizations. experimental_new_converter: Experimental flag, subject to change. Enables MLIR-based conversion instead of TOCO conversion. (default True)" 640,TFLiteSavedModelConverter,tensorflow/tensorflow/lite/python/lite.py,1410,class,"Converts the given SavedModel into TensorFlow Lite model. Attributes: saved_model_dir: Directory of the SavedModel." 641,TFLiteKerasModelConverter,tensorflow/tensorflow/lite/python/lite.py,1458,class,Converts the given Keras model into TensorFlow Lite model. 642,TFLiteFrozenGraphConverter,tensorflow/tensorflow/lite/python/lite.py,1586,class,Converts the given frozen graph def into TensorFlow Lite model. 643,TFLiteConverter,tensorflow/tensorflow/lite/python/lite.py,1634,class,"Convert a TensorFlow model into `output_format`. This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras model into either a TFLite FlatBuffer or graph visualization. Attributes: inference_type: Target data type of real-number arrays in the output file. Must be `{tf.float32, tf.uint8}`. If `optimizations` are provided, this parameter is ignored. (default tf.float32) inference_input_type: Target data type of real-number input arrays. Allows for a different type for input arrays. If an integer type is provided and `optimizations` are not used, `quantized_input_stats` must be provided. If `inference_type` is tf.uint8, signaling conversion to a fully quantized model from a quantization-aware trained input model, then `inference_input_type` defaults to tf.uint8. In all other cases, `inference_input_type` defaults to tf.float32. Must be `{tf.float32, tf.uint8, tf.int8}` inference_output_type: Target data type of real-number output arrays. Allows for a different type for output arrays. If `inference_type` is tf.uint8, signaling conversion to a fully quantized model from a quantization-aware trained output model, then `inference_output_type` defaults to tf.uint8. In all other cases, `inference_output_type` must be tf.float32; otherwise an error will be thrown. Must be `{tf.float32, tf.uint8, tf.int8}` output_format: Output file format. Currently must be `{TFLITE, GRAPHVIZ_DOT}`. (default TFLITE) quantized_input_stats: Dict of strings representing input tensor names mapped to tuple of floats representing the mean and standard deviation of the training data (e.g., {""foo"" : (0., 1.)}). Only needed if `inference_input_type` is `QUANTIZED_UINT8`. real_input_value = (quantized_input_value - mean_value) / std_dev_value. (default {}) default_ranges_stats: Tuple of integers representing (min, max) range values for all arrays without a specified range. Intended for experimenting with quantization via ""dummy quantization"". (default None) drop_control_dependency: Boolean indicating whether to drop control dependencies silently. This is due to TFLite not supporting control dependencies. (default True) reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant nodes in unexpected locations. Used when the location of the FakeQuant nodes is preventing graph transformations necessary to convert the graph. Results in a graph that differs from the quantized training graph, potentially causing differing arithmetic behavior.
(default False) change_concat_input_ranges: Boolean to change behavior of min/max ranges for inputs and outputs of the concat operator for quantized models. Changes the ranges of concat operator overlap when true. (default False) allow_custom_ops: Boolean indicating whether to allow custom operations. When false any unknown operation is an error. When true, custom ops are created for any op that is unknown. The developer will need to provide these to the TensorFlow Lite runtime with a custom resolver. (default False) post_training_quantize: Deprecated. Please specify `[Optimize.DEFAULT]` for `optimizations` instead. Boolean indicating whether to quantize the weights of the converted float model. Model size will be reduced and there will be latency improvements (at the cost of accuracy). (default False) dump_graphviz_dir: Full filepath of folder to dump the graphs at various stages of processing GraphViz .dot files. Preferred over --output_format=GRAPHVIZ_DOT in order to keep the requirements of the output file. (default None) dump_graphviz_video: Boolean indicating whether to dump the graph after every graph transformation. (default False) conversion_summary_dir: A string indicating the path to the generated conversion logs. target_ops: Deprecated. Please specify `target_spec.supported_ops` instead. Set of OpsSet options indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS])) target_spec: Experimental flag, subject to change. Specification of target device. optimizations: Experimental flag, subject to change. A list of optimizations to apply when converting the model. E.g. `[Optimize.DEFAULT]` representative_dataset: A representative dataset that can be used to generate input and output samples for the model. The converter can use the dataset to evaluate different optimizations. experimental_new_converter: Experimental flag, subject to change. Enables MLIR-based conversion instead of TOCO conversion. (default True) Example usage: ```python # Converting a GraphDef from session. converter = tf.compat.v1.TFLiteConverter.from_session( sess, in_tensors, out_tensors) tflite_model = converter.convert() open(""converted_model.tflite"", ""wb"").write(tflite_model) # Converting a GraphDef from file. converter = tf.compat.v1.TFLiteConverter.from_frozen_graph( graph_def_file, input_arrays, output_arrays) tflite_model = converter.convert() open(""converted_model.tflite"", ""wb"").write(tflite_model) # Converting a SavedModel. converter = tf.compat.v1.TFLiteConverter.from_saved_model(saved_model_dir) tflite_model = converter.convert() open(""converted_model.tflite"", ""wb"").write(tflite_model) # Converting a tf.keras model. converter = tf.compat.v1.TFLiteConverter.from_keras_model_file(keras_model) tflite_model = converter.convert() open(""converted_model.tflite"", ""wb"").write(tflite_model) ```" 644,TocoConverter,tensorflow/tensorflow/lite/python/lite.py,1979,class,"Convert a TensorFlow model into `output_format` using TOCO. This class has been deprecated. Please use `lite.TFLiteConverter` instead." 645,FromSessionTest,tensorflow/tensorflow/lite/python/lite_flex_test.py,38,class, 646,FromConcreteFunctionTest,tensorflow/tensorflow/lite/python/lite_flex_test.py,103,class, 647,LiteTest,tensorflow/tensorflow/lite/python/lite_test.py,59,class,Base class of all the tests in this module. 
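Entries 629-631 and 638 together describe the post-training quantization hooks: set `optimizations` and optionally a `representative_dataset` on a V2 converter. A short sketch combining them (the SavedModel path and the input shape are hypothetical):

```python
import numpy as np
import tensorflow as tf

def representative_dataset():
    # Yield a handful of calibration samples; the shape here is hypothetical.
    for _ in range(100):
        yield [np.random.random((1, 224, 224, 3)).astype(np.float32)]

# Post-training quantization via the Optimize / RepresentativeDataset hooks.
converter = tf.lite.TFLiteConverter.from_saved_model("saved_model_dir")
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
tflite_model = converter.convert()
```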
648,TestModels,tensorflow/tensorflow/lite/python/lite_test.py,63,class, 649,FromConstructor,tensorflow/tensorflow/lite/python/lite_test.py,76,class, 650,FromSessionTest,tensorflow/tensorflow/lite/python/lite_test.py,115,class, 651,FromFrozenGraphFile,tensorflow/tensorflow/lite/python/lite_test.py,1464,class, 652,FromFrozenGraphObjectDetection,tensorflow/tensorflow/lite/python/lite_test.py,1650,class, 653,FromSavedModelTest,tensorflow/tensorflow/lite/python/lite_test.py,1713,class, 654,MyAddLayer,tensorflow/tensorflow/lite/python/lite_test.py,1897,class, 655,FromKerasFile,tensorflow/tensorflow/lite/python/lite_test.py,1912,class, 656,GrapplerTest,tensorflow/tensorflow/lite/python/lite_test.py,2292,class, 657,ImportOpsUtilTest,tensorflow/tensorflow/lite/python/lite_test.py,2384,class, 658,DefaultConverterAttrsTest,tensorflow/tensorflow/lite/python/lite_test.py,2390,class, 659,FromConcreteFunctionTest,tensorflow/tensorflow/lite/python/lite_v2_test.py,48,class, 660,FromSavedModelTest,tensorflow/tensorflow/lite/python/lite_v2_test.py,498,class, 661,FromKerasModelTest,tensorflow/tensorflow/lite/python/lite_v2_test.py,709,class, 662,ControlFlowTest,tensorflow/tensorflow/lite/python/lite_v2_test.py,825,class, 663,GrapplerTest,tensorflow/tensorflow/lite/python/lite_v2_test.py,1013,class, 664,UnknownShapes,tensorflow/tensorflow/lite/python/lite_v2_test.py,1047,class, 665,ModelTest,tensorflow/tensorflow/lite/python/lite_v2_test_util.py,34,class,Base test class for TensorFlow Lite 2.x model tests. 666,OpHint,tensorflow/tensorflow/lite/python/op_hint.py,97,class,"A class that helps build tflite function invocations. It allows you to take a bunch of TensorFlow ops and annotate the construction such that toco knows how to convert it to tflite. This embeds a pseudo function in a TensorFlow graph. This allows embedding high-level API usage information in a lower level TensorFlow implementation so that an alternative implementation can be substituted later. Essentially, any ""input"" into this pseudo op is fed into an identity, and attributes are added to that input before being used by the constituent ops that make up the pseudo op. A similar process is done to any output that is to be exported from the current op." 667,_LiteOperand,tensorflow/tensorflow/lite/python/op_hint.py,471,class,"Abstract operand for a tflite hint function. This is a base class that handles representing arguments to an OpHint. It also is able to serialize operands to the stubbed graph_def. Child classes are responsible for being able to store information about the hint identity operators. They are also responsible for knowing how to serialize to output graphdefs. Typically this will be implemented by holding one or more identity nodes that were previously discovered as hints." 668,_LiteSingleOperand,tensorflow/tensorflow/lite/python/op_hint.py,518,class,A simple operand that is non-aggregated (i.e. most hints). 669,_LiteAggregateOperand,tensorflow/tensorflow/lite/python/op_hint.py,544,class,"An operand for a tflite hint function that is aggregated from many. For example, an LSTM is a grid of operators that are all related. Inputs going into them may need to be fused, so they should all be tracked as related arguments." 670,_LiteFuncCall,tensorflow/tensorflow/lite/python/op_hint.py,670,class,"Represent a TensorFlow Lite custom function. This is used to accumulate found hints in the graphdef into a single conceptual unit.
Attributes: inputs: inputs to the op (hash from index # to argument) outputs: outputs to the op (hash from index # to argument) function_name: the tflite custom op name to use uuid: a unique call id for this particular call (i.e. multiple function calls would have the same function_name but different uuids). params: A param name to key value for op constant data. E.g. for axis on a reduction, strides on a convolution, etc. level: Level of the OpHint. children_inputs_mappings: If the Ophint has children, children inputs mappings indicate how their inputs & outputs are mapped." 671,_find_all_hints_in_nodes,tensorflow/tensorflow/lite/python/op_hint.py,730,function,"Look at all the input nodes and return a list of LiteFuncCall objs. Args: nodes: Nodes of a TensorFlow graph_def to search for LiteFuncCalls. Returns: a list of `LiteFuncCall` objects." 672,_extract_topology_sequence_mapping,tensorflow/tensorflow/lite/python/op_hint.py,795,function, 673,_find_children_hints_in_while_loop,tensorflow/tensorflow/lite/python/op_hint.py,800,function,"Find children hints and all nodes inside the while loop. Args: function_def: Function def of the while loop. nodes_mapping: While loop input_arg : real node name. Returns: Ordered children hints and all re-mapped nodes inside the while loop." 674,_find_children_hints,tensorflow/tensorflow/lite/python/op_hint.py,833,function,"Find all children hints. For a given OpHint, we find all children hints inside it, we also copy all the nodes inside function defs (if applicable) to the original graph_def, they are returned in a list as well. Args: call: Parent OpHint that contains children ophints. graph_def: Original graph def. Returns: Ordered children hints inside the parent ophint; new graph def that contains nodes inside function defs (if applicable); nodes inside function defs." 675,_tensor_name_base,tensorflow/tensorflow/lite/python/op_hint.py,887,function,"Removes the device assignment code from a tensor. e.g. _tensor_name_base(""foo:3"") => ""foo"" Args: full_tensor_name: A tensor name that is annotated with a device placement (this is what TensorFlow introspection gives). Returns: A name without any device assignment." 676,_tensorflow_output_name,tensorflow/tensorflow/lite/python/op_hint.py,904,function, 677,_check_subgraph_closed,tensorflow/tensorflow/lite/python/op_hint.py,910,function,"Checks to make sure node only connects to predecessor graph through inputs. Args: n: Node to check reachable_by_input: Nodes that are reachable by all inputs of subgraph input_nodes_set: The set of nodes that are ""inputs"". name_to_input_name: Maps from name to the list of inputs. Raises: TypeError: If the given node uses items past inputs directly." 678,_convert_single_op_hint_to_stub,tensorflow/tensorflow/lite/python/op_hint.py,940,function,"Given a graph_def, converts `call` into a stub and returns a new graph_def. Args: call: A single function call to be converted. graph_def: A graph_def to use as input (that has call obviously). function_def_nodes: Nodes inside the function def that are not connected to the graph. is_last_run: Whether it is the last run for a given pass (for OpHints that have children). Returns: A new transformed graph-def that has call as a stub (single op). Note: after this process, the graph_def can no longer be loaded into the TensorFlow runtime, so all future manipulations are done at graph_def level."
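Entries 666-678 cover the OpHint machinery: inputs and outputs of a subgraph are wrapped in identity hints so the whole subgraph can later be collapsed into one stub op. A schematic TF1-style sketch of that flow, assuming the experimental `OpHint` API from the op_hint module indexed above (the custom op name is illustrative):

```python
import tensorflow.compat.v1 as tf
from tensorflow.lite.python.op_hint import OpHint, convert_op_hints_to_stubs

tf.disable_eager_execution()

# Annotate a small subgraph so toco can later treat it as one custom op.
hint = OpHint("cool_activation")  # the custom op name is illustrative
x = tf.placeholder(tf.float32, shape=[None, 16])
x = hint.add_input(x)   # wraps the input in a hint identity op
y = tf.nn.relu(x) * 1.5
y = hint.add_output(y)  # wraps the output in a hint identity op

with tf.Session() as sess:
    # Replace the hinted subgraph with a single stub op in the graph def.
    stubbed_graph_def = convert_op_hints_to_stubs(session=sess)
```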
679,_remove_one_redundant_stack_unstack,tensorflow/tensorflow/lite/python/op_hint.py,1070,function,"Removes a stack->unstack pattern from in_graph_def in a returned graph. Args: in_graph_def: Graph def to use as input. Returns: Simplified tuple (graph_def, changed_something) where changed_something is true if anything was done." 680,_remove_redundant_stack_unstack,tensorflow/tensorflow/lite/python/op_hint.py,1161,function, 681,_get_correct_mapping,tensorflow/tensorflow/lite/python/op_hint.py,1170,function, 682,_convert_op_hints_to_stubs_helper,tensorflow/tensorflow/lite/python/op_hint.py,1180,function,"Converts a graph_def to a new graph_def where all op hints are stubbed. Args: graph_def: A graph def that we should convert. write_callback: A function pointer that can be used to write intermediate steps of graph transformation (optional). Returns: A new stubbed graph_def." 683,find_all_hinted_output_nodes,tensorflow/tensorflow/lite/python/op_hint.py,1257,function,"Find all Ophints output nodes in the graph. This is used to get all the output nodes that are ophinted; it is important for operations like convert_variables_to_constants to keep the whole OpHint structure. Note: only one of session or graph_def should be used, not both. Why can this be useful? Some TensorFlow ops (e.g. bidirectional rnn) can generate multiple outputs for an unfused subgraph. If not all output nodes are consumed, graph optimization can potentially drop the unused nodes and leave ophints in an invalid state (due to missing ophinted output nodes). So it's important for us to find all those hinted output nodes and make sure they're not discarded. Args: session: A TensorFlow session that contains the graph to convert. graph_def: A graph def that we should convert. Returns: A list of OpHints output nodes. Raises: ValueError: If both session and graph_def are provided." 684,is_ophint_converted,tensorflow/tensorflow/lite/python/op_hint.py,1292,function, 685,convert_op_hints_to_stubs,tensorflow/tensorflow/lite/python/op_hint.py,1305,function,"Converts a graphdef with LiteOp hints into stub operations. This is used to prepare for toco conversion of complex intrinsic usages. Note: only one of session or graph_def should be used, not both. Args: session: A TensorFlow session that contains the graph to convert. graph_def: A graph def that we should convert. write_callback: A function pointer that can be used to write intermediate steps of graph transformation (optional). Returns: A new graphdef with all ops contained in OpHints being replaced by a single op call with the right parameters. Raises: ValueError: If both session and graph_def are provided." 686,_parse_array,tensorflow/tensorflow/lite/python/tflite_convert.py,39,function, 687,_parse_set,tensorflow/tensorflow/lite/python/tflite_convert.py,45,function, 688,_parse_inference_type,tensorflow/tensorflow/lite/python/tflite_convert.py,51,function,"Converts the inference type to the value of the constant. Args: value: str representing the inference type. flag: str representing the flag name. Returns: tf.dtype. Raises: ValueError: Unsupported value." 689,_get_tflite_converter,tensorflow/tensorflow/lite/python/tflite_convert.py,74,function,"Makes a TFLiteConverter object based on the flags provided. Args: flags: argparse.Namespace object containing TFLite flags. Returns: TFLiteConverter object. Raises: ValueError: Invalid flags."
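Entries 686-699 index the `tflite_convert` CLI plumbing; per entry 689, `_get_tflite_converter` builds a converter from argparse flags. A simplified sketch of that dispatch, assuming a namespace with the flags shown (the real helper handles many more options):

```python
import tensorflow as tf

def get_tflite_converter(flags):
    """Pick a converter factory based on which input-model flag was supplied."""
    if flags.saved_model_dir:
        return tf.compat.v1.lite.TFLiteConverter.from_saved_model(
            flags.saved_model_dir)
    if flags.keras_model_file:
        return tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(
            flags.keras_model_file)
    if flags.graph_def_file:
        return tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
            flags.graph_def_file, flags.input_arrays, flags.output_arrays)
    raise ValueError("one of --graph_def_file, --saved_model_dir or "
                     "--keras_model_file must be specified")
```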
690,_convert_tf1_model,tensorflow/tensorflow/lite/python/tflite_convert.py,122,function,"Calls function to convert the TensorFlow 1.X model into a TFLite model. Args: flags: argparse.Namespace object. Raises: ValueError: Invalid flags." 691,_convert_tf2_model,tensorflow/tensorflow/lite/python/tflite_convert.py,219,function,"Calls function to convert the TensorFlow 2.0 model into a TFLite model. Args: flags: argparse.Namespace object. Raises: ValueError: Unsupported file format." 692,_check_tf1_flags,tensorflow/tensorflow/lite/python/tflite_convert.py,244,function,"Checks the parsed and unparsed flags to ensure they are valid in 1.X. Raises an error if previously supported unparsed flags are found. Raises an error for parsed flags that don't meet the required conditions. Args: flags: argparse.Namespace object containing TFLite flags. unparsed: List of unparsed flags. Raises: ValueError: Invalid flags." 693,_check_tf2_flags,tensorflow/tensorflow/lite/python/tflite_convert.py,313,function,"Checks the parsed and unparsed flags to ensure they are valid in 2.X. Args: flags: argparse.Namespace object containing TFLite flags. Raises: ValueError: Invalid flags." 694,_get_tf1_flags,tensorflow/tensorflow/lite/python/tflite_convert.py,327,function,"Returns ArgumentParser for tflite_convert for TensorFlow 1.X. Args: parser: ArgumentParser" 695,_get_tf2_flags,tensorflow/tensorflow/lite/python/tflite_convert.py,511,function,"Returns ArgumentParser for tflite_convert for TensorFlow 2.0. Args: parser: ArgumentParser" 696,_ParseExperimentalNewConverter,tensorflow/tensorflow/lite/python/tflite_convert.py,535,class,Helper class to parse --experimental_new_converter argument. 697,_get_parser,tensorflow/tensorflow/lite/python/tflite_convert.py,565,function,"Returns an ArgumentParser for tflite_convert. Args: use_v2_converter: Indicates which converter to return. Return: ArgumentParser." 698,run_main,tensorflow/tensorflow/lite/python/tflite_convert.py,596,function,Main in tflite_convert.py. 699,main,tensorflow/tensorflow/lite/python/tflite_convert.py,639,function, 700,TestModels,tensorflow/tensorflow/lite/python/tflite_convert_test.py,45,class, 701,TfLiteConvertV1Test,tensorflow/tensorflow/lite/python/tflite_convert_test.py,81,class, 702,TfLiteConvertV2Test,tensorflow/tensorflow/lite/python/tflite_convert_test.py,298,class, 703,ArgParserTest,tensorflow/tensorflow/lite/python/tflite_convert_test.py,339,class, 704,convert_dtype_to_tflite_type,tensorflow/tensorflow/lite/python/util.py,59,function,"Converts tf.dtype to TFLite proto type. Args: tf_dtype: tf.dtype Raises: ValueError: Unsupported tf.dtype. Returns: types_flag_pb2." 705,get_tensor_name,tensorflow/tensorflow/lite/python/util.py,77,function,"Returns name of the input tensor. Args: tensor: tf.Tensor Returns: str" 706,get_tensors_from_tensor_names,tensorflow/tensorflow/lite/python/util.py,98,function,"Gets the Tensors associated with the `tensor_names` in the provided graph. Args: graph: TensorFlow Graph. tensor_names: List of strings that represent names of tensors in the graph. Returns: A list of Tensor objects in the same order the names are provided. Raises: ValueError: tensor_names contains an invalid tensor name." 707,set_tensor_shapes,tensorflow/tensorflow/lite/python/util.py,141,function,"Sets Tensor shape for each tensor if the shape is defined. Args: tensors: TensorFlow ops.Tensor. shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {""foo"" : [1, 16, 16, 3]}).
Raises: ValueError: `shapes` contains an invalid tensor. `shapes` contains an invalid shape for a valid tensor." 708,get_grappler_config,tensorflow/tensorflow/lite/python/util.py,172,function,"Creates a tf.compat.v1.ConfigProto for configuring Grappler. Args: optimizers_list: List of strings that represents the list of optimizers. Returns: tf.ConfigProto." 709,run_graph_optimizations,tensorflow/tensorflow/lite/python/util.py,188,function,"Apply standard TensorFlow optimizations to the graph_def. Args: graph_def: Frozen GraphDef to be optimized. input_arrays: List of arrays that are considered inputs of the graph. output_arrays: List of arrays that are considered outputs of the graph. config: tf.ConfigProto. graph: TensorFlow Graph. Required when Eager mode is enabled. (default None) Returns: A new, optimized GraphDef." 710,_convert_op_hints_if_present,tensorflow/tensorflow/lite/python/util.py,230,function, 711,freeze_graph,tensorflow/tensorflow/lite/python/util.py,241,function,"Returns a frozen GraphDef. Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the existing GraphDef is returned. The Grappler pass is only run on models that are frozen in order to inline the functions in the graph. If OpHints are present, it will try to convert the OpHint graph. Args: sess: TensorFlow Session. input_tensors: List of input tensors. output_tensors: List of output tensors (only .name is used from this). Returns: Frozen GraphDef." 712,is_frozen_graph,tensorflow/tensorflow/lite/python/util.py,281,function,"Determines if the graph is frozen. Determines if a graph has previously been frozen by checking for any operations of type Variable*. If variables are found, the graph is not frozen. Args: sess: TensorFlow Session. Returns: Bool." 713,build_debug_info_func,tensorflow/tensorflow/lite/python/util.py,300,function,"Returns a method to retrieve the `GraphDebugInfo` from the original graph. Args: original_graph: The original `Graph` containing all the op stack traces. Returns: A function which retrieves the stack traces from the original graph and converts them to a `GraphDebugInfo` for a given set of nodes." 714,convert_debug_info_func,tensorflow/tensorflow/lite/python/util.py,339,function,"Returns a method to retrieve the `GraphDebugInfo` from the original graph. Args: saved_debug_info: The `GraphDebugInfo` containing all the debug info. Returns: A function which retrieves the stack traces from the original graph and converts them to a `GraphDebugInfo` for a given set of nodes." 715,get_debug_info,tensorflow/tensorflow/lite/python/util.py,368,function,"Returns the debug info for the original nodes in the `converted_graph`. Args: nodes_to_debug_info_func: The method to collect the op debug info for the nodes. converted_graph: A `GraphDef` after optimization and transformation. Returns: `GraphDebugInfo` for all the original nodes in `converted_graph`." 716,convert_bytes_to_c_source,tensorflow/tensorflow/lite/python/util.py,399,function,"Returns strings representing a C constant array containing `data`. Args: data: Byte array that will be converted into a C constant. array_name: String to use as the variable name for the constant array. max_line_width: The longest line length, for formatting purposes. include_guard: Name to use for the include guard macro definition. include_path: Optional path to include in the source file. use_tensorflow_license: Whether to include the standard TensorFlow Apache2 license in the generated files.
Returns: Text that can be compiled as a C source file to link in the data as a literal array of values. Text that can be used as a C header file to reference the literal array." 717,UtilTest,tensorflow/tensorflow/lite/python/util_test.py,39,class, 718,TensorFunctionsTest,tensorflow/tensorflow/lite/python/util_test.py,124,class, 719,wrapped_toco_convert,tensorflow/tensorflow/lite/python/wrap_toco.py,29,function,Wraps TocoConvert with lazy loader. 720,wrapped_get_potentially_supported_ops,tensorflow/tensorflow/lite/python/wrap_toco.py,41,function,Wraps TocoGetPotentiallySupportedOps with lazy loader. 721,wrapped_experimental_mlir_quantize,tensorflow/tensorflow/lite/python/wrap_toco.py,46,function,Wraps experimental mlir quantize model. 722,wrapped_experimental_mlir_sparsify,tensorflow/tensorflow/lite/python/wrap_toco.py,55,function,Wraps experimental mlir sparsify model. 723,Calibrator,tensorflow/tensorflow/lite/python/optimize/calibrator.py,33,class,"Calibrates a floating point model and then quantizes it. This is an internal class, not a public interface." 724,CalibratorTest,tensorflow/tensorflow/lite/python/optimize/calibrator_test.py,33,class, 725,TemporaryDirectoryResource,tensorflow/tensorflow/lite/schema/upgrade_schema.py,57,function, 726,Converter,tensorflow/tensorflow/lite/schema/upgrade_schema.py,65,class,"Converts TensorFlow flatbuffer models from old to new version of schema. This can convert from any version to the latest version. It uses an incremental upgrade strategy to go from version to version. Usage: converter = Converter() converter.Convert(""a.tflite"", ""a.json"") converter.Convert(""b.json"", ""b.tflite"")" 727,main,tensorflow/tensorflow/lite/schema/upgrade_schema.py,344,function, 728,JsonDumpAndFlush,tensorflow/tensorflow/lite/schema/upgrade_schema_test.py,242,function,"Write the dictionary `data` to a JSON file `fp` (and flush). Args: data: a dictionary that is JSON serializable. fp: File-like object" 729,TestSchemaUpgrade,tensorflow/tensorflow/lite/schema/upgrade_schema_test.py,253,class, 730,main,tensorflow/tensorflow/lite/testing/generate_examples.py,100,function, 731,MultiGenState,tensorflow/tensorflow/lite/testing/generate_examples_lib.py,176,class,"State of multiple set generation process. This state class stores the information needed when generating the examples for multiple test sets. The stored information includes the open archive object to be shared, information on the test target for the current iteration of generation, and accumulated generation results." 732,Options,tensorflow/tensorflow/lite/testing/generate_examples_lib.py,203,class,All options for example generation. 733,_prepare_dir,tensorflow/tensorflow/lite/testing/generate_examples_lib.py,244,function, 734,generate_examples,tensorflow/tensorflow/lite/testing/generate_examples_lib.py,256,function,"Generate examples for a test set. Args: options: Options containing information to generate examples. Raises: RuntimeError: if the test function cannot be found." 735,generate_multi_set_examples,tensorflow/tensorflow/lite/testing/generate_examples_lib.py,294,function,"Generate examples for test sets. Args: options: Options containing information to generate examples. test_sets: List of the name of test sets to generate examples." 736,make_report_table,tensorflow/tensorflow/lite/testing/generate_examples_report.py,32,function,"Make an HTML report of the success/failure reports. Args: fp: File-like object in which to put the html.
title: ""Title of the zip file this pertains to."" reports: a list of conversion attempts. (report_args, report_vals) i.e. ({""shape"": [1,2,3], ""type"": ""tf.float32""}, {""tf"": ""SUCCESS"", ""toco"": ""FAILURE"", ""toco_log"": ""Unsupported type."", ""tf_log"": """"})" 737,toco_options,tensorflow/tensorflow/lite/testing/toco_convert.py,31,function,"Create TOCO options to process a model. Args: data_types: input and inference types used by TOCO. input_arrays: names of the input tensors output_arrays: name of the output tensors shapes: shapes of the input tensors extra_toco_options: additional toco options Returns: the options in a string." 738,toco_convert,tensorflow/tensorflow/lite/testing/toco_convert.py,78,function,"Convert a model's graph def into a tflite model. NOTE: this currently shells out to the toco binary, but we would like convert to Python API tooling in the future. Args: options: An Options instance. graph_def: A GraphDef object. input_tensors: List of input tensor tuples `(name, shape, type)`. output_tensors: List of output tensors (names). **kwargs: Extra options to be passed. Returns: output tflite model, log_txt from conversion or None, log_txt if it did not convert properly." 739,register_make_test_function,tensorflow/tensorflow/lite/testing/zip_test_utils.py,55,function, 740,get_test_function,tensorflow/tensorflow/lite/testing/zip_test_utils.py,65,function,Get the test function according to the test function name. 741,ExtraTocoOptions,tensorflow/tensorflow/lite/testing/zip_test_utils.py,88,class,"Additional toco options besides input, output, shape." 742,create_tensor_data,tensorflow/tensorflow/lite/testing/zip_test_utils.py,106,function,"Build tensor data spreading the range [min_value, max_value)." 743,create_scalar_data,tensorflow/tensorflow/lite/testing/zip_test_utils.py,126,function,Build scalar tensor data range from min_value to max_value exclusively. 744,freeze_graph,tensorflow/tensorflow/lite/testing/zip_test_utils.py,144,function,"Freeze the current graph. Args: session: Tensorflow sessions containing the graph outputs: List of output tensors Returns: The frozen graph_def." 745,format_result,tensorflow/tensorflow/lite/testing/zip_test_utils.py,158,function,Convert a tensor to a format that can be used in test specs. 746,write_examples,tensorflow/tensorflow/lite/testing/zip_test_utils.py,168,function,"Given a list `examples`, write a text format representation. The file format is csv like with a simple repeated pattern. We would ike to use proto here, but we can't yet due to interfacing with the Android team using this format. Args: fp: File-like object to write to. examples: Example dictionary consisting of keys ""inputs"" and ""outputs""" 747,write_test_cases,tensorflow/tensorflow/lite/testing/zip_test_utils.py,196,function,"Given a dictionary of `examples`, write a text format representation. The file format is protocol-buffer-like, even though we don't use proto due to the needs of the Android team. Args: fp: File-like object to write to. model_name: Filename where the model was written to, relative to filename. examples: Example dictionary consisting of keys ""inputs"" and ""outputs""" 748,get_input_shapes_map,tensorflow/tensorflow/lite/testing/zip_test_utils.py,225,function,"Gets a map of input names to shapes. Args: input_tensors: List of input tensor tuples `(name, shape, type)`. Returns: {string : list of integers}." 749,_normalize_output_name,tensorflow/tensorflow/lite/testing/zip_test_utils.py,251,function,Remove :0 suffix from tensor names. 
750,make_zip_of_tests,tensorflow/tensorflow/lite/testing/zip_test_utils.py,262,function,"Helper to make a zip file of a bunch of TensorFlow models. This does a cartesian product of the dictionary of test_parameters and calls make_graph() for each item in the cartesian product set (see the parameter-expansion sketch below). If the graph is built successfully, then make_test_inputs() is called to build expected input/output value pairs. The model is then converted to tflite with toco, and the examples are serialized with the tflite model into a zip file (2 files per item in the cartesian product set). Args: options: An Options instance. test_parameters: Dictionary mapping to lists for each parameter. e.g. `{""strides"": [[1,3,3,1], [1,2,2,1]], ""foo"": [1.2, 1.3]}` make_graph: function that takes current parameters and returns tuple `[input1, input2, ...], [output1, output2, ...]` make_test_inputs: function taking `curr_params`, `session`, `input_tensors`, `output_tensors` and returns tuple `(input_values, output_values)`. extra_toco_options: Additional toco options. use_frozen_graph: Whether or not to freeze the graph before the toco converter runs. expected_tf_failures: Number of times tensorflow is expected to fail in executing the input graphs. In some cases it is OK for TensorFlow to fail because one or more combinations of parameters are invalid. Raises: RuntimeError: if there are converter errors that can't be ignored." 751,get_filepath,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,47,function,"Returns the full path of the filename. Args: filename: Subdirectory and name of the model file. base_dir: Base directory containing model file. Returns: str." 752,get_image,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,63,function,"Returns an image loaded into an np.ndarray with dims [1, size, size, 3]. Args: size: Size of image. Returns: np.ndarray." 753,_convert,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,80,function,"Converts the model. Args: converter: TFLiteConverter object. **kwargs: Additional arguments to be passed into the converter. Supported flags are {""target_ops"", ""post_training_quantize"", ""quantize_to_float16""}. Returns: The converted TFLite model in serialized format. Raises: ValueError: Invalid version number." 754,_get_tflite_interpreter,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,103,function,"Creates a TFLite interpreter with resized input tensors. Args: tflite_model: Serialized TensorFlow Lite model. input_shapes_resize: A map where the key is the input tensor name and the value is the shape of the input tensor. This resize happens after model conversion, prior to calling allocate tensors. (default None) Returns: lite.Interpreter" 755,_get_input_data_map,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,127,function,"Generates a map of input data based on the TFLite model. Args: tflite_model: Serialized TensorFlow Lite model. input_data: List of np.ndarray. Returns: {str: [np.ndarray]}." 756,_generate_random_input_data,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,146,function,"Generates input data based on the input tensors in the TFLite model. Args: tflite_model: Serialized TensorFlow Lite model. seed: Integer seed for the random generator. (default None) input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor.
For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) input_shapes_resize: A map where the key is the input tensor name and the value is the shape of the input tensor. This resize happens after model conversion, prior to calling allocate tensors. (default None) Returns: ([np.ndarray], {str : [np.ndarray]})." 757,_evaluate_tflite_model,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,191,function,"Returns evaluation of input data on TFLite model. Args: tflite_model: Serialized TensorFlow Lite model. input_data: List of np.ndarray. input_shapes_resize: A map where the key is the input tensor name and the value is the shape of the input tensor. This resize happens after model conversion, prior to calling allocate tensors. (default None) Returns: List of np.ndarray." 758,evaluate_frozen_graph,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,222,function,"Returns a function that evaluates the frozen graph on input data. Args: filename: Full filepath of file containing frozen GraphDef. input_arrays: List of input tensors to freeze graph with. output_arrays: List of output tensors to freeze graph with. Returns: Lambda function ([np.ndarray data] : [np.ndarray result])." 759,evaluate_saved_model,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,260,function,"Returns a function that evaluates the SavedModel on input data. Args: directory: SavedModel directory to convert. tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. signature_key: Key identifying SignatureDef containing inputs and outputs. Returns: Lambda function ([np.ndarray data] : [np.ndarray result])." 760,evaluate_keras_model,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,286,function,"Returns a function that evaluates the tf.keras model on input data. Args: filename: Full filepath of HDF5 file containing the tf.keras model. Returns: Lambda function ([np.ndarray data] : [np.ndarray result])." 761,compare_models,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,299,function,"Compares TensorFlow and TFLite models. Unless the input data is provided, the models are compared with random data. Args: tflite_model: Serialized TensorFlow Lite model. tf_eval_func: Lambda function that takes in input data and outputs the results of the TensorFlow model ([np.ndarray data] : [np.ndarray result]). input_shapes_resize: A map where the key is the input tensor name and the value is the shape of the input tensor. This resize happens after model conversion, prior to calling allocate tensors. (default None) input_data: np.ndarray to pass into models during inference. (default None) input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor. For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) tolerance: Decimal place to check accuracy to. (default 5)." 762,compare_models_v2,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,336,function,"Compares TensorFlow and TFLite models for TensorFlow 2.0. Unless the input data is provided, the models are compared with random data. Currently only 1 input and 1 output are supported by this function. 
Args: tflite_model: Serialized TensorFlow Lite model. tf_eval_func: Function to evaluate TensorFlow model. Either a lambda function that takes in input data and outputs the results or a TensorFlow ConcreteFunction. input_data: np.ndarray to pass into models during inference. (default None). input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor. For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) tolerance: Decimal place to check accuracy to. (default 5)" 763,test_frozen_graph_quant,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,390,function,"Sanity check to validate that the post_training_quantize flag alters the graph. This test does not check correctness of the converted model. It converts the TensorFlow frozen graph to TFLite with and without the post_training_quantize flag. It ensures some tensors have different types between the float and quantized models in the case of an all TFLite model or mix-and-match model. It ensures tensor types do not change in the case of an all Flex model. Args: filename: Full filepath of file containing frozen GraphDef. input_arrays: List of input tensors to freeze graph with. output_arrays: List of output tensors to freeze graph with. input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {""foo"" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {""foo"" : None}). (default None) **kwargs: Additional arguments to be passed into the converter. Raises: ValueError: post_training_quantize flag doesn't act as intended." 764,test_frozen_graph,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,459,function,"Validates the TensorFlow frozen graph converts to a TFLite model. Converts the TensorFlow frozen graph to TFLite and checks the accuracy of the model on random data. Args: filename: Full filepath of file containing frozen GraphDef. input_arrays: List of input tensors to freeze graph with. output_arrays: List of output tensors to freeze graph with. input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {""foo"" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {""foo"" : None}). (default None) input_shapes_resize: A map where the key is the input tensor name and the value is the shape of the input tensor. This resize happens after model conversion, prior to calling allocate tensors. (default None) input_data: np.ndarray to pass into models during inference. (default None). input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor. For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) **kwargs: Additional arguments to be passed into the converter." 765,test_saved_model,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,504,function,"Validates the TensorFlow SavedModel converts to a TFLite model. Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the model on random data. Args: directory: SavedModel directory to convert.
input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {""foo"" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {""foo"" : None}). (default None) tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. signature_key: Key identifying SignatureDef containing inputs and outputs. input_data: np.ndarray to pass into models during inference. (default None). input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor. For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) **kwargs: Additional arguments to be passed into the converter." 766,test_saved_model_v2,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,548,function,"Validates the TensorFlow SavedModel converts to a TFLite model. Converts the TensorFlow SavedModel to TFLite and checks the accuracy of the model on random data. Args: directory: SavedModel directory to convert. tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. signature_key: Key identifying SignatureDef containing inputs and outputs. input_data: np.ndarray to pass into models during inference. (default None). input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor. For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) **kwargs: Additional arguments to be passed into the converter." 767,test_saved_model_v2_quant_float16,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,587,function,Validates the TensorFlow SavedModel converts to a TFLite model. 768,test_keras_model,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,623,function,"Validates the tf.keras model converts to a TFLite model. Converts the tf.keras model to TFLite and checks the accuracy of the model on random data. Args: filename: Full filepath of HDF5 file containing the tf.keras model. input_arrays: List of input tensors to freeze graph with. input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {""foo"" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {""foo"" : None}). (default None) input_data: np.ndarray to pass into models during inference. (default None). input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor. For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) **kwargs: Additional arguments to be passed into the converter." 769,test_keras_model_v2,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib.py,661,function,"Validates the tf.keras model converts to a TFLite model. Converts the tf.keras model to TFLite and checks the accuracy of the model on random data. Args: filename: Full filepath of HDF5 file containing the tf.keras model. 
input_shapes: List of list of integers representing input shapes in the order of the tf.keras model's .input attribute (e.g., [[1, 16, 16, 3]]). (default None) input_data: np.ndarray to pass into models during inference. (default None). input_data_range: A map where the key is the input tensor name and the value is a tuple (min_val, max_val) which specifies the value range of the corresponding input tensor. For example, '{'input1': (1, 5)}' means to generate a random value for tensor `input1` within range [1.0, 5.0) (half-inclusive). (default None) **kwargs: Additional arguments to be passed into the converter." 770,EvaluateFrozenGraph,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib_test.py,42,class, 771,EvaluateSavedModel,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib_test.py,142,class, 772,EvaluateKerasModel,tensorflow/tensorflow/lite/testing/model_coverage/model_coverage_lib_test.py,160,class, 773,make_abs_tests,tensorflow/tensorflow/lite/testing/op_tests/abs.py,28,function,Make a set of tests to do abs. 774,make_add_n_tests,tensorflow/tensorflow/lite/testing/op_tests/add_n.py,27,function,Make a set of tests for AddN op. 775,make_arg_min_max_tests,tensorflow/tensorflow/lite/testing/op_tests/arg_min_max.py,29,function,Make a set of tests to do arg_max. 776,make_batch_to_space_nd_tests,tensorflow/tensorflow/lite/testing/op_tests/batch_to_space_nd.py,28,function,Make a set of tests to do batch_to_space_nd. 777,make_binary_op_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,26,function,Make a set of tests to do binary ops with and without broadcast. 778,make_binary_op_tests_func,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,239,function,Return a function that does a test on a binary operator. 779,make_add_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,245,function, 780,make_div_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,250,function,Make zip tests for div op with 5D case. 781,make_sub_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,267,function,Make zip tests for sub op with additional cases. 782,make_mul_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,287,function, 783,make_pow_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,292,function, 784,make_floor_div_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,297,function, 785,make_floor_mod_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,302,function, 786,make_squared_difference_tests,tensorflow/tensorflow/lite/testing/op_tests/binary_op.py,307,function, 787,make_cast_tests,tensorflow/tensorflow/lite/testing/op_tests/cast.py,27,function,Generate examples for cast. 788,make_ceil_tests,tensorflow/tensorflow/lite/testing/op_tests/ceil.py,27,function,Make a set of tests to do ceil. 789,make_concat_tests,tensorflow/tensorflow/lite/testing/op_tests/concat.py,27,function,Make a set of tests to do concatenation. 790,make_constant_tests,tensorflow/tensorflow/lite/testing/op_tests/constant.py,31,function,Make a set of tests to do constant ops. 791,make_conv_tests,tensorflow/tensorflow/lite/testing/op_tests/conv.py,28,function,Make a set of tests to do convolution. 792,make_conv2d_transpose_tests,tensorflow/tensorflow/lite/testing/op_tests/conv2d_transpose.py,28,function,Make a set of tests to do transpose_conv. 793,make_conv_activation_tests,tensorflow/tensorflow/lite/testing/op_tests/conv_activation.py,27,function,Make a set of tests to do convolution with activation. 
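Entry 750 above, make_zip_of_tests, generates one test case per element of the cartesian product of test_parameters. A standard-library sketch of that expansion (expand_test_parameters is a hypothetical helper, not part of zip_test_utils):

```python
import itertools

def expand_test_parameters(test_parameters):
  # Yield one {parameter: value} dict per point in the cartesian product.
  keys = sorted(test_parameters)
  for values in itertools.product(*(test_parameters[k] for k in keys)):
    yield dict(zip(keys, values))

# Each yielded dict plays the role of the `curr_params` passed to make_graph().
for params in expand_test_parameters(
    {"strides": [[1, 3, 3, 1], [1, 2, 2, 1]], "foo": [1.2, 1.3]}):
  print(params)  # 2 x 2 = 4 combinations
```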
794,make_conv_relu6_tests,tensorflow/tensorflow/lite/testing/op_tests/conv_activation.py,132,function,Make a set of tests to do conv_relu6. 795,make_conv_relu_tests,tensorflow/tensorflow/lite/testing/op_tests/conv_activation.py,138,function,Make a set of tests to do conv_relu. 796,relu1,tensorflow/tensorflow/lite/testing/op_tests/conv_activation.py,143,function, 797,make_conv_relu1_tests,tensorflow/tensorflow/lite/testing/op_tests/conv_activation.py,151,function,Make a set of tests to do conv_relu1. 798,make_conv_to_depthwiseconv_with_shared_weights_tests,tensorflow/tensorflow/lite/testing/op_tests/conv_to_depthwiseconv_with_shared_weights.py,28,function,Make a test where 2 Conv ops share the same constant weight tensor. 799,make_conv_with_shared_weights_tests,tensorflow/tensorflow/lite/testing/op_tests/conv_with_shared_weights.py,28,function,Make a test where 2 Conv ops share the same constant weight tensor. 800,make_cos_tests,tensorflow/tensorflow/lite/testing/op_tests/cos.py,28,function,Make a set of tests to do cos. 801,make_depth_to_space_tests,tensorflow/tensorflow/lite/testing/op_tests/depth_to_space.py,27,function,Make a set of tests to do depth_to_space. 802,make_depthwiseconv_tests,tensorflow/tensorflow/lite/testing/op_tests/depthwiseconv.py,28,function,Make a set of tests to do convolution. 803,_make_elementwise_tests,tensorflow/tensorflow/lite/testing/op_tests/elementwise.py,26,function,Make a set of tests to do element-wise operations. 804,make_sin_tests,tensorflow/tensorflow/lite/testing/op_tests/elementwise.py,57,function,Make a set of tests to do sin. 805,make_log_tests,tensorflow/tensorflow/lite/testing/op_tests/elementwise.py,63,function,Make a set of tests to do log. 806,make_sqrt_tests,tensorflow/tensorflow/lite/testing/op_tests/elementwise.py,69,function,Make a set of tests to do sqrt. 807,make_rsqrt_tests,tensorflow/tensorflow/lite/testing/op_tests/elementwise.py,75,function,Make a set of tests to do 1/sqrt. 808,make_square_tests,tensorflow/tensorflow/lite/testing/op_tests/elementwise.py,81,function,Make a set of tests to do square. 809,make_elu_tests,tensorflow/tensorflow/lite/testing/op_tests/elu.py,28,function,Make a set of tests to do (float) tf.nn.elu. 810,make_embedding_lookup_tests,tensorflow/tensorflow/lite/testing/op_tests/embedding_lookup.py,27,function,Make a set of tests to do gather. 811,make_equal_tests,tensorflow/tensorflow/lite/testing/op_tests/equal.py,27,function,Make a set of tests to do equal. 812,make_exp_tests,tensorflow/tensorflow/lite/testing/op_tests/exp.py,27,function,Make a set of tests to do exp. 813,make_expand_dims_tests,tensorflow/tensorflow/lite/testing/op_tests/expand_dims.py,28,function,Make a set of tests to do expand_dims. 814,make_eye_tests,tensorflow/tensorflow/lite/testing/op_tests/eye.py,28,function,Make a set of tests for tf.eye op. 815,make_fill_tests,tensorflow/tensorflow/lite/testing/op_tests/fill.py,28,function,Make a set of tests to do fill. 816,make_floor_tests,tensorflow/tensorflow/lite/testing/op_tests/floor.py,27,function,Make a set of tests to do floor. 817,make_fully_connected_tests,tensorflow/tensorflow/lite/testing/op_tests/fully_connected.py,28,function,Make a set of tests to do fully_connected. 818,make_fused_batch_norm_tests,tensorflow/tensorflow/lite/testing/op_tests/fused_batch_norm.py,27,function,Make a set of tests to do fused_batch_norm. 819,make_gather_tests,tensorflow/tensorflow/lite/testing/op_tests/gather.py,27,function,Make a set of tests to do gather.
820,make_gather_nd_tests,tensorflow/tensorflow/lite/testing/op_tests/gather_nd.py,27,function,Make a set of tests to do gather_nd. 821,make_gather_with_constant_tests,tensorflow/tensorflow/lite/testing/op_tests/gather_with_constant.py,28,function,Make a set of tests which feed a constant to the gather op for toco. 822,make_global_batch_norm_tests,tensorflow/tensorflow/lite/testing/op_tests/global_batch_norm.py,27,function,Make a set of tests to do batch_norm_with_global_normalization. 823,make_greater_tests,tensorflow/tensorflow/lite/testing/op_tests/greater.py,27,function,Make a set of tests to do greater. 824,make_greater_equal_tests,tensorflow/tensorflow/lite/testing/op_tests/greater_equal.py,27,function,Make a set of tests to do greater_equal. 825,_tflite_convert_verify_num_ops,tensorflow/tensorflow/lite/testing/op_tests/hardswish.py,29,function,Verifies that the result of the conversion is a single op. 826,make_hardswish_tests,tensorflow/tensorflow/lite/testing/op_tests/hardswish.py,47,function,Make a set of tests to do hardswish. 827,make_identity_tests,tensorflow/tensorflow/lite/testing/op_tests/identity.py,29,function,Make a set of tests to do identity. 828,make_l2norm_tests,tensorflow/tensorflow/lite/testing/op_tests/l2norm.py,28,function,Make a set of tests to do l2norm. 829,make_l2norm_shared_epsilon_tests,tensorflow/tensorflow/lite/testing/op_tests/l2norm_shared_epsilon.py,28,function,Regression test for a bug (b/122651451). 830,make_leaky_relu_tests,tensorflow/tensorflow/lite/testing/op_tests/leaky_relu.py,28,function,Make a set of tests to do LeakyRelu. 831,make_less_tests,tensorflow/tensorflow/lite/testing/op_tests/less.py,27,function,Make a set of tests to do less. 832,make_less_equal_tests,tensorflow/tensorflow/lite/testing/op_tests/less_equal.py,27,function,Make a set of tests to do less_equal. 833,make_local_response_norm_tests,tensorflow/tensorflow/lite/testing/op_tests/local_response_norm.py,28,function,Make a set of tests to do local_response_norm. 834,make_log_softmax_tests,tensorflow/tensorflow/lite/testing/op_tests/log_softmax.py,27,function,Make a set of tests to do log_softmax. 835,_make_logical_tests,tensorflow/tensorflow/lite/testing/op_tests/logic.py,26,function,Make a set of tests to do logical operations. 836,make_logical_or_tests,tensorflow/tensorflow/lite/testing/op_tests/logic.py,65,function,Make a set of tests to do logical_or. 837,make_logical_and_tests,tensorflow/tensorflow/lite/testing/op_tests/logic.py,71,function,Make a set of tests to do logical_and. 838,make_logical_xor_tests,tensorflow/tensorflow/lite/testing/op_tests/logic.py,77,function,"Make a set of tests to do logical_xor, test logical_not as well." 839,make_lstm_tests,tensorflow/tensorflow/lite/testing/op_tests/lstm.py,29,function,Make a set of tests to do basic Lstm cell. 840,make_matrix_diag_tests,tensorflow/tensorflow/lite/testing/op_tests/matrix_diag.py,27,function,Make a set of tests for tf.linalg.diag op. 841,make_matrix_set_diag_tests,tensorflow/tensorflow/lite/testing/op_tests/matrix_set_diag.py,27,function,Make a set of tests for tf.linalg.set_diag op. 842,make_maximum_tests,tensorflow/tensorflow/lite/testing/op_tests/maximum.py,27,function,Make a set of tests to do maximum. 843,make_minimum_tests,tensorflow/tensorflow/lite/testing/op_tests/minimum.py,27,function,Make a set of tests to do minimum. 844,make_mirror_pad_tests,tensorflow/tensorflow/lite/testing/op_tests/mirror_pad.py,28,function,Make a set of tests to do mirror_pad.
845,make_nearest_upsample_tests,tensorflow/tensorflow/lite/testing/op_tests/nearest_upsample.py,27,function,Make a set of tests to do nearest_upsample. 846,make_neg_tests,tensorflow/tensorflow/lite/testing/op_tests/neg.py,27,function,Make a set of tests to do neg. 847,make_not_equal_tests,tensorflow/tensorflow/lite/testing/op_tests/not_equal.py,27,function,Make a set of tests to do not equal. 848,make_one_hot_tests,tensorflow/tensorflow/lite/testing/op_tests/one_hot.py,27,function,Make a set of tests to do one_hot. 849,make_pack_tests,tensorflow/tensorflow/lite/testing/op_tests/pack.py,28,function,Make a set of tests to do stack. 850,make_pad_tests,tensorflow/tensorflow/lite/testing/op_tests/pad.py,28,function,Make a set of tests to do pad. 851,make_padv2_tests,tensorflow/tensorflow/lite/testing/op_tests/padv2.py,28,function,Make a set of tests to do padv2. 852,make_placeholder_with_default_tests,tensorflow/tensorflow/lite/testing/op_tests/placeholder_with_default.py,28,function,Make a set of tests to test placeholder_with_default. 853,make_pool_tests,tensorflow/tensorflow/lite/testing/op_tests/pool.py,26,function,"Make a set of tests to do average pooling. Args: pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`. allow_fully_quantize: bool, whether fully_quantize is allowed. Returns: A function representing the true generator (after curried pool_op_in)." 854,make_l2_pool,tensorflow/tensorflow/lite/testing/op_tests/pool.py,119,function,Given an input perform a sequence of TensorFlow ops to produce l2pool. 855,make_l2_pool_tests,tensorflow/tensorflow/lite/testing/op_tests/pool.py,131,function, 856,make_avg_pool_tests,tensorflow/tensorflow/lite/testing/op_tests/pool.py,136,function, 857,make_max_pool_tests,tensorflow/tensorflow/lite/testing/op_tests/pool.py,143,function, 858,make_prelu_tests,tensorflow/tensorflow/lite/testing/op_tests/prelu.py,28,function,Make a set of tests to do PReLU. 859,make_range_tests,tensorflow/tensorflow/lite/testing/op_tests/range.py,27,function,Make a set of tests to do range. 860,make_rank_tests,tensorflow/tensorflow/lite/testing/op_tests/rank.py,27,function,Make a set of tests to do rank. 861,make_reduce_tests,tensorflow/tensorflow/lite/testing/op_tests/reduce.py,27,function,"Make a set of tests to do reduce operation. Args: reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`. min_value: min value for created tensor data. max_value: max value for created tensor data. boolean_tensor_only: If true, will only generate tensor with boolean value. allow_fully_quantize: bool, whether fully_quantize is allowed. Returns: a function representing the true generator with `reduce_op_in` curried." 862,make_mean_tests,tensorflow/tensorflow/lite/testing/op_tests/reduce.py,219,function,Make a set of tests to do mean. 863,make_sum_tests,tensorflow/tensorflow/lite/testing/op_tests/reduce.py,231,function,Make a set of tests to do sum. 864,make_reduce_prod_tests,tensorflow/tensorflow/lite/testing/op_tests/reduce.py,243,function,Make a set of tests to do prod. 865,make_reduce_max_tests,tensorflow/tensorflow/lite/testing/op_tests/reduce.py,250,function,Make a set of tests to do max. 866,make_reduce_min_tests,tensorflow/tensorflow/lite/testing/op_tests/reduce.py,258,function,Make a set of tests to do min. 867,make_reduce_any_tests,tensorflow/tensorflow/lite/testing/op_tests/reduce.py,266,function,Make a set of tests to do any. 868,make_relu_tests,tensorflow/tensorflow/lite/testing/op_tests/relu.py,28,function,Make a set of tests to do relu. 
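Entries 853 (make_pool_tests) and 861 (make_reduce_tests) above both describe a currying pattern: the factory takes the TF op and returns the "true generator". A minimal sketch of that pattern with hypothetical names (the real generators build test_parameters and delegate to make_zip_of_tests):

```python
def make_op_tests_sketch(op_in, allow_fully_quantize=False):
  # The factory curries op_in; the closure it returns is the true generator.
  def true_generator(options):
    print("generating tests for", op_in,
          "fully_quantize allowed:", allow_fully_quantize)
  return true_generator

# Mirrors how make_avg_pool_tests / make_max_pool_tests specialize the factory.
make_avg_pool_tests_sketch = make_op_tests_sketch("tf.nn.avg_pool2d")
make_avg_pool_tests_sketch(options=None)
```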
869,make_relu1_tests,tensorflow/tensorflow/lite/testing/op_tests/relu1.py,28,function,Make a set of tests to do relu1. 870,make_relu6_tests,tensorflow/tensorflow/lite/testing/op_tests/relu6.py,28,function,Make a set of tests to do relu6. 871,make_reshape_tests,tensorflow/tensorflow/lite/testing/op_tests/reshape.py,28,function,Make a set of tests to do reshape. 872,make_resize_bilinear_tests,tensorflow/tensorflow/lite/testing/op_tests/resize_bilinear.py,27,function,Make a set of tests to do resize_bilinear. 873,make_resize_nearest_neighbor_tests,tensorflow/tensorflow/lite/testing/op_tests/resize_nearest_neighbor.py,27,function,Make a set of tests to do resize_nearest_neighbor. 874,make_resolve_constant_strided_slice_tests,tensorflow/tensorflow/lite/testing/op_tests/resolve_constant_strided_slice.py,29,function,Make a set of tests to show strided_slice yields incorrect results. 875,make_reverse_sequence_tests,tensorflow/tensorflow/lite/testing/op_tests/reverse_sequence.py,27,function,Make a set of tests to do reverse_sequence. 876,make_reverse_v2_tests,tensorflow/tensorflow/lite/testing/op_tests/reverse_v2.py,27,function,Make a set of tests to do reverse_v2. 877,make_rfft2d_tests,tensorflow/tensorflow/lite/testing/op_tests/rfft2d.py,28,function,Make a set of tests to do rfft2d. 878,make_round_tests,tensorflow/tensorflow/lite/testing/op_tests/round.py,27,function,Build the round op testing graph. 879,make_scatter_nd_tests,tensorflow/tensorflow/lite/testing/op_tests/scatter_nd.py,28,function,Make a set of tests to do scatter_nd. 880,make_shape_tests,tensorflow/tensorflow/lite/testing/op_tests/shape.py,28,function,Make a set of tests to do shape. 881,make_sigmoid_tests,tensorflow/tensorflow/lite/testing/op_tests/sigmoid.py,27,function,Make a set of tests to do sigmoid. 882,make_slice_tests,tensorflow/tensorflow/lite/testing/op_tests/slice.py,29,function,Make a set of tests to do slice. 883,make_softmax_tests,tensorflow/tensorflow/lite/testing/op_tests/softmax.py,27,function,Make a set of tests to do softmax. 884,make_space_to_batch_nd_tests,tensorflow/tensorflow/lite/testing/op_tests/space_to_batch_nd.py,28,function,Make a set of tests to do space_to_batch_nd. 885,make_space_to_depth_tests,tensorflow/tensorflow/lite/testing/op_tests/space_to_depth.py,27,function,Make a set of tests to do space_to_depth. 886,make_sparse_to_dense_tests,tensorflow/tensorflow/lite/testing/op_tests/sparse_to_dense.py,29,function,Make a set of tests to do sparse to dense. 887,make_split_tests,tensorflow/tensorflow/lite/testing/op_tests/split.py,28,function,Make a set of tests to do tf.split. 888,make_splitv_tests,tensorflow/tensorflow/lite/testing/op_tests/splitv.py,28,function,Make a set of tests to do tf.split_v. 889,make_squeeze_tests,tensorflow/tensorflow/lite/testing/op_tests/squeeze.py,27,function,Make a set of tests to do squeeze. 890,make_squeeze_transpose_tests,tensorflow/tensorflow/lite/testing/op_tests/squeeze_transpose.py,27,function,Make a set of tests to do squeeze followed by transpose. 891,_make_strided_slice_tests,tensorflow/tensorflow/lite/testing/op_tests/strided_slice.py,28,function,Utility function to make strided_slice_tests based on parameters. 892,make_strided_slice_tests,tensorflow/tensorflow/lite/testing/op_tests/strided_slice.py,100,function,Make a set of tests to do strided_slice. 893,make_strided_slice_1d_exhaustive_tests,tensorflow/tensorflow/lite/testing/op_tests/strided_slice.py,208,function,Make a set of exhaustive tests for 1D strided_slice. 
894,make_strided_slice_np_style_tests,tensorflow/tensorflow/lite/testing/op_tests/strided_slice_np_style.py,29,function,Make a set of tests to test strided_slice in np style. 895,make_tanh_tests,tensorflow/tensorflow/lite/testing/op_tests/tanh.py,28,function,Make a set of tests to do tanh. 896,make_tile_tests,tensorflow/tensorflow/lite/testing/op_tests/tile.py,27,function,Make a set of tests to do tile. 897,make_topk_tests,tensorflow/tensorflow/lite/testing/op_tests/topk.py,28,function,Make a set of tests to do topk. 898,make_transpose_tests,tensorflow/tensorflow/lite/testing/op_tests/transpose.py,28,function,Make a set of tests to do transpose. 899,make_transpose_conv_tests,tensorflow/tensorflow/lite/testing/op_tests/transpose_conv.py,33,function,Make a set of tests to do transpose_conv. 900,make_unfused_gru_tests,tensorflow/tensorflow/lite/testing/op_tests/unfused_gru.py,27,function,Make a set of tests for unfused gru op. 901,make_unidirectional_sequence_lstm_tests,tensorflow/tensorflow/lite/testing/op_tests/unidirectional_sequence_lstm.py,29,function,Make a set of tests to do unidirectional_sequence_lstm. 902,make_unidirectional_sequence_rnn_tests,tensorflow/tensorflow/lite/testing/op_tests/unidirectional_sequence_rnn.py,29,function,Make a set of tests to do unidirectional_sequence_rnn. 903,make_unique_tests,tensorflow/tensorflow/lite/testing/op_tests/unique.py,27,function,Make a set of tests for Unique op. 904,make_unpack_tests,tensorflow/tensorflow/lite/testing/op_tests/unpack.py,27,function,Make a set of tests to do unpack. 905,make_unroll_batch_matmul_tests,tensorflow/tensorflow/lite/testing/op_tests/unroll_batch_matmul.py,27,function,Make a set of tests to test unroll_batch_matmul. 906,make_where_tests,tensorflow/tensorflow/lite/testing/op_tests/where.py,27,function,Make a set of tests to do where. 907,make_zeros_like_tests,tensorflow/tensorflow/lite/testing/op_tests/zeros_like.py,27,function,Make a set of tests to do zeros_like. 908,html_escape,tensorflow/tensorflow/lite/toco/logging/gen_html.py,37,function, 909,get_input_type_from_signature,tensorflow/tensorflow/lite/toco/logging/gen_html.py,41,function,"Parses op_signature and returns a string denoting the input tensor type. Args: op_signature: a string specifying the signature of a particular operator. The signature of an operator contains the input tensor's shape and type, output tensor's shape and type, operator's name and its version. It has the following schema: INPUT:input_1_shape::input_1_type::input_2_shape::input_2_type::.. ::OUTPUT:output_1_shape::output_1_type::output_2_shape::output_2_type:: ..::NAME:operator_name ::VERSION:operator_version An example of an operator signature is: INPUT:[1,73,73,160]::float::[64,1,1,160]::float::[64]::float:: OUTPUT:[1,73,73,64]::float::NAME:Conv::VERSION:1 Returns: A string denoting the input tensors' type. In the form of shape/type separated by comma. For example: shape:[1,73,73,160],type:float,shape:[64,1,1,160],type:float,shape:[64], type:float" 910,get_operator_type,tensorflow/tensorflow/lite/toco/logging/gen_html.py,78,function, 911,HTMLGenerator,tensorflow/tensorflow/lite/toco/logging/gen_html.py,87,class,Utility class to generate an HTML report. 912,gen_conversion_log_html,tensorflow/tensorflow/lite/toco/logging/gen_html.py,208,function,"Generates an HTML report about the conversion process. Args: conversion_log_dir: A string specifying the file directory of the conversion logs. 
It's required that before calling this function, the `conversion_log_dir` already contains the following files: `toco_log_before.pb`, `toco_log_after.pb`, `toco_tf_graph.dot`, `toco_tflite_graph.dot`. quantization_enabled: A boolean, passed from the tflite converter to indicate whether post-training quantization is enabled during conversion. tflite_graph_path: A string, the filepath to the converted TFLite model. Raises: IOError: When any of the required files doesn't exist." 913,GenHtmlTest,tensorflow/tensorflow/lite/toco/logging/gen_html_test.py,32,class, 914,execute,tensorflow/tensorflow/lite/toco/python/toco_from_protos.py,32,function,Runs the converter. 915,main,tensorflow/tensorflow/lite/toco/python/toco_from_protos.py,61,function, 916,TensorName,tensorflow/tensorflow/lite/toco/python/toco_from_protos_test.py,30,function,Get the canonical name (without the :0 suffix). 917,TocoFromProtosTest,tensorflow/tensorflow/lite/toco/python/toco_from_protos_test.py,35,class, 918,get_image,tensorflow/tensorflow/lite/tools/convert_image_to_csv.py,41,function,"Returns an image loaded into an np.ndarray with dims [height, width, (3 or 1)]. Args: width: Width to rescale the image to. height: Height to rescale the image to. want_grayscale: Whether the result should be converted to grayscale. filepath: Path of the image file. Returns: np.ndarray of shape (height, width, channels) where channels is 1 if want_grayscale is true, otherwise 3." 919,array_to_int_csv,tensorflow/tensorflow/lite/tools/convert_image_to_csv.py,65,function,"Converts all elements in a numerical array to a comma-separated string. Args: array_data: Numerical array to convert. Returns: String containing array values as integers, separated by commas." 920,run_main,tensorflow/tensorflow/lite/tools/convert_image_to_csv.py,79,function,Application run loop. 921,main,tensorflow/tensorflow/lite/tools/convert_image_to_csv.py,110,function, 922,ConvertImageToCsvTest,tensorflow/tensorflow/lite/tools/convert_image_to_csv_test.py,34,class, 923,convert_bytearray_to_object,tensorflow/tensorflow/lite/tools/flatbuffer_utils.py,38,function,Converts a tflite model from a bytearray to an object for parsing (a usage sketch of the flatbuffer_utils helpers follows below). 924,read_model,tensorflow/tensorflow/lite/tools/flatbuffer_utils.py,44,function,"Reads a tflite model as a python object. Args: input_tflite_file: Full path name to the input tflite file Raises: RuntimeError: If input_tflite_file path is invalid. IOError: If input_tflite_file cannot be opened. Returns: A python object corresponding to the input tflite file." 925,read_model_with_mutable_tensors,tensorflow/tensorflow/lite/tools/flatbuffer_utils.py,64,function,"Reads a tflite model as a python object with mutable tensors. Similar to read_model() with the addition that the returned object has mutable tensors (read_model() returns an object with immutable tensors). Args: input_tflite_file: Full path name to the input tflite file Raises: RuntimeError: If input_tflite_file path is invalid. IOError: If input_tflite_file cannot be opened. Returns: A mutable python object corresponding to the input tflite file." 926,convert_object_to_bytearray,tensorflow/tensorflow/lite/tools/flatbuffer_utils.py,83,function,Converts a tflite model from an object to a bytearray. 927,write_model,tensorflow/tensorflow/lite/tools/flatbuffer_utils.py,93,function,"Writes the tflite model, a python object, into the output file. Args: model_object: A tflite model as a python object output_tflite_file: Full path name to the output tflite file.
Raises: IOError: If output_tflite_file path is invalid or cannot be opened." 928,strip_strings,tensorflow/tensorflow/lite/tools/flatbuffer_utils.py,108,function,"Strips all nonessential strings from the model to reduce model size. We remove the following strings: (find strings by searching "":string"" in the tensorflow lite flatbuffer schema) 1. Model description 2. SubGraph name 3. Tensor names We retain OperatorCode custom_code and Metadata name. Args: model: The model from which to remove nonessential strings." 929,randomize_weights,tensorflow/tensorflow/lite/tools/flatbuffer_utils.py,130,function,"Randomize weights in a model. Args: model: The model in which to randomize weights. random_seed: The input to the random number generator (default value is 0)." 930,WriteReadModelTest,tensorflow/tensorflow/lite/tools/flatbuffer_utils_test.py,29,class, 931,StripStringsTest,tensorflow/tensorflow/lite/tools/flatbuffer_utils_test.py,74,class, 932,RandomizeWeightsTest,tensorflow/tensorflow/lite/tools/flatbuffer_utils_test.py,119,class, 933,main,tensorflow/tensorflow/lite/tools/randomize_weights.py,34,function, 934,main,tensorflow/tensorflow/lite/tools/strip_strings.py,34,function,Application run loop. 935,build_mock_flatbuffer_model,tensorflow/tensorflow/lite/tools/test_utils.py,30,function,Creates a flatbuffer containing an example model. 936,load_model_from_flatbuffer,tensorflow/tensorflow/lite/tools/test_utils.py,211,function,Loads a model as a python object from a flatbuffer model. 937,build_mock_model,tensorflow/tensorflow/lite/tools/test_utils.py,218,function,Creates an object containing an example model. 938,TensorTypeToName,tensorflow/tensorflow/lite/tools/visualize.py,202,function,Converts a numerical enum to a readable tensor type. 939,BuiltinCodeToName,tensorflow/tensorflow/lite/tools/visualize.py,210,function,Converts a builtin op code enum to a readable name. 940,NameListToString,tensorflow/tensorflow/lite/tools/visualize.py,218,function,Converts a list of integers to the equivalent ASCII string. 941,OpCodeMapper,tensorflow/tensorflow/lite/tools/visualize.py,229,class,Maps an opcode index to an op name. 942,DataSizeMapper,tensorflow/tensorflow/lite/tools/visualize.py,245,class,"For buffers, report the number of bytes." 943,TensorMapper,tensorflow/tensorflow/lite/tools/visualize.py,255,class,Maps a list of tensor indices to a tooltip hoverable indicator of more. 944,GenerateGraph,tensorflow/tensorflow/lite/tools/visualize.py,278,function,Produces the HTML required to have a d3 visualization of the dag. 945,GenerateTableHtml,tensorflow/tensorflow/lite/tools/visualize.py,337,function,"Given a list of object values and keys to print, make an HTML table. Args: items: Items to print an array of dicts. keys_to_print: (key, display_fn). `key` is a key in the object. i.e. items[0][key] should exist. display_fn is the mapping function on display. i.e. the displayed html cell will have the string returned by `mapping_fn(items[0][key])`. display_index: add a column which is the index of each row in `items`. Returns: An html table." 946,CamelCaseToSnakeCase,tensorflow/tensorflow/lite/tools/visualize.py,375,function,Converts an identifier in CamelCase to snake_case. 947,FlatbufferToDict,tensorflow/tensorflow/lite/tools/visualize.py,381,function,"Converts a hierarchy of FB objects into a nested dict. We avoid transforming big parts of the flat buffer into python arrays. This speeds conversion from ten minutes to a few seconds on big graphs. Args: fb: a flat buffer structure. (i.e. 
ModelT) preserve_as_numpy: true if all downstream np.arrays should be preserved. false if all downstream np.array should become python arrays Returns: A dictionary representing the flatbuffer rather than a flatbuffer object." 948,CreateDictFromFlatbuffer,tensorflow/tensorflow/lite/tools/visualize.py,413,function, 949,CreateHtmlFile,tensorflow/tensorflow/lite/tools/visualize.py,419,function,"Given a tflite model in `tflite_input` file, produce html description." 950,main,tensorflow/tensorflow/lite/tools/visualize.py,506,function, 951,VisualizeTest,tensorflow/tensorflow/lite/tools/visualize_test.py,29,class, 952,main,tensorflow/tensorflow/lite/tools/zip_files.py,32,function, 953,_get_ground_truth_detections,tensorflow/tensorflow/lite/tools/evaluation/tasks/coco_object_detection/preprocess_coco_minival.py,44,function,"Processes the annotations JSON file and returns ground truth data corresponding to allowlisted image IDs. Args: instances_file: COCO instances JSON file, usually named as instances_val20xx.json. allowlist_file: File containing COCO minival image IDs to allowlist for evaluation, one per line. num_images: Number of allowlisted images to pre-process. First num_images are chosen based on sorted list of filenames. If None, all allowlisted files are preprocessed. Returns: A dict mapping image id (int) to a per-image dict that contains: 'filename', 'image' & 'height' mapped to filename & image dimensions respectively AND 'detections' to a list of detection dicts, with each mapping: 'category_id' to COCO category id (starting with 1) & 'bbox' to a list of dimension-normalized [top, left, bottom, right] bounding-box values." 954,_dump_data,tensorflow/tensorflow/lite/tools/evaluation/tasks/coco_object_detection/preprocess_coco_minival.py,145,function,"Dumps images & data from ground-truth objects into output_folder_path. The following are created in output_folder_path: images/: sub-folder for allowlisted validation images. ground_truth.pb: A binary proto file containing all ground-truth object-sets. Args: ground_truth_detections: A dict mapping image id to ground truth data. Output of _get_ground_truth_detections. images_folder_path: Validation images folder output_folder_path: folder to output files to." 955,_parse_args,tensorflow/tensorflow/lite/tools/evaluation/tasks/coco_object_detection/preprocess_coco_minival.py,190,function,"Creates a parser that parse the command line arguments. Returns: A namespace parsed from command line arguments." 956,_synset_to_word,tensorflow/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/generate_validation_labels.py,30,function,Returns synset to word dictionary by reading sysnset arrays. 957,_validation_file_path,tensorflow/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/generate_validation_labels.py,50,function, 958,_synset_array_path,tensorflow/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/generate_validation_labels.py,54,function, 959,_generate_validation_labels,tensorflow/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/generate_validation_labels.py,58,function, 960,_check_arguments,tensorflow/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/generate_validation_labels.py,67,function, 961,main,tensorflow/tensorflow/lite/tools/evaluation/tasks/imagenet_image_classification/generate_validation_labels.py,80,function, 962,main,tensorflow/tensorflow/lite/tools/optimize/python/modify_model_interface.py,38,function,Application run loop. 
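Entries 923-929 above form a small read/modify/write API over tflite flatbuffers. A usage sketch, assuming the module import path follows the file path listed in those entries and using a hypothetical model path:

```python
from tensorflow.lite.tools import flatbuffer_utils

# Read the model as a python object (entry 924; see entry 925 for the
# mutable-tensor variant), strip nonessential strings (entry 928), randomize
# the weights (entry 929), and write the result back out (entry 927).
model = flatbuffer_utils.read_model("/tmp/model.tflite")
flatbuffer_utils.strip_strings(model)
flatbuffer_utils.randomize_weights(model, random_seed=0)
flatbuffer_utils.write_model(model, "/tmp/model_stripped.tflite")
```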
963,_parse_type_to_int,tensorflow/tensorflow/lite/tools/optimize/python/modify_model_interface_lib.py,27,function,"Converts a tflite type to its integer representation. Args: dtype: tf.DType representing the inference type. flag: str representing the flag name. Returns: integer, a tflite TensorType enum value. Raises: ValueError: Unsupported tflite type." 964,modify_model_interface,tensorflow/tensorflow/lite/tools/optimize/python/modify_model_interface_lib.py,52,function,"Modify a quantized model's interface (input/output) from float to integer. Args: input_file: Full path name to the input tflite file. output_file: Full path name to the output tflite file. input_type: Final input interface type. output_type: Final output interface type. Raises: RuntimeError: If the modification of the model interface was unsuccessful. ValueError: If the input_type or output_type is unsupported." 965,build_tflite_model_with_full_integer_quantization,tensorflow/tensorflow/lite/tools/optimize/python/modify_model_interface_lib_test.py,31,function, 966,ModifyModelInterfaceTest,tensorflow/tensorflow/lite/tools/optimize/python/modify_model_interface_lib_test.py,56,class, 967,FormatConverterTest,tensorflow/tensorflow/lite/tools/optimize/sparsity/python/format_converter_extension_test.py,28,class, 968,get_build_cpus,tensorflow/tensorflow/lite/tools/pip_package/setup.py,72,function, 969,make_args,tensorflow/tensorflow/lite/tools/pip_package/setup.py,80,function,Construct make command line. 970,make_output,tensorflow/tensorflow/lite/tools/pip_package/setup.py,94,function,Invoke make on the target and return output. 971,make,tensorflow/tensorflow/lite/tools/pip_package/setup.py,99,function,"Invoke make to build tflite C++ sources. Build dependencies: apt-get install swig libjpeg-dev zlib1g-dev python3-dev python3-numpy" 972,download_dependencies,tensorflow/tensorflow/lite/tools/pip_package/setup.py,108,function,Download build dependencies if not already done. 973,CustomBuildExt,tensorflow/tensorflow/lite/tools/pip_package/setup.py,114,class,Customized build extension. 974,CustomBuildPy,tensorflow/tensorflow/lite/tools/pip_package/setup.py,130,class, 975,get_pybind_include,tensorflow/tensorflow/lite/tools/pip_package/setup.py,137,function,"The pybind11 include directory is not correctly resolved. This fixes the include directory to /usr/local/pythonX.X. Returns: include directories to find pybind11" 976,set_signature_defs,tensorflow/tensorflow/lite/tools/signature/signature_def_utils.py,25,function,"Sets SignatureDefs to the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: Binary TFLite model (bytes or bytes-like object) to which to add signature_def. signature_def_map: dict containing SignatureDefs to store in metadata. Returns: buffer: A TFLite model binary identical to model buffer with metadata field containing SignatureDef. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model. signature_def_map is empty or does not contain a SignatureDef." 977,get_signature_defs,tensorflow/tensorflow/lite/tools/signature/signature_def_utils.py,51,function,"Get SignatureDef dict from the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: TFLite model buffer to get the signature_def. Returns: dict mapping serving names to SignatureDefs if they exist, otherwise an empty dict. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model. DecodeError: SignatureDef cannot be parsed from TfLite SignatureDef metadata."
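Entries 976 and 977 above (with entry 978 just below) describe a set/get/clear cycle over the SignatureDef metadata of a model buffer. A usage sketch; the model path is hypothetical and the empty SignatureDef is only a placeholder for a real signature_def_map:

```python
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.lite.tools.signature import signature_def_utils

with open("/tmp/model.tflite", "rb") as f:  # hypothetical model path
  tflite_model = f.read()

# signature_def_map: serving names -> SignatureDef protos (entry 976).
signature_def_map = {"serving_default": meta_graph_pb2.SignatureDef()}

updated = signature_def_utils.set_signature_defs(tflite_model, signature_def_map)
print(list(signature_def_utils.get_signature_defs(updated)))  # serving names
stripped = signature_def_utils.clear_signature_defs(updated)  # metadata removed
```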
978,clear_signature_defs,tensorflow/tensorflow/lite/tools/signature/signature_def_utils.py,78,function,"Clears SignatureDefs from the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: TFLite model buffer to remove signature_defs. Returns: buffer: A TFLite model binary identical to model buffer with no SignatureDef metadata. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model." 979,SignatureDefUtilsTest,tensorflow/tensorflow/lite/tools/signature/signature_def_utils_test.py,30,class, 980,read32,tensorflow/tensorflow/lite/tutorials/dataset.py,35,function,Read 4 bytes from bytestream as an unsigned 32-bit integer. 981,check_image_file_header,tensorflow/tensorflow/lite/tutorials/dataset.py,41,function,Validate that filename corresponds to images for the MNIST dataset. 982,check_labels_file_header,tensorflow/tensorflow/lite/tutorials/dataset.py,57,function,Validate that filename corresponds to labels for the MNIST dataset. 983,download,tensorflow/tensorflow/lite/tutorials/dataset.py,67,function,Download (and unzip) a file from the MNIST dataset if not already done. 984,dataset,tensorflow/tensorflow/lite/tutorials/dataset.py,86,function,Download and parse MNIST dataset. 985,train,tensorflow/tensorflow/lite/tutorials/dataset.py,114,function,tf.data.Dataset object for MNIST training data. 986,test,tensorflow/tensorflow/lite/tutorials/dataset.py,120,function,tf.data.Dataset object for MNIST test data. 987,test_image_generator,tensorflow/tensorflow/lite/tutorials/mnist_tflite.py,35,function, 988,run_eval,tensorflow/tensorflow/lite/tutorials/mnist_tflite.py,47,function,"Performs evaluation for input image over specified model. Args: interpreter: TFLite interpreter initialized with model to execute. input_image: Image input to the model. Returns: output: output tensor of model being executed." 989,main,tensorflow/tensorflow/lite/tutorials/mnist_tflite.py,72,function, 990,set_dlopen_flags,tensorflow/tensorflow/python/pywrap_dlopen_global_flags.py,43,function, 991,reset_dlopen_flags,tensorflow/tensorflow/python/pywrap_dlopen_global_flags.py,48,function, 992,import_graphdef,tensorflow/tensorflow/python/pywrap_mlir.py,26,function, 993,experimental_convert_saved_model_to_mlir,tensorflow/tensorflow/python/pywrap_mlir.py,32,function, 994,experimental_convert_saved_model_v1_to_mlir,tensorflow/tensorflow/python/pywrap_mlir.py,39,function, 995,experimental_run_pass_pipeline,tensorflow/tensorflow/python/pywrap_mlir.py,48,function, 996,enable,tensorflow/tensorflow/python/tf2.py,30,function, 997,disable,tensorflow/tensorflow/python/tf2.py,36,function, 998,enabled,tensorflow/tensorflow/python/tf2.py,42,function, 999,AssertTransformer,tensorflow/tensorflow/python/autograph/converters/asserts.py,27,class,Transforms Assert nodes to Call so they can be handled as functions. 1000,transform,tensorflow/tensorflow/python/autograph/converters/asserts.py,50,function, 1001,AssertsTest,tensorflow/tensorflow/python/autograph/converters/asserts_test.py,30,class, 1002,_Break,tensorflow/tensorflow/python/autograph/converters/break_statements.py,29,class, 1003,BreakTransformer,tensorflow/tensorflow/python/autograph/converters/break_statements.py,39,class,Canonicalizes break statements into additional conditionals. 
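Entry 980 above, read32, reads 4 bytes from a bytestream as an unsigned 32-bit integer when parsing MNIST headers. A standard-library sketch, assuming the big-endian byte order that the MNIST file format uses:

```python
import io
import struct

def read32_sketch(bytestream):
  # MNIST headers store unsigned 32-bit integers in big-endian order.
  return struct.unpack(">I", bytestream.read(4))[0]

print(read32_sketch(io.BytesIO(b"\x00\x00\x08\x03")))  # 2051, the MNIST image magic
```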
1004,transform,tensorflow/tensorflow/python/autograph/converters/break_statements.py,183,function, 1005,BreakCanonicalizationTest,tensorflow/tensorflow/python/autograph/converters/break_statements_test.py,27,class, 1006,_Function,tensorflow/tensorflow/python/autograph/converters/call_trees.py,40,class, 1007,_ArgTemplateBuilder,tensorflow/tensorflow/python/autograph/converters/call_trees.py,51,class,"Constructs a tuple representing the positional arguments in a call. Example (yes, it's legal Python 3): f(*args1, b, *args2, c, d) -> args1 + (b,) + args2 + (c, d)" 1008,CallTreeTransformer,tensorflow/tensorflow/python/autograph/converters/call_trees.py,96,class,Transforms the call tree by renaming transformed symbols. 1009,transform,tensorflow/tensorflow/python/autograph/converters/call_trees.py,211,function,"Transform function calls to their compiled counterparts. Args: node: AST ctx: EntityContext Returns: A tuple (node, new_names): node: The transformed AST new_names: set(string), containing any newly-generated names" 1010,MockConvertedCall,tensorflow/tensorflow/python/autograph/converters/call_trees_test.py,30,class, 1011,CallTreesTest,tensorflow/tensorflow/python/autograph/converters/call_trees_test.py,42,class, 1012,ConditionalExpressionTransformer,tensorflow/tensorflow/python/autograph/converters/conditional_expressions.py,28,class,Converts conditional expressions to functional form. 1013,transform,tensorflow/tensorflow/python/autograph/converters/conditional_expressions.py,48,function, 1014,ConditionalExpressionsTest,tensorflow/tensorflow/python/autograph/converters/conditional_expressions_test.py,26,class, 1015,_Continue,tensorflow/tensorflow/python/autograph/converters/continue_statements.py,29,class, 1016,_Block,tensorflow/tensorflow/python/autograph/converters/continue_statements.py,40,class,"Tracks information about lexical blocks as they are visited in the AST. Mainly, this object tracks the creation of block guards that replace `continue` statements (e.g. `if not continue_:`). Attributes: create_guard_current: bool, whether to create a guard for the current statement. create_guard_next: bool, whether to create a guard for the next statement. is_loop_type: bool, whether this block is the body of a loop." 1017,ContinueCanonicalizationTransformer,tensorflow/tensorflow/python/autograph/converters/continue_statements.py,60,class,Canonicalizes continue statements into additional conditionals. 1018,transform,tensorflow/tensorflow/python/autograph/converters/continue_statements.py,163,function, 1019,ContinueCanonicalizationTest,tensorflow/tensorflow/python/autograph/converters/continue_statements_test.py,27,class, 1020,_Function,tensorflow/tensorflow/python/autograph/converters/control_flow.py,41,class, 1021,ControlFlowTransformer,tensorflow/tensorflow/python/autograph/converters/control_flow.py,46,class,Transforms control flow structures like loops and conditionals. 1022,AnnotatedDef,tensorflow/tensorflow/python/autograph/converters/control_flow.py,395,class, 1023,transform,tensorflow/tensorflow/python/autograph/converters/control_flow.py,402,function, 1024,ControlFlowTransformer,tensorflow/tensorflow/python/autograph/converters/control_flow_deprecated_py2.py,44,class,Transforms control flow structures like loops and conditionals.
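Entry 1007 above gives the equivalence f(*args1, b, *args2, c, d) -> args1 + (b,) + args2 + (c, d) for _ArgTemplateBuilder. The identity can be checked directly in plain Python 3:

```python
def f(*args):
  return args

args1, args2 = (1, 2), (3,)
b, c, d = 10, 20, 30

# The call's positional arguments flatten into one tuple, exactly as the
# docstring's template describes.
assert f(*args1, b, *args2, c, d) == args1 + (b,) + args2 + (c, d)
print(f(*args1, b, *args2, c, d))  # (1, 2, 10, 3, 20, 30)
```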
1025,AnnotatedDef,tensorflow/tensorflow/python/autograph/converters/control_flow_deprecated_py2.py,623,class, 1026,transform,tensorflow/tensorflow/python/autograph/converters/control_flow_deprecated_py2.py,630,function, 1027,ControlFlowTestBase,tensorflow/tensorflow/python/autograph/converters/control_flow_test.py,43,class, 1028,NestedControlFlowTest,tensorflow/tensorflow/python/autograph/converters/control_flow_test.py,59,class, 1029,WhileStatementTest,tensorflow/tensorflow/python/autograph/converters/control_flow_test.py,106,class, 1030,IfStatementTest,tensorflow/tensorflow/python/autograph/converters/control_flow_test.py,352,class, 1031,ForStatementTest,tensorflow/tensorflow/python/autograph/converters/control_flow_test.py,598,class, 1032,AdvancedControlFlowTest,tensorflow/tensorflow/python/autograph/converters/control_flow_test.py,688,class, 1033,_LoopScope,tensorflow/tensorflow/python/autograph/converters/directives.py,48,class, 1034,_map_args,tensorflow/tensorflow/python/autograph/converters/directives.py,55,function,"Maps AST call nodes to the actual function's arguments. Args: call_node: ast.Call function: Callable[..., Any], the actual function matching call_node Returns: Dict[Text, ast.AST], mapping each of the function's argument names to the respective AST node. Raises: ValueError: if the default arguments are not correctly set" 1035,DirectivesTransformer,tensorflow/tensorflow/python/autograph/converters/directives.py,90,class,Parses compiler directives and converts them into AST annotations. 1036,transform,tensorflow/tensorflow/python/autograph/converters/directives.py,180,function, 1037,DirectivesTest,tensorflow/tensorflow/python/autograph/converters/directives_test.py,28,class, 1038,_Function,tensorflow/tensorflow/python/autograph/converters/functions.py,32,class, 1039,FunctionTransformer,tensorflow/tensorflow/python/autograph/converters/functions.py,38,class,Wraps function bodies around autograph-specific boilerplate. 1040,transform,tensorflow/tensorflow/python/autograph/converters/functions.py,134,function, 1041,FunctionTransformer,tensorflow/tensorflow/python/autograph/converters/functions_test.py,31,class, 1042,ListCompTransformer,tensorflow/tensorflow/python/autograph/converters/list_comprehensions.py,42,class,Lowers list comprehensions into standard control flow. 1043,transform,tensorflow/tensorflow/python/autograph/converters/list_comprehensions.py,81,function, 1044,ListCompTest,tensorflow/tensorflow/python/autograph/converters/list_comprehensions_test.py,26,class, 1045,_Statement,tensorflow/tensorflow/python/autograph/converters/lists.py,45,class, 1046,ListTransformer,tensorflow/tensorflow/python/autograph/converters/lists.py,51,class,Converts lists and related operations to their TF counterpart. 1047,transform,tensorflow/tensorflow/python/autograph/converters/lists.py,239,function, 1048,ListTest,tensorflow/tensorflow/python/autograph/converters/lists_test.py,33,class, 1049,LogicalExpressionTransformer,tensorflow/tensorflow/python/autograph/converters/logical_expressions.py,49,class,Converts logical expressions to corresponding TF calls. 
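Entry 1034 above, _map_args, maps an ast.Call node onto the called function's parameter names. A rough approximation with inspect.signature (map_args_sketch is hypothetical and skips the converter's error handling):

```python
import ast
import inspect

def map_args_sketch(call_node, function):
  # Bind the Call node's argument AST nodes to parameter names;
  # apply_defaults() fills in parameters the call site omitted.
  sig = inspect.signature(function)
  bound = sig.bind(*call_node.args,
                   **{kw.arg: kw.value for kw in call_node.keywords})
  bound.apply_defaults()
  return dict(bound.arguments)

def g(x, y=0, z=3):
  return x + y + z

call_node = ast.parse("g(1, y=2)").body[0].value
print(map_args_sketch(call_node, g))  # {'x': <ast node>, 'y': <ast node>, 'z': 3}
```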
1050,transform,tensorflow/tensorflow/python/autograph/converters/logical_expressions.py,135,function, 1051,LogicalExpressionTest,tensorflow/tensorflow/python/autograph/converters/logical_expressions_test.py,28,class, 1052,_RewriteBlock,tensorflow/tensorflow/python/autograph/converters/return_statements.py,37,class, 1053,ConditionalReturnRewriter,tensorflow/tensorflow/python/autograph/converters/return_statements.py,43,class,"Rewrites a pattern where it's not obvious that all paths return a value. This rewrite allows avoiding intermediate None return values. The following pattern: if cond: return else: is converted to: if cond: return else: and vice-versa (if the else returns, subsequent statements are moved under the if branch)." 1054,_Block,tensorflow/tensorflow/python/autograph/converters/return_statements.py,159,class, 1055,_Function,tensorflow/tensorflow/python/autograph/converters/return_statements.py,172,class, 1056,ReturnStatementsTransformer,tensorflow/tensorflow/python/autograph/converters/return_statements.py,183,class,"Lowers return statements into variables and conditionals. Specifically, the following pattern: return val is converted to: do_return = False retval = None do_return = True retval = val if not do_return: return retval The conversion adjusts loops as well: while cond: return retval is converted to: while not do_return and cond: do_return = True retval = val" 1057,transform,tensorflow/tensorflow/python/autograph/converters/return_statements.py,392,function,"Ensure a function has only a single return, at the end." 1058,SingleReturnTest,tensorflow/tensorflow/python/autograph/converters/return_statements_test.py,28,class, 1059,SliceTransformer,tensorflow/tensorflow/python/autograph/converters/slices.py,28,class,"Converts slicing operations to their TF counterpart. Currently, relying on the default slice operator that Tensor uses is insufficient, because TensorArray and tensor lists use dedicated index read and write functions." 1060,transform,tensorflow/tensorflow/python/autograph/converters/slices.py,84,function, 1061,SliceTest,tensorflow/tensorflow/python/autograph/converters/slices_test.py,31,class, 1062,VariableAccessTransformer,tensorflow/tensorflow/python/autograph/converters/variables.py,28,class,"Rewrites basic symbol reads. This transformer rewrites variable reads with a ""read"" operator which allows tracking activity. Example: For a basic statement: a = b + c This is translated to: a = ld(b) + ld(c) Augmented assignment operations also introduce an `ld` operator: a += b The assignment target also receives an operator to properly represent the read: a = ld(a) a += ld(b)" 1063,transform,tensorflow/tensorflow/python/autograph/converters/variables.py,100,function, 1064,VariablesTest,tensorflow/tensorflow/python/autograph/converters/variables_test.py,26,class, 1065,_control_ctx,tensorflow/tensorflow/python/autograph/core/ag_ctx.py,29,function, 1066,control_status_ctx,tensorflow/tensorflow/python/autograph/core/ag_ctx.py,35,function, 1067,Status,tensorflow/tensorflow/python/autograph/core/ag_ctx.py,40,class, 1068,ControlStatusCtx,tensorflow/tensorflow/python/autograph/core/ag_ctx.py,46,class,A context that tracks whether autograph is enabled by the user. 1069,NullCtx,tensorflow/tensorflow/python/autograph/core/ag_ctx.py,66,class,Helper substitute for contextlib.nullcontext.
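The do_return/retval lowering that ReturnStatementsTransformer describes can be written out by hand as an executable sketch; the names below are illustrative, not the transformer's literal output.

```python
# Before: two return statements.
def sign(x):
  if x >= 0:
    return 1
  return -1

# After lowering (sketch): every return becomes writes to do_return/retval,
# guarded so later statements are skipped, leaving one return at the end.
def sign_lowered(x):
  do_return = False
  retval = None
  if x >= 0:
    do_return = True
    retval = 1
  if not do_return:
    do_return = True
    retval = -1
  return retval

assert sign(-3) == sign_lowered(-3) == -1
```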
1070,_default_control_status_ctx,tensorflow/tensorflow/python/autograph/core/ag_ctx.py,76,function, 1071,Rule,tensorflow/tensorflow/python/autograph/core/config_lib.py,27,class,Base class for conversion rules. 1072,Action,tensorflow/tensorflow/python/autograph/core/config_lib.py,38,class, 1073,DoNotConvert,tensorflow/tensorflow/python/autograph/core/config_lib.py,44,class,Indicates that this module should not be converted. 1074,Convert,tensorflow/tensorflow/python/autograph/core/config_lib.py,56,class,Indicates that this module should be converted. 1075,Feature,tensorflow/tensorflow/python/autograph/core/converter.py,83,class,"This enumeration represents optional conversion options. These conversion options are experimental. They are subject to change without notice and offer no guarantees. _Example Usage_ ```python optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS @tf.function(experimental_autograph_options=optionals) def f(i): if i == 0: # EQUALITY_OPERATORS allows the use of == here. tf.print('i is zero') ``` Attributes: ALL: Enable all features. AUTO_CONTROL_DEPS: Insertion of control dependencies in the generated code. ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert. BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to their TF counterparts. EQUALITY_OPERATORS: Whether to convert the comparison operators, like equality. This is soon to be deprecated as support is being added to the Tensor class. LISTS: Convert list idioms, like initializers, slices, append, etc. NAME_SCOPES: Insert name scopes that name ops according to context, like the function they were defined in." 1076,ConversionOptions,tensorflow/tensorflow/python/autograph/core/converter.py,138,class,"Immutable container for global conversion flags. Attributes: recursive: bool, whether to recursively convert any user functions or classes that the converted function may use. user_requested: bool, whether the conversion was explicitly requested by the user, as opposed to being performed as a result of other logic. This value always auto-resets to False in child conversions. optional_features: Union[Feature, Set[Feature]], controls the use of optional features in the conversion process. See Feature for available options." 1077,ProgramContext,tensorflow/tensorflow/python/autograph/core/converter.py,236,class,"ProgramContext keeps track of converting function hierarchies. Attributes: options: ConversionOptions autograph_module: Deprecated. Do not use." 1078,Base,tensorflow/tensorflow/python/autograph/core/converter.py,249,class,"All converters should inherit from this class. Attributes: ctx: EntityContext" 1079,TestConverter,tensorflow/tensorflow/python/autograph/core/converter_test.py,32,class, 1080,ConversionOptionsTest,tensorflow/tensorflow/python/autograph/core/converter_test.py,36,class, 1081,ConverterBaseTest,tensorflow/tensorflow/python/autograph/core/converter_test.py,64,class, 1082,allowlist,tensorflow/tensorflow/python/autograph/core/converter_testing.py,35,function,Helper that marks a callable as allowlisted. 1083,is_inside_generated_code,tensorflow/tensorflow/python/autograph/core/converter_testing.py,47,function,Tests whether the caller is generated code. Implementation-specific. 1084,TestingTranspiler,tensorflow/tensorflow/python/autograph/core/converter_testing.py,66,class,Testing version that only applies given transformations. 1085,TestCase,tensorflow/tensorflow/python/autograph/core/converter_testing.py,98,class,Base class for unit tests in this module.
Contains relevant utilities. 1086,FunctionScope,tensorflow/tensorflow/python/autograph/core/function_wrappers.py,33,class,"Context manager that wraps the body of a converted function. This context manager handles various operations related to the scope of a function: * optional TF name scopes - these name scopes match the name of the function, for easy visualization in TensorBoard; * optional automatic control dependencies - this adds the same mechanism for control dependencies that is used by `@tf.function`; it can be optionally enabled when using `tf.autograph.to_graph`; * tracking of autograph conversion state (whether it's enabled by the user, conversion options);" 1087,with_function_scope,tensorflow/tensorflow/python/autograph/core/function_wrappers.py,114,function,Inline version of the FunctionScope context manager. 1088,FunctionWrappersTest,tensorflow/tensorflow/python/autograph/core/function_wrappers_test.py,29,class, 1089,UnsupportedFeaturesChecker,tensorflow/tensorflow/python/autograph/core/unsupported_features_checker.py,26,class,"Quick check for Python features we know we don't support. Any features detected will cause AutoGraph to not compile a function." 1090,verify,tensorflow/tensorflow/python/autograph/core/unsupported_features_checker.py,60,function, 1091,is_autograph_strict_conversion_mode,tensorflow/tensorflow/python/autograph/impl/api.py,72,function, 1092,AutoGraphError,tensorflow/tensorflow/python/autograph/impl/api.py,82,class,Base class for all AutoGraph exceptions. 1093,ConversionError,tensorflow/tensorflow/python/autograph/impl/api.py,87,class,Raised during the conversion process. 1094,StagingError,tensorflow/tensorflow/python/autograph/impl/api.py,92,class,Raised during the staging (i.e. Python execution) of converted code. 1095,_ErrorMetadata,tensorflow/tensorflow/python/autograph/impl/api.py,97,class,AutoGraph-specific error metadata. See base class. 1096,_attach_error_metadata,tensorflow/tensorflow/python/autograph/impl/api.py,146,function,Augments an error with the metadata necessary for rewrite. 1097,StackTraceMapper,tensorflow/tensorflow/python/autograph/impl/api.py,166,class,Remaps generated code to code it originated from. 1098,PyToTF,tensorflow/tensorflow/python/autograph/impl/api.py,203,class,The TensorFlow AutoGraph transformer. 1099,_convert_actual,tensorflow/tensorflow/python/autograph/impl/api.py,275,function,Applies AutoGraph to entity. 1100,autograph_artifact,tensorflow/tensorflow/python/autograph/impl/api.py,298,function, 1101,is_autograph_artifact,tensorflow/tensorflow/python/autograph/impl/api.py,303,function, 1102,converted_call,tensorflow/tensorflow/python/autograph/impl/api.py,307,function,"Converts a function call inline. For internal use only. Note: The argument list is optimized for readability of generated code, which may look like this: ag__.converted_call(f, (arg1, arg2), None, fscope) ag__.converted_call(f, (), dict(arg1=val1, **kwargs), fscope) ag__.converted_call(f, (arg1, arg2) + varargs, dict(**kwargs), lscope) Args: f: The function to convert. args: Tuple, the original positional arguments of f kwargs: Optional[Dict], the original keyword arguments of f caller_fn_scope: Optional[function_wrappers.FunctionScope], the function scope of the converted function in which this call was originally made. options: Optional[converter.ConversionOptions], conversion options. If not specified, the value of caller_fn_scope.callopts is used. Either options or caller_fn_scope must be present.
Returns: Any, the result of executing a possibly-converted `f` with the given arguments." 1103,_call_unconverted,tensorflow/tensorflow/python/autograph/impl/api.py,466,function,Calls the original function without converting with AutoGraph. 1104,_fall_back_unconverted,tensorflow/tensorflow/python/autograph/impl/api.py,479,function,"Falls back to calling the function unconverted, in case of error." 1105,tf_convert,tensorflow/tensorflow/python/autograph/impl/api.py,506,function,"Decorator that applies AutoGraph to a function. Use in internal APIs. This API is suitable for higher-order functions internal to the TensorFlow API, and more generally any function to which Autograph is not applied. Guidance: convert was a decorator meant for use directly by developers, and will soon be deprecated in favor of tf.function. tf_convert is to be called from higher-order functions internal to TF. Args: f: Callable. ctx: ag_ctx.ControlStatusCtx, the Autograph context in which `f` is used. convert_by_default: bool, whether to use AutoGraph when the context doesn't specify. user_requested: bool, whether to ignore the conversion allowlist. See ConversionOptions.user_requested. Returns: Either `f` or the converted version of `f`." 1106,call_with_unspecified_conversion_status,tensorflow/tensorflow/python/autograph/impl/api.py,565,function,Decorator that resets the conversion context to the unspecified status. 1107,_log_callargs,tensorflow/tensorflow/python/autograph/impl/api.py,577,function,Logging helper. 1108,do_not_convert,tensorflow/tensorflow/python/autograph/impl/api.py,599,function,"Decorator that suppresses the conversion of a function. Args: func: function to decorate. Returns: If `func` is not None, returns a `Callable` which is equivalent to `func`, but is not converted by AutoGraph. If `func` is None, returns a decorator that, when invoked with a single `func` argument, returns a `Callable` equivalent to the above case." 1109,convert,tensorflow/tensorflow/python/autograph/impl/api.py,626,function,"Decorator that compiles a function to use TensorFlow ops. The decorator is dynamic - it recompiles the target whenever the decorated function is called. This means the parameter values are known at conversion. It also means that repeated calls with different types of parameters will be correctly processed. Args: recursive: bool, whether to recursively convert any functions or classes that the converted function may use. optional_features: converter.Feature, allows toggling optional or experimental features. When set to None, only the core features are enabled. user_requested: bool, whether this is a function that the user explicitly asked to be converted. See ConversionOptions.user_requested. conversion_ctx: Optional ag_ctx.ControlStatusCtx, the Autograph context in which `f` is used. Returns: Callable, a decorator that converts the given function into an equivalent function that uses TensorFlow ops." 1110,to_graph,tensorflow/tensorflow/python/autograph/impl/api.py,682,function,"Converts a Python entity into a TensorFlow graph. Also see: `tf.autograph.to_code`, `tf.function`. Unlike `tf.function`, `to_graph` is a low-level transpiler that converts Python code to TensorFlow graph code. It does not implement any caching, variable management or create any actual ops, and is best used where greater control over the generated TensorFlow graph is desired. Another difference from `tf.function` is that `to_graph` will not wrap the graph into a TensorFlow function or a Python callable.
Internally, `tf.function` uses `to_graph`. Example usage: >>> def f(x): ... if x > 0: ... y = x * x ... else: ... y = -x ... return y ... >>> converted_f = to_graph(f) >>> x = tf.constant(2) >>> converted_f(x) # converted_f is like a TensorFlow Op. Supported Python entities include: * functions * classes * object methods Functions are converted into new functions with converted code. Classes are converted by generating a new class whose methods use converted code. Methods are converted into unbound functions that have an additional first argument called `self`. For a tutorial, see the [tf.function and AutoGraph guide](https://www.tensorflow.org/guide/function). For more detailed information, see the [AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md). Args: entity: Python callable or class to convert. recursive: Whether to recursively convert any functions that the converted function may call. experimental_optional_features: `None`, a tuple of, or a single `tf.autograph.experimental.Feature` value. Returns: Same as `entity`, the converted Python function or class. Raises: ValueError: If the entity could not be converted." 1111,to_graph_v1,tensorflow/tensorflow/python/autograph/impl/api.py,754,function,"Converts a Python entity into a TensorFlow graph. Also see: `tf.autograph.to_code`, `tf.function`. Unlike `tf.function`, `to_graph` is a low-level transpiler that converts Python code to TensorFlow graph code. It does not implement any caching, variable management or create any actual ops, and is best used where greater control over the generated TensorFlow graph is desired. Another difference from `tf.function` is that `to_graph` will not wrap the graph into a TensorFlow function or a Python callable. Internally, `tf.function` uses `to_graph`. _Example Usage_ ```python def foo(x): if x > 0: y = x * x else: y = -x return y converted_foo = to_graph(foo) x = tf.constant(1) y = converted_foo(x) # converted_foo is a TensorFlow Op-like. assert is_tensor(y) ``` Supported Python entities include: * functions * classes * object methods Functions are converted into new functions with converted code. Classes are converted by generating a new class whose methods use converted code. Methods are converted into unbound functions that have an additional first argument called `self`. Args: entity: Python callable or class to convert. recursive: Whether to recursively convert any functions that the converted function may call. arg_values: Deprecated. arg_types: Deprecated. experimental_optional_features: `None`, a tuple of, or a single `tf.autograph.experimental.Feature` value. Returns: Same as `entity`, the converted Python function or class. Raises: ValueError: If the entity could not be converted." 1112,to_code_v1,tensorflow/tensorflow/python/autograph/impl/api.py,825,function,"Returns the source code generated by AutoGraph, as a string. Example usage: >>> def f(x): ... if x < 0: ... x = -x ... return x >>> tf.autograph.to_code(f) ""...def tf__f(x):..."" Also see: `tf.autograph.to_graph`. Note: If a function has been decorated with `tf.function`, pass its underlying Python function, rather than the callable that `tf.function` creates: >>> @tf.function ... def f(x): ... if x < 0: ... x = -x ... return x >>> tf.autograph.to_code(f.python_function) ""...def tf__f(x):..."" Args: entity: Python callable or class. recursive: Whether to recursively convert any functions that the converted function may call.
arg_values: Deprecated. arg_types: Deprecated. indentation: Deprecated. experimental_optional_features: `None`, a tuple of, or a single `tf.autograph.experimental.Feature` value. Returns: The converted code as string." 1113,to_code,tensorflow/tensorflow/python/autograph/impl/api.py,879,function,"Returns the source code generated by AutoGraph, as a string. Example usage: >>> def f(x): ... if x < 0: ... x = -x ... return x >>> tf.autograph.to_code(f) ""...def tf__f(x):..."" Also see: `tf.autograph.to_graph`. Note: If a function has been decorated with `tf.function`, pass its underlying Python function, rather than the callable that `tf.function` creates: >>> @tf.function ... def f(x): ... if x < 0: ... x = -x ... return x >>> tf.autograph.to_code(f.python_function) ""...def tf__f(x):..."" Args: entity: Python callable or class to convert. recursive: Whether to recursively convert any functions that the converted function may call. experimental_optional_features: `None`, a tuple of, or a single `tf.autograph.experimental.Feature` value. Returns: The converted code as string." 1114,TestResource,tensorflow/tensorflow/python/autograph/impl/api_test.py,64,class, 1115,ApiTest,tensorflow/tensorflow/python/autograph/impl/api_test.py,70,class, 1116,_is_of_known_loaded_module,tensorflow/tensorflow/python/autograph/impl/conversion.py,37,function, 1117,_is_known_loaded_type,tensorflow/tensorflow/python/autograph/impl/conversion.py,46,function,Tests whether the function or method is an instance of a known type. 1118,is_unsupported,tensorflow/tensorflow/python/autograph/impl/conversion.py,73,function,Checks whether an entity is supported by AutoGraph at all. 1119,is_allowlisted,tensorflow/tensorflow/python/autograph/impl/conversion.py,116,function,"Checks whether an entity is allowed for use in graph mode. Examples of allowed entities include all members of the tensorflow package. Args: o: A Python entity. check_call_override: Reserved for internal use. When set to `False`, it disables the rule according to which classes are allowed if their __call__ method is allowed. allow_namedtuple_subclass: Reserved for internal use. When `True`, namedtuple subclasses are allowed. Returns: Boolean" 1120,is_in_allowlist_cache,tensorflow/tensorflow/python/autograph/impl/conversion.py,221,function, 1121,cache_allowlisted,tensorflow/tensorflow/python/autograph/impl/conversion.py,229,function, 1122,ConversionTest,tensorflow/tensorflow/python/autograph/impl/conversion_test.py,39,class, 1123,set_element_type,tensorflow/tensorflow/python/autograph/lang/directives.py,33,function,"Indicates that the entity is expected to hold items of the specified type/shape. The staged TensorFlow ops will reflect and assert this data type. Ignored otherwise. Args: entity: The entity to annotate. dtype: TensorFlow dtype value to assert for entity. shape: Optional shape to assert for entity." 1124,set_loop_options,tensorflow/tensorflow/python/autograph/lang/directives.py,50,function,"Specifies additional arguments to be passed to the enclosing while_loop. The parameters apply to and only to the immediately enclosing loop. It only has effect if the loop is staged as a TF while_loop; otherwise the parameters have no effect. Usage: >>> @tf.function(autograph=True) ... def f(): ... n = 0 ... for i in tf.range(10): ... tf.autograph.experimental.set_loop_options(maximum_iterations=3) ... n += 1 ... return n >>> @tf.function(autograph=True) ... def f(): ... v = tf.constant((0,)) ... for i in tf.range(3): ...
tf.autograph.experimental.set_loop_options( ... shape_invariants=[(v, tf.TensorShape([None]))] ... ) ... v = tf.concat((v, [i]), 0) ... return v Also see tf.while_loop. Args: parallel_iterations: The maximum number of iterations allowed to run in parallel at any given time. Note that this does not guarantee parallel execution. swap_memory: Whether to store intermediate values needed for gradients on the CPU instead of GPU. maximum_iterations: Allows limiting the total number of iterations executed by the loop. shape_invariants: Allows controlling the argument with the same name passed to tf.while_loop. Unlike tf.while_loop, this is a list of `(tensor, shape)` pairs." 1125,_validate_list_constructor,tensorflow/tensorflow/python/autograph/lang/special_functions.py,31,function,Validates the inputs of tensor_list. 1126,match_staging_level,tensorflow/tensorflow/python/autograph/lang/special_functions.py,50,function,Casts a value to be staged at the same level as another. 1127,tensor_list,tensorflow/tensorflow/python/autograph/lang/special_functions.py,57,function,"Creates a tensor list and populates it with the given elements. This function provides more uniform access to tensor lists and tensor arrays, and allows optional initialization. Note: this function is a simplified wrapper. If you need greater control, it is recommended to use the underlying implementation directly. Args: elements: Iterable[tf.Tensor, ...], the elements to initially fill the list with element_dtype: Optional[tf.DType], data type for the elements in the list; required if the list is empty element_shape: Optional[tf.TensorShape], shape for the elements in the list; required if the list is empty use_tensor_array: bool, whether to use the more compatible but restrictive tf.TensorArray implementation Returns: Union[tf.Tensor, tf.TensorArray], the new list. Raises: ValueError: for invalid arguments" 1128,stack,tensorflow/tensorflow/python/autograph/lang/special_functions.py,92,function,"Stacks the input, if it admits the notion of stacking. For example, a list of tensors can be stacked into a larger tensor. This function is similar to tf.stack, but it accepts non-lists and lists of non-tensors as arguments. In the latter case, the function does nothing. Args: list_or_tensor: Any element_dtype: tf.DType, optional dtype for the elements in the list. Required if the input is stackable, and the list is untyped. strict: bool, if True an error is raised if the input is not stackable. Otherwise the function is a no-op. Returns: Any, if the input is stackable, the result will be a tf.Tensor. Otherwise, if strict=False, the result will be list_or_tensor. Raises: ValueError: if strict=True and the input is not stackable." 1129,SpecialFunctionsTest,tensorflow/tensorflow/python/autograph/lang/special_functions_test.py,31,class, 1130,if_exp,tensorflow/tensorflow/python/autograph/operators/conditional_expressions.py,27,function, 1131,_tf_if_exp,tensorflow/tensorflow/python/autograph/operators/conditional_expressions.py,34,function,Overload of if_exp that stages a TF cond.
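A minimal sketch of the if_exp dispatch just listed, assuming the Tensor case stages a tf.cond while the Python case keeps lazy evaluation; the exact signature of the real operator may differ.

```python
import tensorflow as tf

def if_exp(cond, if_true, if_false):
  # Branches arrive as zero-arg callables: plain Python evaluates only
  # the taken branch, while a Tensor cond stages both via tf.cond.
  if tf.is_tensor(cond):
    return tf.cond(cond, if_true, if_false)
  return if_true() if cond else if_false()

print(if_exp(3 > 2, lambda: 'a', lambda: 'b'))  # a
```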
1132,_py_if_exp,tensorflow/tensorflow/python/autograph/operators/conditional_expressions.py,55,function, 1133,_basic_expr,tensorflow/tensorflow/python/autograph/operators/conditional_expressions_test.py,29,function, 1134,IfExpTest,tensorflow/tensorflow/python/autograph/operators/conditional_expressions_test.py,38,class, 1135,_verify_loop_init_vars,tensorflow/tensorflow/python/autograph/operators/control_flow.py,102,function,Ensures that all values in the state are defined when entering a loop. 1136,_is_subshape,tensorflow/tensorflow/python/autograph/operators/control_flow.py,117,function,Returns True if left shape is at least as specific as right shape. 1137,_verify_single_loop_var,tensorflow/tensorflow/python/autograph/operators/control_flow.py,134,function,"Verifies whether the initial, entry and exit values are consistent." 1138,_verify_tf_loop_vars,tensorflow/tensorflow/python/autograph/operators/control_flow.py,191,function,Verifies loop variables for consistency. 1139,verify_single_cond_var,tensorflow/tensorflow/python/autograph/operators/control_flow.py,233,function,Verifies whether body_var and orelse_var are consistent. 1140,_verify_tf_cond_branch_vars,tensorflow/tensorflow/python/autograph/operators/control_flow.py,263,function,Verifies variables output by a conditional branch for consistency. 1141,_verify_tf_cond_vars,tensorflow/tensorflow/python/autograph/operators/control_flow.py,276,function,Verifies variables manipulated by a conditional for consistency. 1142,for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,291,function,"Functional form of a for statement. The loop operates on a state, which includes all symbols that are variant across loop iterations, excluding the variables local to the loop. For example, given the loop below that calculates the geometric and arithmetic means of some numbers: ``` geo_mean = 1 arith_mean = 0 for i in range(n): a = numbers[i] geo_mean *= a arith_mean += a ``` The state is represented by the variables geo_mean and arith_mean. The `extra_test`, `body`, `get_state` and `set_state` functions must bind to the original `geo_mean` and `arith_mean` symbols, using `nonlocal`. The inputs and outputs of the callables representing the loop blocks are not explicit - instead, these functions must use nonlocal/global for side effects. The inputs and outputs are instead controlled by the set_state/get_state functions. Args: iter_: The entity being iterated over. extra_test: Callable with boolean return type. An additional loop condition. body: Callable representing the actual loop body. get_state: Additional callable which can capture additional state (such as the values of composite symbols). This is only useful when staging the loop. set_state: Additional callable which saves values captured by get_state back into the Python environment. This is only useful when staging the loop. symbol_names: Tuple containing names of the loop variables returned by get_state. opts: Optional dict of extra loop parameters. Returns: Tuple containing the final state." 1143,_py_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,371,function,Overload of for_stmt that executes a Python for loop. 1144,_known_len_tf_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,400,function,Overload of for_stmt that iterates over TF entities that admit a length. 1145,_tf_ragged_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,447,function,Overload of for_stmt that iterates over TF ragged tensors.
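The functional for_stmt contract above can be mirrored in plain Python. py_for_stmt below is a hypothetical stand-in for the Python overload, with get_state/set_state omitted for brevity; the example reuses the geometric/arithmetic means scenario from the docstring.

```python
# Illustrative sketch only; not the TF-internal API.
def py_for_stmt(iter_, extra_test, body):
  for target in iter_:
    if extra_test is not None and not extra_test():
      break
    body(target)

def means(numbers):
  geo_mean, arith_mean = 1.0, 0.0

  def body(a):
    nonlocal geo_mean, arith_mean  # loop state is bound via nonlocal
    geo_mean *= a
    arith_mean += a

  py_for_stmt(numbers, None, body)
  return geo_mean, arith_mean

print(means([2.0, 8.0]))  # (16.0, 10.0)
```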
1146,_tf_range_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,494,function,Overload of for_stmt that iterates over a TF range (and elides it). 1147,_tf_iterator_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,555,function,Overload of for_stmt that iterates over TF Iterators. See for_stmt. 1148,_general_purpose_scan,tensorflow/tensorflow/python/autograph/operators/control_flow.py,615,function,Variant of Dataset.scan with semantics of general-purpose computation. 1149,_tf_dataset_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,629,function,Overload of _dataset_for_stmt with early stopping. See for_stmt. 1150,_tf_distributed_iterable_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,700,function,Overload of for_stmt that iterates over TF distributed datasets. 1151,while_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,727,function,"Functional form of a while statement. The loop operates on a so-called state, which includes all symbols that are variant across loop iterations. In what follows we refer to state as either a tuple of entities that represent an actual state, or a list of arguments of the corresponding types. The inputs and outputs of the callables representing the loop blocks are not explicit - instead, these functions must use nonlocal/global for side effects. The inputs and outputs are instead controlled by the set_state/get_state functions. Args: test: Callable with boolean return type. The loop condition. body: Callable representing the actual loop body. get_state: Additional callable which can capture additional state (such as the values of composite symbols). This is only useful when staging the loop. set_state: Additional callable which saves values captured by get_state back into the Python environment. This is only useful when staging the loop. symbol_names: Tuple containing the names of all loop variables. opts: Optional dict of extra loop parameters. Returns: Tuple containing the final state." 1152,_PythonLoopChecker,tensorflow/tensorflow/python/autograph/operators/control_flow.py,777,class,Verifies Python loops for TF-specific limits. 1153,_py_while_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,851,function,Overload of while_stmt that executes a Python while loop. 1154,_shape_invariants_mapping_to_positional_list,tensorflow/tensorflow/python/autograph/operators/control_flow.py,872,function, 1155,_tf_while_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,882,function,Overload of while_stmt that stages a TF while_loop. 1156,if_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,915,function,"Functional form of an if statement. The conditional operates on a state, which includes all symbols whose values are a function of the branch taken. For example, given the code below that calculates the abs function: ``` x = 1 if x < 0: x = -x ``` The state is represented by the variable `x`. The `body`, `orelse` and `set_state` functions must bind to the original `x` symbol, using `nonlocal`. The inputs and outputs of the callables representing the loop blocks are not explicit - instead, these functions must use nonlocal/global for side effects. The inputs and outputs are instead controlled by the set_state/get_state functions. Args: cond: Boolean. body: Callable representing the main block of the conditional. orelse: Callable representing the else block of the conditional.
get_state: Function that returns a tuple containing the values of all composite symbols modified within the conditional. This allows access to state that branches may mutate through side effects. This function is not needed and should not be called when dispatching to code matching Python's default semantics. This is useful for checkpointing to avoid unintended side-effects when staging requires evaluating all code-paths. set_state: Function to set the values of all composite symbols modified within the conditional. This is the complement to get_state, used to restore checkpointed values. The single argument is a tuple containing values for each composite symbol that may be modified in a branch of the conditional. This is usually the result of a call to get_state. symbol_names: Tuple containing basic loop var names. nouts: Number of variables output by the statement. Vars which are not outputs will not be passed through staged control flow such as tf.cond. This includes variables that are defined before the conditional, but are not used after it." 1157,_tf_if_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,965,function,Overload of if_stmt that stages a TF cond. 1158,_py_if_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow.py,1011,function,Overload of if_stmt that executes a Python if statement. 1159,_disallow_undefs_into_loop,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,104,function,Ensures that all values in the state are defined when entering a loop. 1160,_is_subshape,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,120,function,Returns True if left shape is at least as specific as right shape. 1161,_verify_single_loop_var,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,137,function,"Verifies whether the initial, entry and exit values are consistent." 1162,_verify_tf_loop_vars,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,191,function,Verifies loop variables for consistency. 1163,_verify_single_cond_var,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,223,function,Verifies whether body_var and orelse_var are consistent. 1164,_verify_tf_cond_vars,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,248,function,Verifies variables manipulated by a conditional for consistency. 1165,for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,279,function,"Functional form of a for statement. The loop operates on a state, which includes all symbols that are variant across loop iterations, excluding the iterate as well as the variables local to the loop. For example, given the loop below that calculates the geometric and arithmetic means of some numbers: geo_mean = 1 arith_mean = 0 for i in range(n): a = numbers[i] geo_mean *= a arith_mean += a The state is represented by the variables geo_mean and arith_mean. The argument for initial_state may contain the tuple (1, 0), the body will include the arguments geo_mean and arith_mean and will return a tuple representing the new values for geo_mean and arith_mean, respectively. Args: iter_: The entity being iterated over. extra_test: Callable with the state as arguments, and boolean return type. An additional loop condition. body: Callable with the iterate and the state as arguments, and state as return type. The actual loop body.
get_state: Additional callable which can capture additional state (such as the values of composite symbols). This is only useful when staging the loop. set_state: Additional callable which saves values captured by get_state back into the Python environment. This is only useful when staging the loop. init_vars: Tuple containing the initial state. basic_symbol_names: Tuple containing basic loop var names. composite_symbol_names: Tuple containing composite loop var names. opts: Optional dict of extra loop parameters. Returns: Tuple containing the final state." 1166,_py_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,364,function,Overload of for_stmt that executes a Python for loop. 1167,_known_len_tf_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,383,function,Overload of for_stmt that iterates over TF entities that admit a length. 1168,_tf_ragged_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,446,function,Overload of for_stmt that iterates over TF ragged tensors. 1169,_tf_range_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,509,function,Overload of for_stmt that iterates over a TF range (and elides it). 1170,_tf_iterator_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,572,function,Overload of for_stmt that iterates over TF Iterators. See for_stmt. 1171,_tf_dataset_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,636,function,Overload of for_stmt that iterates over TF Datasets. 1172,_general_purpose_scan,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,653,function,Variant of Dataset.scan with semantics of general-purpose computation. 1173,_dataset_for_stmt_with_extra_test,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,667,function,Overload of _dataset_for_stmt with early stopping. See for_stmt. 1174,_dataset_for_stmt_no_extra_test,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,725,function,Overload of _dataset_for_stmt without early stopping. See for_stmt. 1175,_tf_distributed_dataset_for_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,794,function,Overload of for..in statement that iterates over the input. 1176,while_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,817,function,"Functional form of a while statement. The loop operates on a so-called state, which includes all symbols that are variant across loop iterations. In what follows we refer to state as either a tuple of entities that represent an actual state, or a list of arguments of the corresponding types. Args: test: Callable with the state as arguments, and boolean return type. The loop condition. body: Callable with the state as arguments, and state as return type. The actual loop body. get_state: Additional callable which can capture additional state (such as the values of composite symbols). This is only useful when staging the loop. set_state: Additional callable which saves values captured by get_state back into the Python environment. This is only useful when staging the loop. init_vars: Tuple containing the initial state. basic_symbol_names: Tuple containing basic loop var names. composite_symbol_names: Tuple containing composite loop var names. opts: Optional dict of extra loop parameters.
Returns: Tuple containing the final state." 1177,_shape_invariants_mapping_to_positional_list,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,873,function, 1178,_tf_while_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,883,function,Overload of while_stmt that stages a TF while_loop. 1179,_PythonLoopChecker,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,925,class,Verifies Python loops for TF-specific limits. 1180,_py_while_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,987,function,Overload of while_stmt that executes a Python while loop. 1181,if_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,1008,function,"Functional form of an if statement. Args: cond: Boolean. body: Callable with no arguments, and outputs of the positive (if) branch as return type. orelse: Callable with no arguments, and outputs of the negative (else) branch as return type. get_state: Function that returns a tuple containing the values of all composite symbols modified within the conditional. This allows access to state that branches may mutate through side effects. This function is not needed and should not be called when dispatching to code matching Python's default semantics. This is useful for checkpointing to avoid unintended side-effects when staging requires evaluating all code-paths. set_state: Function to set the values of all composite symbols modified within the conditional. This is the complement to get_state, used to restore checkpointed values. The single argument is a tuple containing values for each composite symbol that may be modified in a branch of the conditional. This is usually the result of a call to get_state. basic_symbol_names: Tuple containing basic loop var names. composite_symbol_names: Tuple containing composite loop var names. Returns: Tuple containing the statement outputs." 1182,tf_if_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,1048,function,Overload of if_stmt that stages a TF cond. 1183,_isolate_state,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,1088,function,"Wraps func to (best-effort) isolate state mutations that func may do. The simplest example of state mutation is mutation of variables (via e.g. attributes), or modification of globals. This allows us to more safely execute this function without worrying about side effects when the function wasn't normally expected to execute. For example, staging requires that the function is executed ahead of time, and we need to ensure its effects are not observed during normal execution. Args: func: () -> Any get_state: () -> Any, returns the current state set_state: (Any) -> None, resets the state to the specified values. Typically the result of an earlier call to `get_state`. Returns: Tuple[Any, Any], where the first element is the return value of `func`, and the second is the final state values." 1184,_wrap_disallow_undefs_from_cond,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,1121,function,Wraps conditional branch to disallow returning undefined symbols. 1185,_py_if_stmt,tensorflow/tensorflow/python/autograph/operators/control_flow_deprecated_py2.py,1152,function,Overload of if_stmt that executes a Python if statement.
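The execute-then-restore behavior that _isolate_state describes can be sketched in a few lines; isolate_state and the example state accessors below are illustrative stand-ins, not the TF-internal API.

```python
def isolate_state(func, get_state, set_state):
  # Run func speculatively, capture the state it produced, then restore
  # the original state so the mutation is not observable afterwards.
  init_state = get_state()
  ret = func()
  final_state = get_state()
  set_state(init_state)
  return ret, final_state

counter = [0]

def bump():
  counter[0] += 1
  return 'done'

ret, final = isolate_state(bump,
                           get_state=lambda: counter[0],
                           set_state=lambda v: counter.__setitem__(0, v))
assert ret == 'done' and final == 1 and counter[0] == 0
```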
1186,ForLoopTest,tensorflow/tensorflow/python/autograph/operators/control_flow_test.py,49,class, 1187,WhileLoopTest,tensorflow/tensorflow/python/autograph/operators/control_flow_test.py,540,class, 1188,IfStmtTest,tensorflow/tensorflow/python/autograph/operators/control_flow_test.py,775,class, 1189,new_list,tensorflow/tensorflow/python/autograph/operators/data_structures.py,36,function,"The list constructor. Args: iterable: Optional elements to fill the list with. Returns: A list-like object. The exact return value depends on the initial elements." 1190,tf_tensor_array_new,tensorflow/tensorflow/python/autograph/operators/data_structures.py,57,function,Overload of new_list that stages a Tensor list creation. 1191,tf_tensor_list_new,tensorflow/tensorflow/python/autograph/operators/data_structures.py,107,function,Overload of new_list that stages a Tensor list creation. 1192,_py_list_new,tensorflow/tensorflow/python/autograph/operators/data_structures.py,166,function,Overload of new_list that creates a Python list. 1193,list_append,tensorflow/tensorflow/python/autograph/operators/data_structures.py,171,function,"The list append function. Note: it is unspecified whether list_ will be mutated or not. If list_ is a TensorFlow entity, it will typically not be mutated. If list_ is a plain list, it will be. In general, if the list is mutated then the return value should point to the original entity. Args: list_: An entity that supports append semantics. x: The element to append. Returns: Same as list_, after the append was performed. Raises: ValueError: if list_ is not of a known list-like type." 1194,_tf_tensor_list_append,tensorflow/tensorflow/python/autograph/operators/data_structures.py,202,function,Overload of list_append that stages a Tensor list write. 1195,_tf_tensorarray_append,tensorflow/tensorflow/python/autograph/operators/data_structures.py,218,function,Overload of list_append that stages a TensorArray write. 1196,_py_list_append,tensorflow/tensorflow/python/autograph/operators/data_structures.py,223,function,Overload of list_append that executes a Python list append. 1197,ListPopOpts,tensorflow/tensorflow/python/autograph/operators/data_structures.py,230,class, 1198,list_pop,tensorflow/tensorflow/python/autograph/operators/data_structures.py,235,function,"The list pop function. Note: it is unspecified whether list_ will be mutated or not. If list_ is a TensorFlow entity, it will typically not be mutated. If list_ is a plain list, it will be. In general, if the list is mutated then the return value should point to the original entity. Args: list_: An entity that supports pop semantics. i: Optional index to pop from. May be None. opts: A ListPopOpts. Returns: Tuple (x, out_list_): out_list_: same as list_, after the removal was performed. x: the removed element value. Raises: ValueError: if list_ is not of a known list-like type or the operation is not supported for that type." 1199,_tf_tensor_list_pop,tensorflow/tensorflow/python/autograph/operators/data_structures.py,272,function,Overload of list_pop that stages a Tensor list pop. 1200,_py_list_pop,tensorflow/tensorflow/python/autograph/operators/data_structures.py,289,function,Overload of list_pop that executes a Python list pop. 1201,ListStackOpts,tensorflow/tensorflow/python/autograph/operators/data_structures.py,299,class, 1202,list_stack,tensorflow/tensorflow/python/autograph/operators/data_structures.py,305,function,"The list stack function. This does not have a direct correspondent in Python.
The closest idiom to this is tf.stack or np.stack. It's different from those in the sense that it accepts a Tensor list, rather than a list of tensors. It can also accept TensorArray. When the target is anything else, the dispatcher will rely on ctx.original_call for fallback. Args: list_: An entity that supports append semantics. opts: A ListStackOpts object. Returns: The output of the stack operation, typically a Tensor." 1203,_tf_tensorarray_stack,tensorflow/tensorflow/python/autograph/operators/data_structures.py,335,function,Overload of list_stack that stages a TensorArray stack. 1204,_tf_tensor_list_stack,tensorflow/tensorflow/python/autograph/operators/data_structures.py,340,function,Overload of list_stack that stages a Tensor list write. 1205,_py_list_stack,tensorflow/tensorflow/python/autograph/operators/data_structures.py,348,function,Overload of list_stack that executes a Python list append. 1206,ListTest,tensorflow/tensorflow/python/autograph/operators/data_structures_test.py,31,class, 1207,DispatchContext,tensorflow/tensorflow/python/autograph/operators/dispatch_context.py,27,class,"Allows passing additional parameters to the specific implementations. Attributes: options: Optional dict of extra arguments that may be required by specific implementations." 1208,assert_stmt,tensorflow/tensorflow/python/autograph/operators/exceptions.py,26,function,"Functional form of an assert statement. This follows the semantics of the Python assert statement; however, the concrete implementations may deviate from it. See the respective implementation for details. In general, the assert statement should not be used for control flow. Furthermore, it is encouraged that the assertion expressions should not have side effects. Args: expression1: Any expression2: Callable[[], Any], returns the expression to include in the error message when expression1 evaluates to False. When expression1 is True, the result of expression2 will not be evaluated, however, expression2 itself may be evaluated in some implementations. Returns: Any, implementation-dependent. Raises: ValueError: if any arguments are illegal." 1209,_tf_assert_stmt,tensorflow/tensorflow/python/autograph/operators/exceptions.py,62,function,"Overload of assert_stmt that stages a TF Assert. This implementation deviates from Python semantics as follows: (1) the assertion is verified regardless of the state of __debug__ (2) on assertion failure, the graph execution will fail with tensorflow.errors.ValueError, rather than AssertionError. Args: expression1: tensorflow.Tensor, must evaluate to a tf.bool scalar expression2: Callable[[], Union[tensorflow.Tensor, List[tensorflow.Tensor]]] Returns: tensorflow.Operation" 1210,_py_assert_stmt,tensorflow/tensorflow/python/autograph/operators/exceptions.py,83,function,Overload of assert_stmt that executes a Python assert statement. 1211,ExceptionsTest,tensorflow/tensorflow/python/autograph/operators/exceptions_test.py,28,class, 1212,not_,tensorflow/tensorflow/python/autograph/operators/logical.py,26,function,"Functional form of ""not""." 1213,_tf_not,tensorflow/tensorflow/python/autograph/operators/logical.py,33,function,"Implementation of the ""not_"" operator for TensorFlow." 1214,_py_not,tensorflow/tensorflow/python/autograph/operators/logical.py,38,function,"Default Python implementation of the ""not_"" operator." 1215,and_,tensorflow/tensorflow/python/autograph/operators/logical.py,43,function,"Functional form of ""and"". Uses lazy evaluation semantics."
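A sketch of the lazy-evaluation dispatch used by and_ (and symmetrically or_), under the assumption suggested by the entries above that the second operand arrives as a zero-argument callable so Python's short-circuit semantics survive conversion.

```python
import tensorflow as tf

def and_(a, b_thunk):
  if tf.is_tensor(a):
    # Tensors cannot short-circuit in Python; stage a cond instead,
    # returning the condition itself on the false branch.
    return tf.cond(a, b_thunk, lambda: a)
  return a and b_thunk()  # plain Python keeps short-circuit semantics

print(and_(False, lambda: 1 / 0))  # False; the thunk is never evaluated
```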
1216,_tf_lazy_and,tensorflow/tensorflow/python/autograph/operators/logical.py,51,function,"Lazy-eval equivalent of ""and"" for Tensors." 1217,_py_lazy_and,tensorflow/tensorflow/python/autograph/operators/logical.py,57,function,"Lazy-eval equivalent of ""and"" in Python." 1218,or_,tensorflow/tensorflow/python/autograph/operators/logical.py,62,function,"Functional form of ""or"". Uses lazy evaluation semantics." 1219,_tf_lazy_or,tensorflow/tensorflow/python/autograph/operators/logical.py,70,function,"Lazy-eval equivalent of ""or"" for Tensors." 1220,_py_lazy_or,tensorflow/tensorflow/python/autograph/operators/logical.py,76,function,"Lazy-eval equivalent of ""or"" in Python." 1221,eq,tensorflow/tensorflow/python/autograph/operators/logical.py,81,function,"Functional form of ""equal""." 1222,_tf_equal,tensorflow/tensorflow/python/autograph/operators/logical.py,88,function,"Overload of ""equal"" for Tensors." 1223,_py_equal,tensorflow/tensorflow/python/autograph/operators/logical.py,93,function,"Overload of ""equal"" that falls back to Python's default implementation." 1224,not_eq,tensorflow/tensorflow/python/autograph/operators/logical.py,98,function,"Functional form of ""not-equal""." 1225,LogicalOperatorsTest,tensorflow/tensorflow/python/autograph/operators/logical_test.py,27,class, 1226,overload_of,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,65,function, 1227,_find_originating_frame,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,71,function,Locates the frame in which `caller_fn_scope` was defined. 1228,locals_in_original_context,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,92,function,Executes the locals function in the context of a specified function. 1229,globals_in_original_context,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,97,function,Executes the globals function in the context of a specified function. 1230,eval_in_original_context,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,102,function,Executes the eval function in the context of a specified function. 1231,super_in_original_context,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,117,function,"Executes the super function in the context of a specified function. See https://docs.python.org/3/library/functions.html#super for the exact details Args: f: Callable, typically the super builtin args: List[Any], the original call arguments caller_fn_scope: Optional[function_wrappers.FunctionScope], the function scope of the converted function in which this call was originally made Returns: The result of calling `f` as if it was called in the frame indicated by `caller_fn_scope`."
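The eq/not_eq overloads follow the same dispatch pattern: Tensors are routed to a TF op, everything else falls back to Python. A minimal sketch (assumed shape, using the public tf.math.equal):

```python
import tensorflow as tf

def eq(a, b):
  if tf.is_tensor(a) or tf.is_tensor(b):
    return tf.math.equal(a, b)  # element-wise Tensor comparison
  return a == b                  # Python's default semantics

print(eq(2, 2))                    # True
print(eq(tf.constant([1, 2]), 2))  # tf.Tensor([False  True], ...)
```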
1232,abs_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,179,function, 1233,_tf_abs,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,187,function, 1234,_tf_dataset_abs,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,191,function, 1235,_py_abs,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,198,function, 1236,float_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,202,function, 1237,_tf_float,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,208,function, 1238,_py_float,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,215,function, 1239,int_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,219,function, 1240,_tf_int,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,225,function, 1241,_py_int,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,235,function, 1242,len_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,241,function, 1243,_tf_tensor_array_len,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,253,function, 1244,_tf_tensor_list_len,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,257,function, 1245,_tf_tensor_len,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,261,function,Overload of len_ for Tensor arguments. 1246,_tf_dataset_len,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,294,function, 1247,_py_len,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,314,function, 1248,print_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,318,function,Overload of the print builtin. 1249,_py_print,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,334,function, 1250,_tf_py_func_print,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,338,function,Overload of print_ as a py_func implementation. 1251,range_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,360,function, 1252,_tf_range,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,366,function,Overload of range_ that generates a TF range tensor. 1253,_py_range,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,383,function, 1254,enumerate_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,391,function, 1255,_tf_dataset_enumerate,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,401,function, 1256,_py_enumerate,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,405,function, 1257,zip_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,409,function, 1258,_tf_dataset_zip,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,415,function, 1259,_py_zip,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,419,function, 1260,map_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,423,function, 1261,_tf_dataset_map,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,429,function, 1262,_py_map,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,433,function, 1263,next_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,437,function, 1264,_verify_spec_compatible,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,444,function,"Verifies that a symbol has a type compatible with a given spec. Here, compatibility is viewed in the general TensorFlow sense: that the dtypes are the same after implicit conversion, if both are tensors. This verifier ensures consistent treatment of types across AutoGraph.
Args: input_name: A name to use for `input_` in error messages. spec_name: A name to use for `spec` in error messages. input_: Any, value to verify. spec: TypeSpec that `input_` must be compatible with. Raises: ValueError if the two types have been determined not to be compatible." 1265,_verify_structure_compatible,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,480,function,"Verifies that a possibly-structured symbol has types compatible with another. See _verify_spec_compatible for a more concrete meaning of ""compatible"". Unlike _verify_spec_compatible, which handles singular Tensor-spec objects, _verify_structure_compatible can process structures recognized by tf.nest. Args: input_name: A name to use for `input_` in error messages. spec_name: A name to use for `spec` in error messages. input_: Any, value to verify. May, but doesn't need to, be a structure. spec: Any, value that `input_` must be compatible with. May, but doesn't need to, be a structure. Raises: ValueError if the two types have been determined not to be compatible." 1266,next_tf_iterator,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,509,function, 1267,next_py,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,521,function, 1268,filter_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,527,function, 1269,_tf_dataset_filter,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,533,function, 1270,_py_filter,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,537,function, 1271,any_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,541,function, 1272,_tf_dataset_any,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,552,function, 1273,_py_any,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,566,function, 1274,all_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,570,function, 1275,_tf_dataset_all,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,578,function, 1276,_py_all,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,592,function, 1277,sorted_,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,596,function, 1278,_tf_sorted,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,602,function,Overload of sorted_ for Tensor iterable. 1279,_py_sorted,tensorflow/tensorflow/python/autograph/operators/py_builtins.py,627,function, 1280,TestBase,tensorflow/tensorflow/python/autograph/operators/py_builtins_test.py,41,class, 1281,PyBuiltinsTest,tensorflow/tensorflow/python/autograph/operators/py_builtins_test.py,48,class, 1282,GetItemOpts,tensorflow/tensorflow/python/autograph/operators/slices.py,34,class, 1283,get_item,tensorflow/tensorflow/python/autograph/operators/slices.py,38,function,"The slice read operator (i.e. __getitem__). Note: it is unspecified whether target will be mutated or not. In general, if target is mutable (like Python lists), it will be mutated. Args: target: An entity that supports getitem semantics. i: Index to read from. opts: A GetItemOpts object. Returns: The read element. Raises: ValueError: if target is not of a supported type." 1284,_tf_tensorarray_get_item,tensorflow/tensorflow/python/autograph/operators/slices.py,70,function,Overload of get_item that stages a TensorArray read. 1285,_tf_tensor_list_get_item,tensorflow/tensorflow/python/autograph/operators/slices.py,75,function,Overload of get_item that stages a Tensor list read.
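The get_item dispatch just described can be sketched under the assumption that TensorArray is the case needing a dedicated read op; the real overloads additionally special-case tensor lists and string Tensors.

```python
import tensorflow as tf

def get_item(target, i):
  if isinstance(target, tf.TensorArray):
    return target.read(i)  # TensorArray uses a dedicated index-read op
  return target[i]         # Tensors and Python lists support __getitem__

ta = tf.TensorArray(tf.int32, size=2).write(0, 10).write(1, 20)
print(get_item(ta, 1))  # tf.Tensor(20, shape=(), dtype=int32)
```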
1286,_tf_tensor_get_item,tensorflow/tensorflow/python/autograph/operators/slices.py,84,function,Overload of get_item that stages a Tensor (not Tensor list) read. 1287,_tf_tensor_string_get_item,tensorflow/tensorflow/python/autograph/operators/slices.py,89,function,Overload of get_item that stages a Tensor string read. 1288,_py_get_item,tensorflow/tensorflow/python/autograph/operators/slices.py,95,function,Overload of get_item that executes a Python list read. 1289,set_item,tensorflow/tensorflow/python/autograph/operators/slices.py,100,function,"The slice write operator (i.e. __setitem__). Note: it is unspecified whether target will be mutated or not. In general, if target is mutable (like Python lists), it will be mutated. Args: target: An entity that supports setitem semantics. i: Index to modify. x: The new element value. Returns: Same as target, after the update was performed. Raises: ValueError: if target is not of a supported type." 1290,_tf_tensorarray_set_item,tensorflow/tensorflow/python/autograph/operators/slices.py,128,function,Overload of set_item that stages a TensorArray write. 1291,_tf_tensor_list_set_item,tensorflow/tensorflow/python/autograph/operators/slices.py,133,function,Overload of set_item that stages a Tensor list update. 1292,_tf_tensor_set_item,tensorflow/tensorflow/python/autograph/operators/slices.py,138,function,Overload of set_item that stages a Tensor scatter update. 1293,_py_set_item,tensorflow/tensorflow/python/autograph/operators/slices.py,143,function,Overload of set_item that executes a Python list modification. 1294,SlicesTest,tensorflow/tensorflow/python/autograph/operators/slices_test.py,27,class, 1295,ld,tensorflow/tensorflow/python/autograph/operators/variables.py,22,function,Load variable operator. 1296,ldu,tensorflow/tensorflow/python/autograph/operators/variables.py,29,function,"Load variable operator that returns Undefined when failing to evaluate. Note: the name (""load or return undefined"") is abbreviated to minimize the amount of clutter in generated code. This variant of `ld` is useful when loading symbols that may be undefined at runtime, such as composite symbols, and whether they are defined or not cannot be determined statically. For example `d['a']` is undefined when `d` is an empty dict. Args: load_v: Lambda that executes the actual read. name: Human-readable name of the symbol being read. Returns: Either the value of the symbol, or Undefined, if the symbol is not fully defined." 1297,Undefined,tensorflow/tensorflow/python/autograph/operators/variables.py,54,class,"Represents an undefined symbol in Python. This is used to reify undefined symbols, which is required to use the functional form of loops. Example: while n > 0: n = n - 1 s = n return s # Runtime error if n == 0 This is valid Python code and will not result in an error as long as n is positive. The use of this class is to stay as close to Python semantics as possible for staged code of this nature. Converted version of the above showing the possible usage of this class: s = Undefined('s') init_state = (s,) s = while_loop(cond, body, init_state) return s # s is an instance of Undefined if the loop never runs Attributes: symbol_name: Text, identifier for the undefined symbol" 1298,UndefinedReturnValue,tensorflow/tensorflow/python/autograph/operators/variables.py,106,class,Represents a return value that is undefined. 
1299,SpecialValuesTest,tensorflow/tensorflow/python/autograph/operators/variables_test.py,25,class, 1300,NoValue,tensorflow/tensorflow/python/autograph/pyct/anno.py,37,class, 1301,Basic,tensorflow/tensorflow/python/autograph/pyct/anno.py,43,class,"Container for basic annotation keys. The enum values are used strictly for documentation purposes." 1302,Static,tensorflow/tensorflow/python/autograph/pyct/anno.py,67,class,"Container for static analysis annotation keys. The enum values are used strictly for documentation purposes." 1303,keys,tensorflow/tensorflow/python/autograph/pyct/anno.py,110,function, 1304,getanno,tensorflow/tensorflow/python/autograph/pyct/anno.py,116,function, 1305,hasanno,tensorflow/tensorflow/python/autograph/pyct/anno.py,123,function, 1306,setanno,tensorflow/tensorflow/python/autograph/pyct/anno.py,127,function, 1307,delanno,tensorflow/tensorflow/python/autograph/pyct/anno.py,137,function, 1308,copyanno,tensorflow/tensorflow/python/autograph/pyct/anno.py,145,function, 1309,dup,tensorflow/tensorflow/python/autograph/pyct/anno.py,154,function,"Recursively copies annotations in an AST tree. Args: node: ast.AST copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination key. All annotations with the source key will be copied to identical annotations with the destination key. field_name: str" 1310,AnnoTest,tensorflow/tensorflow/python/autograph/pyct/anno_test.py,30,class, 1311,CleanCopier,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,30,class,NodeTransformer-like visitor that copies an AST. 1312,copy_clean,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,63,function,"Creates a deep copy of an AST. The copy will not include fields that are prefixed by '__', with the exception of user-specified annotations. Args: node: ast.AST preserve_annos: Optional[Set[Hashable]], annotation keys to include in the copy Returns: ast.AST" 1313,SymbolRenamer,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,79,class,Transformer that can rename symbols to simple names. 1314,rename_symbols,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,130,function,Renames symbols in an AST. Requires qual_names annotations. 1315,keywords_to_dict,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,140,function,Converts a list of ast.keyword objects to a dict. 1316,PatternMatcher,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,150,class,Matches a node against a pattern represented by a node. 1317,matches,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,214,function,"Basic pattern matcher for AST. The pattern may contain wildcards represented by the symbol '_'. A node matches a pattern if for every node in the tree, either there is a node of the same type in pattern, or a Name node with id='_'. Args: node: ast.AST pattern: ast.AST Returns: bool" 1318,apply_to_single_assignments,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,236,function,"Applies a function to each individual assignment. This function can process a possibly-unpacked (e.g. a, b = c, d) assignment. It tries to break down the unpacking if possible. In effect, it is equivalent to passing the assigned values in SSA form to apply_fn. Examples: The following will result in apply_fn(a, c), apply_fn(b, d): a, b = c, d The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]): a, b = c The following will result in apply_fn(a, (b, c)): a = b, c It uses the visitor pattern to allow subclasses to process single assignments individually. 
Args: targets: Union[List[ast.AST, ...], Tuple[ast.AST, ...], ast.AST], should be used with the targets field of an ast.Assign node values: ast.AST apply_fn: Callable[[ast.AST, ast.AST], None], called with the respective nodes of each single assignment" 1319,parallel_walk,tensorflow/tensorflow/python/autograph/pyct/ast_util.py,283,function,"Walks two ASTs in parallel. The two trees must have identical structure. Args: node: Union[ast.AST, Iterable[ast.AST]] other: Union[ast.AST, Iterable[ast.AST]] Yields: Tuple[ast.AST, ast.AST] Raises: ValueError: if the two trees don't have identical structure." 1320,AstUtilTest,tensorflow/tensorflow/python/autograph/pyct/ast_util_test.py,35,class, 1321,_TransformedFnCache,tensorflow/tensorflow/python/autograph/pyct/cache.py,26,class,"Generic hierarchical cache for transformed functions. The keys are soft references (i.e. they are discarded when the key is destroyed) created from the source function by `_get_key`. The subkeys are strong references and can be any value. Typically they identify different kinds of transformation." 1322,CodeObjectCache,tensorflow/tensorflow/python/autograph/pyct/cache.py,63,class,"A function cache based on code objects. Code objects are good proxies for the source code of a function. This cache efficiently handles functions that share code objects, such as functions defined in a loop, bound methods, etc. The cache falls back to the function object, if it doesn't have a code object." 1323,UnboundInstanceCache,tensorflow/tensorflow/python/autograph/pyct/cache.py,81,class,"A function cache based on unbound function objects. Using the function for the cache key allows efficient handling of object methods. Unlike the CodeObjectCache, this discriminates between different functions even if they have the same code. This is needed for decorators that may masquerade as another function." 1324,CacheTest,tensorflow/tensorflow/python/autograph/pyct/cache_test.py,25,class, 1325,Node,tensorflow/tensorflow/python/autograph/pyct/cfg.py,54,class,"A node in the CFG. Although new instances of this class are mutable, the objects that a user finds in the CFG are typically not. The nodes also encode the edges of the CFG, and maintain pointers to allow efficient walking in both forward and reverse order. The following property holds for all nodes: ""child in node.next"" iff ""node in child.prev"". Attributes: next: FrozenSet[Node, ...], the nodes that follow this node, in control flow order prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse control flow order ast_node: ast.AST, the AST node corresponding to this CFG node" 1326,Graph,tensorflow/tensorflow/python/autograph/pyct/cfg.py,95,class,"A Control Flow Graph. The CFG maintains an index to allow looking up a CFG node by the AST node to which it is associated. The index can also be enumerated in top-down, depth first order. Walking the graph in forward or reverse order is supported by double parent-child links. Note: the error nodes are not wired to their corresponding finally guards, because these are shared, and wiring them would create a reverse path from normal control flow into the error nodes, which we want to avoid. The graph also maintains edges corresponding to higher level statements like for-else loops. A node is considered a successor of a statement if there is an edge from a node that is lexically a child of that statement to a node that is not. Statement predecessors are analogously defined. 
Attributes: entry: Node, the entry node exit: FrozenSet[Node, ...], the exit nodes error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised error (errors propagated from function calls are not accounted) index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their predecessor CFG nodes stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their successor CFG nodes" 1327,_WalkMode,tensorflow/tensorflow/python/autograph/pyct/cfg.py,145,class, 1328,GraphVisitor,tensorflow/tensorflow/python/autograph/pyct/cfg.py,152,class,"Base class for CFG visitors. This implementation is not thread safe. The visitor has some facilities to simplify dataflow analyses. In particular, it allows revisiting the nodes at the decision of the subclass. This can be used to visit the graph until the state reaches a fixed point. For more details on dataflow analysis, see https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf Note: the literature generally suggests visiting successor nodes only when the state of the current node changed, regardless of whether that successor has ever been visited. This implementation visits every successor at least once. Attributes: graph: Graph in_: Dict[Node, Any], stores node-keyed state during a visit out: Dict[Node, Any], stores node-keyed state during a visit" 1329,GraphBuilder,tensorflow/tensorflow/python/autograph/pyct/cfg.py,252,class,"Builder that constructs a CFG from a given AST. This GraphBuilder facilitates constructing the DAG that forms the CFG when nodes are supplied in lexical order (i.e., top-down, depth first). Under these conditions, it supports building patterns found in typical structured programs. This builder ignores the flow generated by exceptions, which are assumed to always be catastrophic and present purely for diagnostic purposes (e.g. to print debug information). Statements like raise and try/catch sections are allowed and will generate control flow edges, but ordinary statements are assumed not to raise exceptions. Finally sections are also correctly interleaved between break/continue/return nodes and their subsequent statements. Important concepts: * nodes - nodes refer to CFG nodes; AST nodes are qualified explicitly * leaf set - since the graph is constructed gradually, a leaf set maintains the CFG nodes that will precede the node that the builder expects to receive next; when an ordinary node is added, it is connected to the existing leaves and it in turn becomes the new leaf * jump nodes - nodes that should generate edges other than what ordinary nodes would; these correspond to break, continue and return statements * sections - logical delimiters for subgraphs that require special edges; there are various types of sections, each admitting various types of jump nodes; sections are identified by their corresponding AST node" 1330,AstToCfg,tensorflow/tensorflow/python/autograph/pyct/cfg.py,647,class,"Converts an AST to CFGs. A separate CFG will be constructed for each function." 
1331,build,tensorflow/tensorflow/python/autograph/pyct/cfg.py,964,function, 1332,CountingVisitor,tensorflow/tensorflow/python/autograph/pyct/cfg_test.py,28,class, 1333,GraphVisitorTest,tensorflow/tensorflow/python/autograph/pyct/cfg_test.py,42,class, 1334,AstToCfgTest,tensorflow/tensorflow/python/autograph/pyct/cfg_test.py,93,class, 1335,FrameInfo,tensorflow/tensorflow/python/autograph/pyct/error_utils.py,26,class, 1336,_stack_trace_inside_mapped_code,tensorflow/tensorflow/python/autograph/pyct/error_utils.py,34,function,"Summarizes inner traceback frames up to the call to a given function. This function locates the innermost (i.e. most recent) frame that corresponds to code that can be mapped by source_map, and returns a translated stack trace ending at that frame. If no such frame is found, the entire stack trace is summarized. For example, the following code: def f(): for i in tf.range(1): z = y + i # z only defined here Would generate this traceback: ag__.for_stmt(...) return _known_len_tf_for_stmt(iter_, extra_test, body, init_state) <_known_len_tf_for_stmt> _disallow_undefs_into_loop(*init_state) <_disallow_undefs_into_loop> raise ... Which is then processed into: for i in tf.range(1): return _known_len_tf_for_stmt(iter_, extra_test, body, init_state) <_known_len_tf_for_stmt> _disallow_undefs_into_loop(*init_state) <_disallow_undefs_into_loop> raise ... Args: tb: traceback.FrameSummary, the traceback corresponding to an error. Typically, the output of traceback.StackSummary.extract(capture_locals=True). source_map: Dict[LineLocation, OriginInfo], a source map as created by origin_info.create_source_map. converter_filename: str, the file path of the converted module. Call frames corresponding to this module are elided and their preceding frames are marked as allowlisted. Note that frames enclosing converted code are dropped using a different mechanism. Returns: List[FrameInfo]" 1337,MultilineMessageKeyError,tensorflow/tensorflow/python/autograph/pyct/error_utils.py,141,class, 1338,ErrorMetadataBase,tensorflow/tensorflow/python/autograph/pyct/error_utils.py,153,class,"Container objects attached to exceptions raised in user code. This metadata allows re-raising exceptions that occur in generated code, with a custom error message that includes a stack trace relative to user-readable code from which the generated code originated." 1339,ErrorMetadataBaseTest,tensorflow/tensorflow/python/autograph/pyct/error_utils_test.py,28,class, 1340,PyCTError,tensorflow/tensorflow/python/autograph/pyct/errors.py,22,class,Base class for all exceptions. 1341,UnsupportedLanguageElementError,tensorflow/tensorflow/python/autograph/pyct/errors.py,27,class,Raised for code patterns that AutoGraph does not support. 1342,_is_constant_gast_2,tensorflow/tensorflow/python/autograph/pyct/gast_util.py,31,function, 1343,_is_constant_gast_3,tensorflow/tensorflow/python/autograph/pyct/gast_util.py,36,function, 1344,is_literal,tensorflow/tensorflow/python/autograph/pyct/gast_util.py,40,function,Tests whether node represents a Python literal. 1345,_is_ellipsis_gast_2,tensorflow/tensorflow/python/autograph/pyct/gast_util.py,53,function, 1346,_is_ellipsis_gast_3,tensorflow/tensorflow/python/autograph/pyct/gast_util.py,57,function, 1347,islambda,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,60,function, 1348,isnamedtuple,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,68,function,Returns True if the argument is a namedtuple-like. 
1349,isbuiltin,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,82,function,Returns True if the argument is a built-in function. 1350,isconstructor,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,96,function,"Returns True if the argument is an object constructor. In general, any object of type class is a constructor, with the exception of classes created using a callable metaclass. See below for why a callable metaclass is not a trivial combination: https://docs.python.org/2.7/reference/datamodel.html#customizing-class-creation Args: cls: Any Returns: Bool" 1351,_fix_linecache_record,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,116,function,"Fixes potential corruption of linecache in the presence of functools.wraps. functools.wraps modifies the target object's __module__ field, which seems to confuse linecache in special instances, for example when the source is loaded from a .par file (see https://google.github.io/subpar/subpar.html). This function simply triggers a call to linecache.updatecache when a mismatch is detected between the object's __module__ property and the object's source file. Args: obj: Any" 1352,getimmediatesource,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,143,function,A variant of inspect.getsource that ignores the __wrapped__ property. 1353,getnamespace,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,151,function,"Returns the complete namespace of a function. Namespace is defined here as the mapping of all non-local variables to values. This includes the globals and the closure variables. Note that this captures the entire globals collection of the function, and may contain extra symbols that it does not actually use. Args: f: User defined function. Returns: A dict mapping symbol names to values." 1354,getqualifiedname,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,177,function,"Returns the name by which a value can be referred to in a given namespace. If the object defines a parent module, the function attempts to use it to locate the object. This function will recurse inside modules, but it will not search objects for attributes. The recursion depth is controlled by max_depth. Args: namespace: Dict[str, Any], the namespace to search into. object_: Any, the value to search. max_depth: Optional[int], a limit to the recursion depth when searching inside modules. visited: Optional[Set[int]], IDs of modules to avoid visiting. Returns: Union[str, None], the fully-qualified name that resolves to the value `object_`, or None if it couldn't be found." 1355,_get_unbound_function,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,240,function, 1356,getdefiningclass,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,250,function,Resolves the class (e.g. one of the superclasses) that defined a method. 1357,getmethodclass,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,265,function,"Resolves a function's owner, e.g. a method's class. Note that this returns the object that the function was retrieved from, not necessarily the class where it was defined. This function relies on Python stack frame support in the interpreter, and has the same limitations as inspect.currentframe. Limitations: this function will only work correctly if the owned class is visible in the caller's global or local variables. 
Args: m: A user-defined function Returns: The class that this function was retrieved from, or None if the function is not an object or class method, or the class that owns the object or method is not visible to m. Raises: ValueError: if the class could not be resolved for any unexpected reason." 1358,getfutureimports,tensorflow/tensorflow/python/autograph/pyct/inspect_utils.py,339,function,"Detects what future imports are necessary to safely execute entity source. Args: entity: Any object Returns: A tuple of future strings" 1359,decorator,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,37,function, 1360,function_decorator,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,41,function, 1361,wrapping_decorator,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,47,function, 1362,TestClass,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,59,class, 1363,free_function,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,85,function, 1364,factory,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,89,function, 1365,free_factory,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,93,function, 1366,InspectUtilsTest,tensorflow/tensorflow/python/autograph/pyct/inspect_utils_test.py,99,class, 1367,_remove_file,tensorflow/tensorflow/python/autograph/pyct/loader.py,37,function,"Remove a file, if it exists." 1368,load_source,tensorflow/tensorflow/python/autograph/pyct/loader.py,50,function,Loads the given source code as a Python module. 1369,load_ast,tensorflow/tensorflow/python/autograph/pyct/loader.py,70,function,"Loads the given AST as a Python module. Compiling the AST code this way ensures that the source code is readable by e.g. `pdb` or `inspect`. Args: nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST object. indentation: Text, the string to use for indentation. include_source_map: bool, whether to return a source map. delete_on_exit: bool, whether to delete the temporary file used for compilation on exit. Returns: Tuple[module, Text, Dict[LineLocation, OriginInfo]], containing: the module containing the unparsed nodes, the source code corresponding to nodes, and the source map. If include_source_map is False, the source map will be None." 1370,load_source,tensorflow/tensorflow/python/autograph/pyct/loader_deprecated_py2.py,40,function,Loads the given source code as a Python module. 1371,load_ast,tensorflow/tensorflow/python/autograph/pyct/loader_deprecated_py2.py,58,function,"Loads the given AST as a Python module. Compiling the AST code this way ensures that the source code is readable by e.g. `pdb` or `inspect`. Args: nodes: Union[ast.AST, Iterable[ast.AST]], the code to compile, as an AST object. indentation: Text, the string to use for indentation. include_source_map: bool, whether to return a source map. delete_on_exit: bool, whether to delete the temporary file used for compilation on exit. Returns: Tuple[module, Text, Dict[LineLocation, OriginInfo]], containing: the module containing the unparsed nodes, the source code corresponding to nodes, and the source map. If include_source_map is False, the source map will be None." 1372,LoaderTest,tensorflow/tensorflow/python/autograph/pyct/loader_test.py,33,class, 1373,Namer,tensorflow/tensorflow/python/autograph/pyct/naming.py,24,class,Symbol name generator. 
1374,NamerTest,tensorflow/tensorflow/python/autograph/pyct/naming_test.py,25,class, 1375,LineLocation,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,35,class,"Similar to Location, but without column information. Attributes: filename: Text lineno: int, 1-based" 1376,Location,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,46,class,"Encodes code location information. Attributes: filename: Text lineno: int, 1-based col_offset: int line_loc: LineLocation" 1377,OriginInfo,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,62,class,"Container for information about the source code before conversion. Attributes: loc: Location function_name: Optional[Text] source_code_line: Text comment: Optional[Text]" 1378,create_source_map,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,89,function,"Creates a source map between an annotated AST and the code it compiles to. Note: this function assumes nodes, code and filepath correspond to the same code. Args: nodes: Iterable[ast.AST, ...], one or more AST nodes. code: Text, the source code in which nodes are found. filepath: Text Returns: Dict[LineLocation, OriginInfo], mapping locations in code to locations indicated by origin annotations in node." 1379,_Function,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,160,class, 1380,OriginResolver,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,166,class,Annotates an AST with additional source information like file name. 1381,resolve,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,226,function,"Adds origin information to an AST, based on the source it was loaded from. This allows us to map the original source code line numbers to generated source code. Note: the AST may be a part of a larger context (e.g. a function is part of a module that may contain other things). However, this function does not assume the source argument contains the entire context, nor that it contains only code corresponding to node itself. It does assume, however, that node was parsed from the given source code. For this reason, two extra arguments are required, and they indicate the location of the node in the original context. Args: node: gast.AST, the AST to annotate. source: Text, the source code representing node. context_filepath: Text context_lineno: int context_col_offset: int" 1382,resolve_entity,tensorflow/tensorflow/python/autograph/pyct/origin_info.py,271,function,"Like resolve, but extracts the context information from an entity." 1383,OriginInfoTest,tensorflow/tensorflow/python/autograph/pyct/origin_info_test.py,34,class, 1384,_unfold_continuations,tensorflow/tensorflow/python/autograph/pyct/parser.py,60,function,Removes any backslash line continuations from the code. 1385,dedent_block,tensorflow/tensorflow/python/autograph/pyct/parser.py,65,function,Dedents a block of code so that its first line starts at row zero. 1386,parse_entity,tensorflow/tensorflow/python/autograph/pyct/parser.py,133,function,"Returns the AST and source code of a given entity. Args: entity: Any, Python function/method/class future_features: Iterable[Text], future features to use (e.g. 'print_statement'). See https://docs.python.org/2/reference/simple_stmts.html#future Returns: gast.AST, Text: the parsed AST node; the source code that was parsed to generate the AST (including any prefixes that this function may have added)." 1387,_without_context,tensorflow/tensorflow/python/autograph/pyct/parser.py,169,function,Returns a clean node and source code without indenting and context. 
1388,_arg_name,tensorflow/tensorflow/python/autograph/pyct/parser.py,203,function, 1389,_node_matches_argspec,tensorflow/tensorflow/python/autograph/pyct/parser.py,212,function,Returns True if node fits the argspec of func. 1390,_parse_lambda,tensorflow/tensorflow/python/autograph/pyct/parser.py,234,function,"Returns the AST and source code of a given lambda function. Args: lam: types.LambdaType, Python function/method/class Returns: gast.AST, Text: the parsed AST node; the source code that was parsed to generate the AST (including any prefixes that this function may have added)." 1391,parse,tensorflow/tensorflow/python/autograph/pyct/parser.py,323,function,"Returns the AST of a given piece of code. Args: src: Text preamble_len: Int, indicates leading nodes in the parsed AST which should be dropped. single_node: Bool, whether `src` is assumed to be represented by exactly one AST node. Returns: ast.AST" 1392,parse_expression,tensorflow/tensorflow/python/autograph/pyct/parser.py,347,function,"Returns the AST of a given expression. Args: src: A piece of code that represents a single Python expression Returns: A gast.AST object. Raises: ValueError: if src does not consist of a single Expression." 1393,unparse,tensorflow/tensorflow/python/autograph/pyct/parser.py,366,function,"Returns the source code of given AST. Args: node: The code to compile, as an AST object. indentation: Unused, deprecated. The returned code will always be indented at 4 spaces. include_encoding_marker: Bool, whether to include a comment on the first line to explicitly specify UTF-8 encoding. Returns: code: The source code generated from the AST object source_mapping: A mapping between the user and AutoGraph generated code." 1394,ParserTest,tensorflow/tensorflow/python/autograph/pyct/parser_test.py,31,class, 1395,PrettyPrinter,tensorflow/tensorflow/python/autograph/pyct/pretty_printer.py,26,class,Print AST nodes. 1396,fmt,tensorflow/tensorflow/python/autograph/pyct/pretty_printer.py,128,function, 1397,PrettyPrinterTest,tensorflow/tensorflow/python/autograph/pyct/pretty_printer_test.py,28,class, 1398,CallerMustSetThis,tensorflow/tensorflow/python/autograph/pyct/qual_names.py,36,class, 1399,Symbol,tensorflow/tensorflow/python/autograph/pyct/qual_names.py,40,class,Represents a Python symbol. 1400,Literal,tensorflow/tensorflow/python/autograph/pyct/qual_names.py,44,class,Represents a Python numeric literal. 1401,QN,tensorflow/tensorflow/python/autograph/pyct/qual_names.py,57,class,Represents a qualified name. 1402,QnResolver,tensorflow/tensorflow/python/autograph/pyct/qual_names.py,210,class,"Annotates nodes with QN information. Note: Not using NodeAnnos to avoid circular dependencies." 1403,resolve,tensorflow/tensorflow/python/autograph/pyct/qual_names.py,251,function, 1404,from_str,tensorflow/tensorflow/python/autograph/pyct/qual_names.py,255,function, 1405,QNTest,tensorflow/tensorflow/python/autograph/pyct/qual_names_test.py,31,class, 1406,QNResolverTest,tensorflow/tensorflow/python/autograph/pyct/qual_names_test.py,183,class, 1407,ContextAdjuster,tensorflow/tensorflow/python/autograph/pyct/templates.py,35,class,"Adjusts the ctx field of nodes to ensure consistency. This transformer can change the ctx fields of a variable, tuple and other AST elements that allow one, based on whether the element is being read or written." 1408,ReplaceTransformer,tensorflow/tensorflow/python/autograph/pyct/templates.py,108,class,Replace AST nodes. 
1409,_convert_to_ast,tensorflow/tensorflow/python/autograph/pyct/templates.py,218,function,Converts from a known data type to AST. 1410,replace,tensorflow/tensorflow/python/autograph/pyct/templates.py,234,function,"Replaces placeholders in a Python template. AST Name and Tuple nodes always receive the context inferred from the template. However, when replacing more complex nodes (that can potentially contain Name children), the caller is responsible for setting the appropriate context. Args: template: A string representing Python code. Any symbol name that appears in the template code can be used as a placeholder. **replacements: A mapping from placeholder names to (lists of) AST nodes that these placeholders will be replaced by. String values are also supported as a shorthand for AST Name nodes with the respective ID. Returns: An AST node or list of AST nodes with the replacements made. If the template was a function, a list will be returned. If the template was a node, the same node will be returned. If the template was a string, an AST node will be returned (a `Module` node in the case of a multi-line string, an `Expr` node otherwise). Raises: ValueError: if the arguments are incorrect." 1411,replace_as_expression,tensorflow/tensorflow/python/autograph/pyct/templates.py,279,function,"Variant of replace that generates expressions, instead of code blocks." 1412,_CtxClearer,tensorflow/tensorflow/python/autograph/pyct/templates_test.py,33,class, 1413,_parse_with_unset_ctx,tensorflow/tensorflow/python/autograph/pyct/templates_test.py,42,function, 1414,_CtxChecker,tensorflow/tensorflow/python/autograph/pyct/templates_test.py,48,class, 1415,TemplatesTest,tensorflow/tensorflow/python/autograph/pyct/templates_test.py,64,class, 1416,AnalysisLevel,tensorflow/tensorflow/python/autograph/pyct/transformer.py,32,class, 1417,Context,tensorflow/tensorflow/python/autograph/pyct/transformer.py,41,class,"Contains information about a source code transformation. This object is mutable, and is updated during conversion. Not thread safe. Attributes: info: EntityInfo, immutable. namer: naming.Namer. current_origin: origin_info.OriginInfo, holds the OriginInfo of the last AST node to be processed successfully. Useful for error handling. user: A user-supplied context object. The object is opaque to the infrastructure, but will be passed through to all custom transformations." 1418,EntityInfo,tensorflow/tensorflow/python/autograph/pyct/transformer.py,63,class,"Contains information about a Python entity. Immutable. Examples of entities include functions and classes. Attributes: name: The name that identifies this entity. source_code: The entity's source code. source_file: The entity's source file. future_features: Tuple[Text], the future features that this entity was compiled with. See https://docs.python.org/2/reference/simple_stmts.html#future. namespace: Dict[str, Any], containing symbols visible to the entity (excluding parameters)." 1419,_StateStack,tensorflow/tensorflow/python/autograph/pyct/transformer.py,87,class,"Templated context manager. This class provides syntactic sugar for a stack of objects of known type. It allows accessing attributes of the object at the top of the stack directly against this object, which allows for very terse syntax. For example, this code: stack = _StateStack(Foo) stack.enter() stack.bar Is equivalent to: stack = [] stack.append(Foo()) foo = stack[-1] foo.bar See _State for more on how this is used. 
Attributes: type: Any, the type of objects that this stack holds level: int, the current stack depth stack: List[Any], the actual stack value: Any, the instance of the object at the top of the stack" 1420,_State,tensorflow/tensorflow/python/autograph/pyct/transformer.py,159,class,"Syntactic sugar for accessing an instance of a StateStack context manager. This structure offers syntactic sugar over a dict of stacks of objects of known type. These structures are useful to keep state during AST walks. Multiple different scopes can be tracked in parallel. For example: s = _State() s[foo].enter() s[bar].enter() # this will not affect s[foo] Element access has special semantics: * keys are a data type * element values are _StateStack(type=key) objects * missing elements are automatically added, similarly to defaultdict For example, the following block: _State s s[Foo] Is equivalent to: s = {} if Foo not in s: s[Foo] = Foo() s[Foo] See Base for how it's used." 1421,NodeStateTracker,tensorflow/tensorflow/python/autograph/pyct/transformer.py,200,class,"Base class for general-purpose Python code transformation. This abstract class provides helpful functions, like state tracking within the scope of an arbitrary node, helpers for processing code blocks, debugging, mapping of transformed code to original code, and others. Scope-local state tracking: to keep state across nodes, at the level of (possibly nested) scopes, use enter/exit_local_scope and set/get_local. You must call enter/exit_local_scope manually, but the transformer detects when they are not properly paired. The transformer allows keeping state across calls that is local to arbitrary nodes and their descendants, using the self.state attribute. Multiple independent scopes are allowed and automatically constructed. For example, to keep track of the `If` node that encloses any `Name` node, one can write: ``` class FooType(object): def __init__(self): self.foo_property = None class DummyTransformer(NodeStateTracker, ast.NodeTransformer): def visit_If(self, node): self.state[FooType].enter() self.state[FooType].foo_property = node node = self.generic_visit(node) self.state[FooType].exit() return node def visit_Name(self, node): self.state[FooType].foo_property # will hold the innermost enclosing if ``` Alternatively, the `enter()`/`exit()` calls can be managed by a `with` statement: ``` def visit_If(self, node): with self.state[FooType] as foo: foo.foo_property = node return self.generic_visit(node) ```" 1422,Base,tensorflow/tensorflow/python/autograph/pyct/transformer.py,360,class,"Base class for general-purpose Python-to-Python code transformation. This is an extension of ast.NodeTransformer that provides the additional functions offered by NodeStateTracker." 1423,CodeGenerator,tensorflow/tensorflow/python/autograph/pyct/transformer.py,478,class,"Base class for general-purpose Python-to-string code transformation. Similar to Base, but outputs arbitrary strings instead of a Python AST. This uses the same visitor mechanism that the standard NodeVisitor uses, meaning that subclasses write handlers for the different kinds of nodes. New code is generated using the emit method, which appends to a code buffer that can be afterwards obtained from code_buffer. Example: class SimpleCodeGen(CodeGenerator): def visitIf(self, node): self.emit('if ') self.visit(node.test) self.emit(' { ') self.visit(node.body) self.emit(' } else { ') self.visit(node.orelse) self.emit(' } ') node = ast.parse(...) 
gen = SimpleCodeGen() gen.visit(node) # gen.code_buffer contains the resulting code" 1424,TransformerTest,tensorflow/tensorflow/python/autograph/pyct/transformer_test.py,30,class, 1425,CodeGeneratorTest,tensorflow/tensorflow/python/autograph/pyct/transformer_test.py,302,class, 1426,_wrap_into_factory,tensorflow/tensorflow/python/autograph/pyct/transpiler.py,38,function,"Wraps an AST into the body of a factory with consistent lexical context. The AST is expected to define some symbol with a name given by `entity_name`. This mechanism ensures that the resulting transformed entity has lexical scoping identical to that of the source entity, while allowing extra parametrization. Two nested factories achieve the following: 1. The inner factory dynamically creates the entity represented by `nodes`. 2. The inner factory is parametrized by a custom set of arguments. 3. The inner factory has a closure identical to that of the transformed entity. 4. The inner factory has local variables named like `args`, which `nodes` may use as additional parameters. 5. The inner factory returns the variables given by `entity_name`. 6. The outer factory is niladic. 7. The outer factory has no closure. 8. The outer factory creates the necessary lexical scope for the inner factory, so that the loaded code has the given configuration for closure/globals. 9. The outer factory returns the inner factory. Roughly speaking, the following code is generated: from __future__ import future_feature_1 from __future__ import future_feature_2 ... def outer_factory(): closure_var_1 = None closure_var_2 = None ... def inner_factory(arg_1, arg_2, ...): <> return entity return inner_factory The lexical scoping is created using dummy symbol declarations which create local variables in the body of the outer factory, so that the Python parser correctly marks them as free non-global variables upon load (that is, it creates cell slots for each symbol). These symbols are initialized with None, but their values are not expected to be used; instead, the caller is expected to replace them with the cells of the source entity. For more details, see: https://docs.python.org/3/reference/executionmodel.html#binding-of-names Args: nodes: Tuple[ast.AST], the source code to wrap. entity_name: Union[Text, ast.AST], the name of the principal entity that `nodes` define. inner_factory_name: Text, the name of the inner factory. outer_factory_name: Text, the name of the outer factory. closure_vars: Iterable[Text], names of the closure variables for the inner factory. factory_args: Iterable[Text], names of additional arguments for the inner factory. Useful to configure variables that the converted code can use. Typically, these are modules. future_features: Iterable[Text], names of future statements to associate the code with. Returns: ast.AST" 1427,_PythonFnFactory,tensorflow/tensorflow/python/autograph/pyct/transpiler.py,147,class,Helper object that wraps a Python function factory. 1428,GenericTranspiler,tensorflow/tensorflow/python/autograph/pyct/transpiler.py,227,class,"A generic transpiler for Python functions. Its interface is the `transform` API, which can process Python function objects. Internally, it handles parsing. Users typically subclass this, customizing the `transform_ast` method. The output of transform_ast is returned directly by `transform`. Existing methods like `transform_function` may also be overloaded. 
Example: class MyTransformer(GenericTranspiler): def transform_ast(self, node, ctx): result = <> return result transformer = MyTransformer() result = transformer.transform(f, ...) # result is the output" 1429,PyToPy,tensorflow/tensorflow/python/autograph/pyct/transpiler.py,368,class,"A generic Python-to-Python transpiler. Its `transform` method offers a function-in, function-out interface. Internally, it takes care of parsing, caching and loading of the translated code. Users typically subclass this, overriding `transform_ast`. Usually, instances of this class are singletons, since each instance manages its own cache. The caching can be controlled by overriding `get_caching_key`. Example: class MyTransformer(PyToPy): def transform_ast(self, node, ctx): node = <> return node transformer = MyTransformer() new_f, module, source_map = transformer.transform_function(f, ...) # new_f is a function with signature identical to f The transformed function has access to the same namespace as the original function. To allow access to internal APIs, users may inject additional symbols by overriding `get_extra_locals`." 1430,FlipSignTransformer,tensorflow/tensorflow/python/autograph/pyct/transpiler_test.py,30,class, 1431,TestTranspiler,tensorflow/tensorflow/python/autograph/pyct/transpiler_test.py,38,class, 1432,PyToPyTest,tensorflow/tensorflow/python/autograph/pyct/transpiler_test.py,55,class, 1433,DummyGensym,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf.py,40,class,A dumb gensym that suffixes a stem by sequential numbers from 1000. 1434,ASTEdgePattern,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf.py,60,class,"A pattern defining a type of AST edge. This consists of three components: - The type of the parent node, checked with isinstance, - The name of the field, checked with string equality, and - The type of the child node, also checked with isinstance. If all three match, the whole pattern is considered to match. In all three slots, the special value `anf.ANY` is treated as ""match anything"". The internal nodes are produced from the `gast` library rather than the standard `ast` module, which may affect `isinstance` checks." 1435,AnfTransformer,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf.py,89,class,Performs the conversion to A-normal form (ANF). 1436,_is_py2_name_constant,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf.py,483,function, 1437,_is_trivial,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf.py,487,function,"Returns whether to consider the given node 'trivial'. The definition of 'trivial' is a node that can't meaningfully be pulled out into its own assignment statement. This is surprisingly difficult to do robustly across versions of Python and gast, as the parsing of constants has changed, if I may, constantly. Args: node: An AST node to check for triviality Returns: trivial: A Python `bool` indicating whether the node is trivial." 1438,transform,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf.py,527,function,"Converts the given node to A-normal form (ANF). The general idea of A-normal form: https://en.wikipedia.org/wiki/A-normal_form The specific converters used here are based on Python AST semantics as documented at https://greentreesnakes.readthedocs.io/en/latest/. What exactly should be considered A-normal form for any given programming language is not completely obvious. 
The transformation defined here is therefore configurable as to which syntax to replace with a fresh variable and which to leave be. The configuration is intentionally flexible enough to define very precise variable insertion transformations, should that be desired. The configuration is a list of syntax rules, each of which is a 2-tuple: - An `ASTEdgePattern` (which see) defining a type of AST edge, and - Whether to transform children of such edges. The special object `anf.ANY` may be used as a pattern that matches all edges. Each replacement directive is one of three possible things: - The object `anf.REPLACE`, meaning ""Replace this child node with a variable"", - The object `anf.LEAVE`, meaning ""Do not replace this child node with a variable"", or - A Python callable. If a callable, it is called with the parent node, the field name, and the child node, and must compute a boolean indicating whether to transform the child node or not. The callable is free to use whatever context information it chooses. The callable may be invoked more than once on the same link, and must produce the same answer each time. The syntax rules are tested in order, and the first match governs. If no rule matches, the node is not transformed. The above rules notwithstanding, - Variable references are never replaced with (fresh) variables, as that would accomplish nothing. - The left-hand children of Assign and AugAssign nodes, and the children of Del nodes, are never replaced with variables, as that would break their semantics. - The right-hand children of Assign nodes are never replaced with variables, as the original assignment would still have to be present in the result to define the new variable. (That is, there's no point in transforming `x = sin(y)` into `tmp = sin(y); x = tmp`.) - The right-hand children of AugAssign nodes are never replaced with variables either, but only because the difference from Assign was considered a potential source of confusion (and it would have been slightly awkward in the code to treat the RHS differently than the LHS). - Various special-purpose AST nodes are not exposed to the configuration, lest the transform produce invalid syntax like, e.g., `tmp = +; x = 1 tmp 2`. For example, the configuration ```python [(anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)] ``` gives explicit fresh names to all expressions regardless of context (except as outlined above), whereas ```python [(anf.ASTEdgePattern(gast.If, ""test"", anf.ANY), anf.REPLACE)] ``` only transforms the conditionals of `if` statements (but not, e.g., `while`). If no configuration is supplied, the default behavior is to transform all expressions except literal constants, which corresponds to the following configuration: ```python # For Python 3, and gast library versions before 0.3 literals = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant) [(anf.ASTEdgePattern(anf.ANY, anf.ANY, literals), anf.LEAVE), (anf.ASTEdgePattern(anf.ANY, anf.ANY, gast.expr), anf.REPLACE)] ``` Args: node: The node to transform. ctx: transformer.EntityInfo. TODO(mdan): What information does this argument provide? config: Optional ANF configuration. If omitted, ANF replaces all expressions except literal constants." 
1439,exec_test_function,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf_test.py,34,function, 1440,exec_expected_result,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf_test.py,40,function, 1441,AnfTestBase,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf_test.py,49,class, 1442,AnfTransformerTest,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf_test.py,85,class, 1443,AnfNonTransformationTest,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf_test.py,433,class,"Test that specifying ""no transformation"" does nothing. Reuses all the examples of AnfTransformerTest by overriding `assert_body_anfs_as_expected_`." 1444,AnfConfiguredTest,tensorflow/tensorflow/python/autograph/pyct/common_transformers/anf_test.py,454,class, 1445,Scope,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity.py,36,class,"Encloses local symbol definition and usage information. This can track, for instance, whether a symbol is modified in the current scope. Note that scopes do not necessarily align with Python's scopes. For example, the body of an if statement may be considered a separate scope. Caution - the AST references held by this object are weak. Scope objects are mutable during construction only, and must be frozen using `Scope.finalize()` before use. Furthermore, a scope is consistent only after all its children have been frozen. While analysing code blocks, scopes are built gradually, from the innermost scope outward. Freezing indicates that the analysis of a code block is complete. Once frozen, mutation is no longer allowed. `is_final` tracks whether the scope is frozen or not. Certain properties, like `referenced`, are only accurate when called on frozen scopes. Attributes: parent: Optional[Scope], the parent scope, if any. isolated: bool, whether the scope is a true Python scope (e.g. the scope of a function), or just a surrogate tracking an ordinary code block. Using the terminology of the Python 3 reference documentation, True roughly represents an actual scope, whereas False represents an ordinary code block. function_name: Optional[str], name of the function owning this scope. isolated_names: Set[qual_names.QN], identifiers that are isolated to this scope (even if the scope is not isolated). annotations: Set[qual_names.QN], identifiers used as type annotations in this scope. read: Set[qual_names.QN], identifiers read in this scope. modified: Set[qual_names.QN], identifiers modified in this scope. deleted: Set[qual_names.QN], identifiers deleted in this scope. bound: Set[qual_names.QN], names that are bound to this scope. See https://docs.python.org/3/reference/executionmodel.html#binding-of-names for a precise definition. globals: Set[qual_names.QN], names that are explicitly marked as global in this scope. Note that this doesn't include free read-only vars bound to global symbols. nonlocals: Set[qual_names.QN], names that are explicitly marked as nonlocal in this scope. Note that this doesn't include free read-only vars bound to nonlocal symbols. free_vars: Set[qual_names.QN], the free variables in this scope. See https://docs.python.org/3/reference/executionmodel.html for a precise definition. params: WeakValueDictionary[qual_names.QN, ast.Node], function arguments visible in this scope, mapped to the function node that defines them. enclosing_scope: Scope, the innermost isolated scope that is a transitive parent of this scope. May be the scope itself. 
referenced: Set[qual_names.QN], the totality of the symbols used by this scope and its parents. is_final: bool, whether the scope is frozen or not. Note - simple statements may never delete and modify a symbol at the same time. However, compound ones like if statements can. In that latter case, it's undefined whether the symbol is actually modified or deleted upon statement exit. Certain analyses like reaching definitions need to be careful about this." 1446,_Comprehension,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity.py,214,class, 1447,_FunctionOrClass,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity.py,224,class, 1448,ActivityAnalyzer,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity.py,230,class,"Annotates nodes with local scope information. See Scope. The use of this class requires that qual_names.resolve() has been called on the node. This class will ignore nodes that have not been annotated with their qualified names." 1449,resolve,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity.py,707,function, 1450,ActivityAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity_py3_test.py,31,class,Tests which can only run in Python 3. 1451,ScopeTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity_test.py,41,class, 1452,ActivityAnalyzerTestBase,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity_test.py,114,class, 1453,ActivityAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/activity_test.py,148,class, 1454,NoValue,tensorflow/tensorflow/python/autograph/pyct/static_analysis/annos.py,27,class, 1455,NodeAnno,tensorflow/tensorflow/python/autograph/pyct/static_analysis/annos.py,33,class,"Additional annotations used by the static analyzer. These are in addition to the basic annotations declared in anno.py." 1456,Analyzer,tensorflow/tensorflow/python/autograph/pyct/static_analysis/liveness.py,40,class,CFG visitor that performs liveness analysis at statement level. 1457,TreeAnnotator,tensorflow/tensorflow/python/autograph/pyct/static_analysis/liveness.py,96,class,"Runs liveness analysis on each of the functions defined in the AST. If a function defines other local functions, those will have separate CFGs. However, dataflow analysis needs to tie up these CFGs to properly emulate the effect of closures. In the case of liveness, the parent function's live variables must account for the variables that are live at the entry of each subfunction. For example: def foo(): # baz is live from here on def bar(): print(baz) This analyzer runs liveness analysis on each individual function, accounting for the effect above." 1458,resolve,tensorflow/tensorflow/python/autograph/pyct/static_analysis/liveness.py,206,function,"Resolves the live symbols at the exit of control flow statements. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] include_annotations: Bool, whether type annotations should be included in the analysis. Returns: ast.AST" 1459,LivenessAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/liveness_py3_test.py,30,class,Tests which can only run in Python 3. 
1460,LivenessAnalyzerTestBase,tensorflow/tensorflow/python/autograph/pyct/static_analysis/liveness_test.py,37,class, 1461,LivenessAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/liveness_test.py,76,class, 1462,Definition,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py,40,class,"Definition objects describe a unique definition of a variable. Subclasses of this may be used by passing an appropriate factory function to resolve. Attributes: param_of: Optional[ast.AST] directives: Dict, optional definition annotations" 1463,_NodeState,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py,59,class,"Abstraction for the state of the CFG walk for reaching definition analysis. This is a value type. Only implements the strictly necessary operators. Attributes: value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and their possible definitions" 1464,Analyzer,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py,112,class,CFG visitor that determines reaching definitions at statement level. 1465,TreeAnnotator,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py,169,class,"AST visitor that annotates each symbol name with its reaching definitions. Simultaneously, the visitor runs the dataflow analysis on each function node, accounting for the effect of closures. For example: def foo(): bar = 1 def baz(): # bar = 1 reaches here" 1466,resolve,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py,279,function,"Resolves reaching definitions for each symbol. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] definition_factory: Callable[[], Definition] Returns: ast.AST" 1467,ReachingDefinitionsAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_py3_test.py,26,class,Tests which can only run in Python 3. 1468,ReachingDefinitionsAnalyzerTestBase,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_test.py,38,class, 1469,ReachingDefinitionsAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions_test.py,88,class, 1470,Definition,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py,32,class,Definition objects describe a unique definition of a function. 1471,_NodeState,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py,39,class,"Abstraction for the state of the CFG walk for reaching definition analysis. This is a value type. Only implements the strictly necessary operators. Attributes: value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and their possible definitions" 1472,Analyzer,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py,76,class,CFG visitor that determines reaching definitions at statement level. 1473,TreeAnnotator,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py,109,class,"AST visitor that annotates each symbol name with its reaching definitions. Simultaneously, the visitor runs the dataflow analysis on each function node, accounting for the effect of closures. For example: def foo(): def f(): pass def g(): # `def f` reaches here" 1474,resolve,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs.py,170,function,"Resolves reaching definitions for each symbol. 
Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] Returns: ast.AST" 1475,ReachingFndefsAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/reaching_fndefs_test.py,33,class, 1476,Resolver,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference.py,41,class,"Resolver objects handle the process of looking up actual names and types. All resolve_* methods: * have a first namespace argument, mapping string to actual values * specify names as QN objects * specify types as a Set of inferred types All resolve_* methods must return either: * a set of `type` objects * None" 1477,_SymbolTable,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference.py,83,class,"Abstraction for the state of the CFG walk for type inference. This is a value type. Only implements the strictly necessary operators. Attributes: value: Dict[qual_names.QN, Set[Type]], mapping symbols to the set of possible types." 1478,StmtInferrer,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference.py,162,class,"Runs type inference on a single AST statement. This visitor annotates most nodes with type information. It also sets types for the symbols modified by this statement in its types_out property." 1479,Analyzer,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference.py,329,class,CFG visitor that propagates type information across statements. 1480,FunctionVisitor,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference.py,394,class,AST visitor that applies type inference to each function separately. 1481,resolve,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference.py,417,function,"Performs type inference. Args: node: ast.AST source_info: transformer.SourceInfo graphs: Dict[ast.FunctionDef, cfg.Graph] resolver: Resolver Returns: ast.AST" 1482,TestResolver,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference_test.py,32,class,A very basic resolver for testing. 1483,TestTranspiler,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference_test.py,58,class, 1484,TypeInferenceAnalyzerTest,tensorflow/tensorflow/python/autograph/pyct/static_analysis/type_inference_test.py,77,class, 1485,simple_function,tensorflow/tensorflow/python/autograph/pyct/testing/basic_definitions.py,23,function,Docstring. 1486,nested_functions,tensorflow/tensorflow/python/autograph/pyct/testing/basic_definitions.py,28,function,Docstring. 1487,function_with_print,tensorflow/tensorflow/python/autograph/pyct/testing/basic_definitions.py,37,function, 1488,SimpleClass,tensorflow/tensorflow/python/autograph/pyct/testing/basic_definitions.py,44,class, 1489,function_with_multiline_call,tensorflow/tensorflow/python/autograph/pyct/testing/basic_definitions.py,53,function,Docstring. 
1490,basic_decorator,tensorflow/tensorflow/python/autograph/pyct/testing/basic_definitions.py,61,function, 1491,decorated_function,tensorflow/tensorflow/python/autograph/pyct/testing/basic_definitions.py,67,function, 1492,NodeSampler,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,30,class, 1493,StatementSampler,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,39,class, 1494,ExpressionSampler,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,49,class, 1495,CompareSampler,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,58,class, 1496,BinaryOpSampler,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,71,class, 1497,UnaryOpSampler,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,83,class, 1498,NameSampler,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,87,class, 1499,CodeGenerator,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,98,class,Generate random syntactically-valid Python ASTs. 1500,generate_random_functiondef,tensorflow/tensorflow/python/autograph/pyct/testing/codegen.py,233,function, 1501,CodeGenTest,tensorflow/tensorflow/python/autograph/pyct/testing/codegen_test.py,28,class, 1502,wrapping_decorator,tensorflow/tensorflow/python/autograph/pyct/testing/decorators.py,24,function, 1503,standalone_decorator,tensorflow/tensorflow/python/autograph/pyct/testing/decorators.py,33,function, 1504,functional_decorator,tensorflow/tensorflow/python/autograph/pyct/testing/decorators.py,41,function, 1505,set_verbosity,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,41,function,"Sets the AutoGraph verbosity level. _Debug logging in AutoGraph_ More verbose logging is useful to enable when filing bug reports or doing more in-depth debugging. There are two means to control the logging verbosity: * The `set_verbosity` function * The `AUTOGRAPH_VERBOSITY` environment variable `set_verbosity` takes precedence over the environment variable. For example: ```python import os import tensorflow as tf os.environ['AUTOGRAPH_VERBOSITY'] = '5' # Verbosity is now 5 tf.autograph.set_verbosity(0) # Verbosity is now 0 os.environ['AUTOGRAPH_VERBOSITY'] = '1' # No effect, because set_verbosity was already called. ``` Log entries are output to [absl](https://abseil.io)'s [default output](https://abseil.io/docs/python/guides/logging), with `INFO` level. Logs can be mirrored to stdout by using the `alsologtostdout` argument. Mirroring is enabled by default when Python runs in interactive mode. Args: level: int, the verbosity level; larger values specify increased verbosity; 0 means no logging. When reporting bugs, it is recommended to set this value to a larger number, like 10. alsologtostdout: bool, whether to also output log messages to `sys.stdout`." 1506,trace,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,92,function,"Traces argument information at compilation time. `trace` is useful when debugging, and it always executes during the tracing phase, that is, when the TF graph is constructed. _Example usage_ ```python import tensorflow as tf for i in tf.range(10): tf.autograph.trace(i) # Output: ``` Args: *args: Arguments to print to `sys.stdout`." 
1507,get_verbosity,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,114,function, 1508,has_verbosity,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,121,function, 1509,_output_to_stdout,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,125,function, 1510,error,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,131,function, 1511,log,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,138,function, 1512,warn,tensorflow/tensorflow/python/autograph/utils/ag_logging.py,145,function, 1513,BasicRef,tensorflow/tensorflow/python/autograph/utils/compat_util.py,27,class,This shim emulates the nonlocal keyword in Py2-compatible source. 1514,deprecated_py2_support,tensorflow/tensorflow/python/autograph/utils/compat_util.py,34,function,Swaps calling module with a Py2-specific implementation. Noop in Py3. 1515,control_dependency_on_returns,tensorflow/tensorflow/python/autograph/utils/context_managers.py,27,function,"Create a TF control dependency on the return values of a function. If the function had no return value, a no-op context is returned. Args: return_value: The return value to set as control dependency. Returns: A context manager." 1516,ContextManagersTest,tensorflow/tensorflow/python/autograph/utils/context_managers_test.py,28,class, 1517,alias_tensors,tensorflow/tensorflow/python/autograph/utils/misc.py,27,function,"Wraps any Tensor arguments with an identity op. Any other argument, including Variables, is returned unchanged. Args: *args: Any arguments. Must contain at least one element. Returns: Same as *args, with Tensor instances replaced as described. Raises: ValueError: If args doesn't meet the requirements." 1518,get_range_len,tensorflow/tensorflow/python/autograph/utils/misc.py,55,function, 1519,MiscTest,tensorflow/tensorflow/python/autograph/utils/misc_test.py,30,class, 1520,MatchDType,tensorflow/tensorflow/python/autograph/utils/py_func.py,28,class,"Allows matching the dtype of an argument. Used in conjunction with function calls. For example, MatchDType(0) will match the DType of the first argument." 1521,wrap_py_func,tensorflow/tensorflow/python/autograph/utils/py_func.py,38,function,"Helper that wraps a callable to py_func. The helper passes tensor arguments through the py_func interface. Non-tensor arguments are allowed, and will be passed to f directly. Note that non-tensor arguments captured by f will not update every time the wrapper is called (this is consistent with its argument list, which only includes the tensor arguments). In general, it's safest not to reuse this wrapper. Args: f: Callable return_dtypes: None, or an individual, tuple or list of DType or MatchDType, the data type for each of f's return value(s). Set to None if f has no return values or use_dummy_return is True. Use MatchDType to define a dtype identical to that of `i`th argument (argument 0 is the first); an argument must be of Tensor type if it is to be used with MatchDType. args: Positional arguments for f, as list or tuple. kwargs: Keyword arguments for f, as dict with string keys. May be None. use_dummy_return: If True, the function will return a dummy value of 1 and discard its actual return value. Returns: The return values of f converted to tensors. Raises: ValueError: if any of the arguments are incorrect." 1522,PyFuncTest,tensorflow/tensorflow/python/autograph/utils/py_func_test.py,27,class, 1523,dynamic_list_append,tensorflow/tensorflow/python/autograph/utils/tensor_list.py,26,function,Converts a list append call inline. 
1524,TensorList,tensorflow/tensorflow/python/autograph/utils/tensor_list.py,43,class,Tensor list wrapper API-compatible with Python built-in list. 1525,TensorListTest,tensorflow/tensorflow/python/autograph/utils/tensor_list_test.py,32,class, 1526,is_dense_tensor,tensorflow/tensorflow/python/autograph/utils/tensors.py,32,function, 1527,is_tensor_array,tensorflow/tensorflow/python/autograph/utils/tensors.py,38,function, 1528,is_tensor_list,tensorflow/tensorflow/python/autograph/utils/tensors.py,42,function, 1529,is_range_tensor,tensorflow/tensorflow/python/autograph/utils/tensors.py,51,function,Returns True if a tensor is the result of a tf.range op. Best effort. 1530,TensorsTest,tensorflow/tensorflow/python/autograph/utils/tensors_test.py,30,class, 1531,AutoGraphTestCase,tensorflow/tensorflow/python/autograph/utils/testing.py,30,class,"Tests specialized for AutoGraph, which run as tf.functions. These tests use a staged programming-like approach: most of the test code runs as-is inside a tf.function, but the assertions are lifted outside the function, and run with the corresponding function values instead. For example, the test: def test_foo(self): baz = bar(); self.assertEqual(baz, value) is equivalent to writing: def test_foo(self): @tf.function def test_fn(): baz = bar(); return baz, value baz_actual, value_actual = test_fn() self.assertEqual(baz_actual, value_actual)" 1532,list_local_devices,tensorflow/tensorflow/python/client/device_lib.py,25,function,"List the devices available in the local process. Args: session_config: a session config proto or None to use the default config. Returns: A list of `DeviceAttribute` protocol buffers." 1533,DeviceLibTest,tensorflow/tensorflow/python/client/device_lib_test.py,28,class, 1534,PywrapeventsWriterTest,tensorflow/tensorflow/python/client/events_writer_test.py,33,class, 1535,main,tensorflow/tensorflow/python/client/notebook.py,53,function, 1536,TF_NewSessionOptions,tensorflow/tensorflow/python/client/pywrap_tf_session.py,51,function, 1537,TF_Reset,tensorflow/tensorflow/python/client/pywrap_tf_session.py,65,function, 1538,SessionInterface,tensorflow/tensorflow/python/client/session.py,51,class,Base class for implementations of TensorFlow client sessions. 1539,_get_indexed_slices_value_from_fetches,tensorflow/tensorflow/python/client/session.py,77,function, 1540,_get_feeds_for_indexed_slices,tensorflow/tensorflow/python/client/session.py,83,function, 1541,_convert_to_numpy_obj,tensorflow/tensorflow/python/client/session.py,139,function,Explicitly convert obj based on numpy type except for string type. 1542,register_session_run_conversion_functions,tensorflow/tensorflow/python/client/session.py,144,function,"Register fetch and feed conversion functions for `tf.Session.run()`. This function registers a triple of conversion functions for fetching and/or feeding values of user-defined types in a call to tf.Session.run(). An example: ```python class SquaredTensor(object): def __init__(self, tensor): self.sq = tf.square(tensor) #you can define conversion functions as follows: fetch_function = lambda squared_tensor:([squared_tensor.sq], lambda val: val[0]) feed_function = lambda feed, feed_val: [(feed.sq, feed_val)] feed_function_for_partial_run = lambda feed: [feed.sq] #then after invoking this register function, you can use as follows: session.run(squared_tensor1, feed_dict = {squared_tensor2 : some_numpy_array}) ``` Args: tensor_type: The type for which you want to register a conversion function. 
fetch_function: A callable that takes an object of type `tensor_type` and returns a tuple, where the first element is a list of `tf.Tensor` objects, and the second element is a callable that takes a list of ndarrays and returns an object of some value type that corresponds to `tensor_type`. fetch_function describes how to expand fetch into its component Tensors and how to contract the fetched results back into a single return value. feed_function: A callable that takes feed_key and feed_value as input, and returns a list of tuples (feed_tensor, feed_val); feed_key must have type `tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed function describes how to unpack a single fed value and map it to feeds of one or more tensors and their corresponding values. feed_function_for_partial_run: A callable for specifying tensor values to feed when setting up a partial run, which takes a `tensor_type` type object as input, and returns a list of Tensors. Raises: ValueError: If `tensor_type` has already been registered." 1543,_is_attrs_instance,tensorflow/tensorflow/python/client/session.py,199,function,Returns True if the given obj is an instance of an attrs-decorated class. 1544,_get_attrs_values,tensorflow/tensorflow/python/client/session.py,204,function,Returns the list of values from an attrs instance. 1545,_FetchMapper,tensorflow/tensorflow/python/client/session.py,210,class,"Definition of the interface provided by fetch mappers. Fetch mappers are utility classes used by the _FetchHandler to handle arbitrary structures for the `fetch` argument to `Session.run()`. The `fetch` argument can be of various shapes: single tensor or op, list of fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The structures can be arbitrarily nested. The low level run() API only wants a list of tensor or op names. The various `_FetchMapper` subclasses below take care of handling the different shapes: uniquifying the fetches, and constructing results with the original shape." 1546,_ElementFetchMapper,tensorflow/tensorflow/python/client/session.py,282,class,Fetch mapper for singleton tensors and ops. 1547,_uniquify_fetches,tensorflow/tensorflow/python/client/session.py,329,function,"Uniquifies fetches from a list of fetch_mappers. This is a utility function used by _ListFetchMapper and _DictFetchMapper. It gathers all the unique fetches from a list of mappers and builds a list containing all of them but without duplicates (unique_fetches). It also returns a 2-D list of integers (values_indices) indicating at which index in unique_fetches the fetches of the mappers are located. This list is as follows: values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index Args: fetch_mappers: list of fetch mappers. Returns: A list of fetches. A 2-D list of integers." 1548,_ListFetchMapper,tensorflow/tensorflow/python/client/session.py,365,class,"Fetch mapper for lists, tuples, and namedtuples." 1549,_DictFetchMapper,tensorflow/tensorflow/python/client/session.py,399,class,Fetch mapper for dicts. 1550,_AttrsFetchMapper,tensorflow/tensorflow/python/client/session.py,425,class,Fetch mapper for attrs decorated classes. 1551,_FetchHandler,tensorflow/tensorflow/python/client/session.py,449,class,"Handler for structured fetches. Given a graph, a user-provided structure for fetches, and a feed dict, this class takes care of generating a list of tensor names to fetch and op names to run for a low level `run()` call. 
Given the results of the low level run call, this class can also rebuild a result structure matching the user-provided structure for fetches, but containing the corresponding results." 1552,_name_list,tensorflow/tensorflow/python/client/session.py,573,function,"Utility function for transitioning to the new session API. Args: tensor_list: a list of `Tensor`s. Returns: A list of each `Tensor`'s name (as byte arrays)." 1553,_DeviceAttributes,tensorflow/tensorflow/python/client/session.py,585,class,"Struct-like object describing a device's attributes. Each device has 3 key properties: - name: the fully-qualified TensorFlow path to the device. For example: /job:worker/replica:0/task:3/device:CPU:0 - device_type: the type of the device (e.g. CPU, GPU, TPU, etc.) - memory_limit_bytes: the maximum amount of memory available on the device (in bytes)." 1554,BaseSession,tensorflow/tensorflow/python/client/session.py,627,class,"A class for interacting with a TensorFlow computation. The BaseSession enables incremental graph building with inline execution of Operations and evaluation of Tensors." 1555,Session,tensorflow/tensorflow/python/client/session.py,1509,class,"A class for running TensorFlow operations. A `Session` object encapsulates the environment in which `Operation` objects are executed, and `Tensor` objects are evaluated. For example: ```python tf.compat.v1.disable_eager_execution() # need to disable eager in TF2.x # Build a graph. a = tf.constant(5.0) b = tf.constant(6.0) c = a * b # Launch the graph in a session. sess = tf.compat.v1.Session() # Evaluate the tensor `c`. print(sess.run(c)) # prints 30.0 ``` A session may own resources, such as `tf.Variable`, `tf.queue.QueueBase`, and `tf.compat.v1.ReaderBase`. It is important to release these resources when they are no longer required. To do this, either invoke the `tf.Session.close` method on the session, or use the session as a context manager. The following two examples are equivalent: ```python # Using the `close()` method. sess = tf.compat.v1.Session() sess.run(...) sess.close() # Using the context manager. with tf.compat.v1.Session() as sess: sess.run(...) ``` The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) protocol buffer exposes various configuration options for a session. For example, to create a session that uses soft constraints for device placement, and log the resulting placement decisions, create a session as follows: ```python # Launch the graph in a session that allows soft device placement and # logs the placement decisions. sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto( allow_soft_placement=True, log_device_placement=True)) ```" 1556,InteractiveSession,tensorflow/tensorflow/python/client/session.py,1679,class,"A TensorFlow `Session` for use in interactive contexts, such as a shell. The only difference from a regular `Session` is that an `InteractiveSession` installs itself as the default session on construction. The methods `tf.Tensor.eval` and `tf.Operation.run` will use that session to run ops. This is convenient in interactive shells and [IPython notebooks](http://ipython.org), as it avoids having to pass an explicit `Session` object to run ops. For example: ```python sess = tf.compat.v1.InteractiveSession() a = tf.constant(5.0) b = tf.constant(6.0) c = a * b # We can just use 'c.eval()' without passing 'sess' print(c.eval()) sess.close() ``` Note that a regular session installs itself as the default session when it is created in a `with` statement. 
The common usage in non-interactive programs is to follow that pattern: ```python a = tf.constant(5.0) b = tf.constant(6.0) c = a * b with tf.compat.v1.Session(): # We can also use 'c.eval()' here. print(c.eval()) ```" 1557,SessionBenchmark,tensorflow/tensorflow/python/client/session_benchmark.py,36,class,Tests and benchmarks for interacting with the `tf.compat.v1.Session`. 1558,SessionClusterSpecPropagationTest,tensorflow/tensorflow/python/client/session_clusterspec_prop_test.py,45,class, 1559,SessionListDevicesTest,tensorflow/tensorflow/python/client/session_list_devices_test.py,33,class, 1560,PartialRunTest,tensorflow/tensorflow/python/client/session_partial_run_test.py,35,class, 1561,SessionTest,tensorflow/tensorflow/python/client/session_test.py,72,class, 1562,AllocationMaximum,tensorflow/tensorflow/python/client/timeline.py,32,class,"Stores the maximum allocation for a given allocator within the timeline. Parameters: timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached. num_bytes: the total memory used at this time. tensors: the set of tensors allocated at this time." 1563,StepStatsAnalysis,tensorflow/tensorflow/python/client/timeline.py,44,class,"Stores the step stats analysis output. Parameters: chrome_trace: A dict containing the chrome trace analysis. allocator_maximums: A dict mapping allocator names to AllocationMaximum." 1564,_ChromeTraceFormatter,tensorflow/tensorflow/python/client/timeline.py,55,class,A helper class for generating traces in Chrome Trace Format. 1565,_TensorTracker,tensorflow/tensorflow/python/client/timeline.py,265,class,An internal class to track the lifetime of a Tensor. 1566,Timeline,tensorflow/tensorflow/python/client/timeline.py,346,class,A class for visualizing execution timelines of TensorFlow steps. 1567,TimelineTest,tensorflow/tensorflow/python/client/timeline_test.py,34,class, 1568,VirtualGpuTestUtil,tensorflow/tensorflow/python/client/virtual_gpu_test.py,38,class, 1569,VirtualGpuTest,tensorflow/tensorflow/python/client/virtual_gpu_test.py,195,class, 1570,_date_to_date_number,tensorflow/tensorflow/python/compat/compat.py,41,function, 1571,_update_forward_compatibility_date_number,tensorflow/tensorflow/python/compat/compat.py,45,function,Update the base date to compare in forward_compatible function. 1572,forward_compatible,tensorflow/tensorflow/python/compat/compat.py,70,function,"Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The ""producer"" is typically a Python program that constructs and trains a model while the ""consumer"" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. 
return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibility, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day)." 1573,forward_compatibility_horizon,tensorflow/tensorflow/python/compat/compat.py,131,function,"Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=8, day=1): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 8, 2): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing." 1574,CompatTest,tensorflow/tensorflow/python/compat/compat_test.py,27,class, 1575,DisableV2BehaviorTest,tensorflow/tensorflow/python/compat/disable_v2_behavior_test.py,27,class, 1576,enable_v2_behavior,tensorflow/tensorflow/python/compat/v2_compat.py,43,function,"Enables TensorFlow 2.x behaviors. This function can be called at the beginning of the program (before `Tensors`, `Graphs` or other structures have been created, and before devices have been initialized). It switches all global behaviors that are different between TensorFlow 1.x and 2.x to behave as intended for 2.x. This function is called in the main TensorFlow `__init__.py` file, so users should not need to call it, except during complex migrations." 1577,disable_v2_behavior,tensorflow/tensorflow/python/compat/v2_compat.py,82,function,"Disables TensorFlow 2.x behaviors. This function can be called at the beginning of the program (before `Tensors`, `Graphs` or other structures have been created, and before devices have been initialized). It switches all global behaviors that are different between TensorFlow 1.x and 2.x to behave as intended for 1.x. Users can call this function to disable 2.x behavior during complex migrations." 1578,convert_graph_def,tensorflow/tensorflow/python/compiler/mlir/mlir.py,26,function,"Import a GraphDef and convert it to a textual MLIR module. Args: graph_def: An object of type graph_pb2.GraphDef or a textual proto representation of a valid GraphDef. 
pass_pipeline: A textual description of an MLIR Pass Pipeline to run on the module, see MLIR documentation for the [textual pass pipeline syntax](https://github.com/tensorflow/mlir/blob/master/g3doc/WritingAPass.md#textual-pass-pipeline-specification). Returns: A textual representation of the MLIR module corresponding to the graphdef. Raises a RuntimeError on error." 1579,MLIRImportTest,tensorflow/tensorflow/python/compiler/mlir/mlir_test.py,26,class, 1580,_to_bytes,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,84,function,Encode s if it is a sequence of chars. 1581,_to_string,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,91,function,Decode s if it is a sequence of bytes. 1582,TrtPrecisionMode,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,98,class, 1583,TrtConversionParams,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,117,class,"Parameters that are used for TF-TRT conversion. Fields: rewriter_config_template: a template RewriterConfig proto used to create a TRT-enabled RewriterConfig. If None, it will use a default one. max_workspace_size_bytes: the maximum GPU temporary memory which the TRT engine can use at execution time. This corresponds to the 'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize(). precision_mode: one of the strings in TrtPrecisionMode.supported_precision_modes(). minimum_segment_size: the minimum number of nodes required for a subgraph to be replaced by TRTEngineOp. is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT network and engine at run time. i.e. Since TensorRT version < 6.0 does not support dynamic dimensions other than the batch dimension, when the TensorFlow graph has a non-batch dimension of dynamic size, we would need to enable this option. This option should be set to True in TF 2.0. maximum_cached_engines: max number of cached TRT engines for dynamic TRT ops. Created TRT engines for a dynamic dimension are cached. This is the maximum number of engines that can be cached. If the number of cached engines is already at max but none of them supports the input shapes, the TRTEngineOp will fall back to run the original TF subgraph that corresponds to the TRTEngineOp. use_calibration: this argument is ignored if precision_mode is not INT8. If set to True, a calibration graph will be created to calibrate the missing ranges. The calibration graph must be converted to an inference graph by running calibration with calibrate(). If set to False, quantization nodes will be expected for every tensor in the graph (excluding those which will be fused). If a range is missing, an error will occur. Please note that accuracy may be negatively affected if there is a mismatch between which tensors TRT quantizes and which tensors were trained with fake quantization. max_batch_size: max size for the input batch. This parameter is only effective when is_dynamic_op=False which is not supported in TF 2.0. allow_build_at_runtime: whether to build TensorRT engines during runtime. If no TensorRT engine can be found in cache that can handle the given inputs during runtime, then a new TensorRT engine is built at runtime if allow_build_at_runtime=True, and otherwise native TF is used. This argument is only effective if is_dynamic_op=True." 1584,_check_conversion_params,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,188,function,"Validate the provided TrtConversionParams. Args: conversion_params: a TrtConversionParams instance. 
is_v2: whether we're getting a RewriterConfig for TF 2.0. Raises: TypeError: if any of the parameters are of unexpected type. ValueError: if any of the parameters are of unexpected value." 1585,_check_trt_version_compatibility,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,252,function,"Check compatibility of TensorRT version. Raises: RuntimeError: if the TensorRT library version is incompatible." 1586,get_tensorrt_rewriter_config,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,292,function,"Returns a RewriterConfig proto for TRT transformation. Args: conversion_params: a TrtConversionParams instance. is_v2: whether we're getting a RewriterConfig for TF 2.0. disable_non_trt_optimizers: Turn off all default Grappler optimizers. Returns: A RewriterConfig proto which sets a TensorRTOptimizer to run Grappler. Raises: TypeError: if any of the parameters are of unexpected type. ValueError: if any of the parameters are of unexpected value." 1587,_get_canonical_engine_name,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,383,function, 1588,is_explicit_batch_mode_enabled,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,387,function,Checks whether explicit batch is enabled by the rewriter config. 1589,TrtGraphConverter,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,398,class,"A converter for TF-TRT transformation for TF 1.x GraphDef/SavedModels. To run the conversion without quantization calibration (e.g. for FP32/FP16 precision modes): ```python converter = TrtGraphConverter( input_saved_model_dir=""my_dir"", precision_mode=TrtPrecisionMode.FP16) converted_graph_def = converter.convert() converter.save(output_saved_model_dir) ``` To run the conversion with quantization calibration: ```python converter = TrtGraphConverter( input_saved_model_dir=""my_dir"", precision_mode=TrtPrecisionMode.INT8) converter.convert() # Run calibration 10 times. converted_graph_def = converter.calibrate( fetch_names=['output:0'], num_runs=10, feed_dict_fn=lambda: {'input:0': my_next_data()}) converter.save(output_saved_model_dir) ```" 1590,_get_resource_handle,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,833,function, 1591,_TRTEngineResourceDeleter,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,838,class,Resource deleter for destroying TRT engine cache resource. 1592,_TRTEngineResource,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,853,class,Class to track the serialized engines resource. 1593,TrtGraphConverterV2,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,880,class,"An offline converter for TF-TRT transformation for TF 2.0 SavedModels. Currently this is not available on Windows platform. Note that in V2, is_dynamic_op=False is not supported, meaning TRT engines will be built only when the corresponding TRTEngineOp is executed. But we still provide a way to avoid the cost of building TRT engines during inference (see more below). There are several ways to run the conversion: 1. FP32/FP16 precision ```python params = tf.experimental.tensorrt.ConversionParams( precision_mode='FP16') converter = tf.experimental.tensorrt.Converter( input_saved_model_dir=""my_dir"", conversion_params=params) converter.convert() converter.save(output_saved_model_dir) ``` In this case, no TRT engines will be built or saved in the converted SavedModel. But if input data is available during conversion, we can still build and save the TRT engines to reduce the cost during inference (see option 2 below). 2. 
FP32/FP16 precision with pre-built engines ```python params = tf.experimental.tensorrt.ConversionParams( precision_mode='FP16', # Set this to a large enough number so it can cache all the engines. maximum_cached_engines=16) converter = tf.experimental.tensorrt.Converter( input_saved_model_dir=""my_dir"", conversion_params=params) converter.convert() # Define a generator function that yields input data, and use it to execute # the graph to build TRT engines. # With TensorRT 5.1, different engines will be built (and saved later) for # different input shapes to the TRTEngineOp. def my_input_fn(): for _ in range(num_runs): inp1, inp2 = ... yield inp1, inp2 converter.build(input_fn=my_input_fn) # Generate corresponding TRT engines converter.save(output_saved_model_dir) # Generated engines will be saved. ``` In this way, one engine will be built/saved for each unique input shape of the TRTEngineOp. This is good for applications that cannot afford building engines during inference but have access to input data that is similar to the one used in production (for example, that has the same input shapes). Also, the generated TRT engines are platform dependent, so we need to run `build()` in an environment that is similar to production (e.g. with same type of GPU). 3. INT8 precision and calibration with pre-built engines ```python params = tf.experimental.tensorrt.ConversionParams( precision_mode='INT8', # Currently only one INT8 engine is supported in this mode. maximum_cached_engines=1, use_calibration=True) converter = tf.experimental.tensorrt.Converter( input_saved_model_dir=""my_dir"", conversion_params=params) # Define a generator function that yields input data, and run INT8 # calibration with the data. All input data should have the same shape. # At the end of convert(), the calibration stats (e.g. range information) # will be saved and can be used to generate more TRT engines with different # shapes. Also, one TRT engine will be generated (with the same shape as # the calibration data) for saving later. def my_calibration_input_fn(): for _ in range(num_runs): inp1, inp2 = ... yield inp1, inp2 converter.convert(calibration_input_fn=my_calibration_input_fn) # (Optional) Generate more TRT engines offline (same as the previous # option), to avoid the cost of generating them during inference. def my_input_fn(): for _ in range(num_runs): inp1, inp2 = ... yield inp1, inp2 converter.build(input_fn=my_input_fn) # Save the TRT engine and the engines. converter.save(output_saved_model_dir) ```" 1594,create_inference_graph,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert.py,1270,function,"Python wrapper for the TRT transformation. Args: input_graph_def: a GraphDef object containing a model to be transformed. If set to None, the graph will be read from the SavedModel loaded from input_saved_model_dir. outputs: list of tensors or node names for the model outputs. Only used when input_graph_def is not None. max_batch_size: max size for the input batch. max_workspace_size_bytes: the maximum GPU temporary memory which the TRT engine can use at execution time. This corresponds to the 'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize(). precision_mode: one of TrtPrecisionMode.supported_precision_modes(). minimum_segment_size: the minimum number of nodes required for a subgraph to be replaced by TRTEngineOp. is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT network and engine at run time. 
maximum_cached_engines: max number of cached TRT engines in dynamic TRT ops. If the number of cached engines is already at max but none of them can serve the input, the TRTEngineOp will fall back to run the TF function based on which the TRTEngineOp is created. input_saved_model_dir: the directory to load the SavedModel which contains the input graph to transform. Used only when input_graph_def is None. input_saved_model_tags: list of tags to load the SavedModel. input_saved_model_signature_key: the key of the signature to optimize the graph for. output_saved_model_dir: if not None, construct a SavedModel using the returned GraphDef and save it to the specified directory. This option only works when the input graph is loaded from a SavedModel, i.e. when input_saved_model_dir is specified and input_graph_def is None. session_config: the ConfigProto used to create a Session. It's also used as a template to create a TRT-enabled ConfigProto for conversion. If not specified, a default ConfigProto will be used. Returns: A GraphDef transformed from input_graph_def (or the SavedModel graph def loaded from input_saved_model_dir, if input_graph_def is not present), where all TRT compatible subgraphs are replaced with TRTEngineOps, and a TF function is added for each of the subgraphs. If is_dynamic_op is True, each TRTEngineOp will contain a serialized subgraph GraphDef, which will be converted to a TRT engine at execution time and the TRT engine will be cached for future usage. A new TRT engine will be created whenever none of the cached engines match the input shapes. If it fails to execute the TRT engine or the number of cached engines reaches maximum_cached_engines, the op will fall back to call the corresponding TF function. If is_dynamic_op is False, each TRTEngineOp will contain a serialized TRT engine created from the corresponding subgraph. No more engines will be created on the fly, and the op will fall back to call the corresponding TF function when it fails to execute the engine. Raises: ValueError: if the combination of the parameters is invalid." 1595,TrtConvertTest,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert_test.py,67,class,Class to test Tensorflow-TensorRT integration python API. 1596,TrtPrecisionMode,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert_windows.py,31,class, 1597,TrtConversionParams,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert_windows.py,43,class,"Parameters that are used for TF-TRT conversion. Fields: rewriter_config_template: a template RewriterConfig proto used to create a TRT-enabled RewriterConfig. If None, it will use a default one. max_workspace_size_bytes: the maximum GPU temporary memory which the TRT engine can use at execution time. This corresponds to the 'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize(). precision_mode: one of the strings in TrtPrecisionMode.supported_precision_modes(). minimum_segment_size: the minimum number of nodes required for a subgraph to be replaced by TRTEngineOp. is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT network and engine at run time. i.e. Since TensorRT version < 6.0 does not support dynamic dimensions other than the batch dimension, when the TensorFlow graph has a non-batch dimension of dynamic size, we would need to enable this option. This option should be set to True in TF 2.0. maximum_cached_engines: max number of cached TRT engines for dynamic TRT ops. Created TRT engines for a dynamic dimension are cached. 
This is the maximum number of engines that can be cached. If the number of cached engines is already at max but none of them supports the input shapes, the TRTEngineOp will fall back to run the original TF subgraph that corresponds to the TRTEngineOp. use_calibration: this argument is ignored if precision_mode is not INT8. If set to True, a calibration graph will be created to calibrate the missing ranges. The calibration graph must be converted to an inference graph by running calibration with calibrate(). If set to False, quantization nodes will be expected for every tensor in the graph (excluding those which will be fused). If a range is missing, an error will occur. Please note that accuracy may be negatively affected if there is a mismatch between which tensors TRT quantizes and which tensors were trained with fake quantization. max_batch_size: max size for the input batch. This parameter is only effective when is_dynamic_op=False which is not supported in TF 2.0." 1598,TrtConverterWindows,tensorflow/tensorflow/python/compiler/tensorrt/trt_convert_windows.py,97,class,"An offline converter for TF-TRT transformation for TF 2.0 SavedModels. Currently this is not available on Windows platform." 1599,SimpleSingleEngineTest,tensorflow/tensorflow/python/compiler/tensorrt/test/base_test.py,34,class, 1600,SimpleMultiEnginesTest,tensorflow/tensorflow/python/compiler/tensorrt/test/base_test.py,74,class, 1601,SimpleMultiEnginesTest2,tensorflow/tensorflow/python/compiler/tensorrt/test/base_test.py,134,class, 1602,ConstInputTest,tensorflow/tensorflow/python/compiler/tensorrt/test/base_test.py,175,class, 1603,ConstDataInputSingleEngineTest,tensorflow/tensorflow/python/compiler/tensorrt/test/base_test.py,211,class, 1604,ConstDataInputMultipleEnginesTest,tensorflow/tensorflow/python/compiler/tensorrt/test/base_test.py,232,class, 1605,ControlDependencyTest,tensorflow/tensorflow/python/compiler/tensorrt/test/base_test.py,264,class, 1606,BatchMatMulTwoTensorTest,tensorflow/tensorflow/python/compiler/tensorrt/test/batch_matmul_test.py,32,class,Testing conversion of BatchMatMul where both inputs are tensors. 1607,BatchMatMulWeightBroadcastTest,tensorflow/tensorflow/python/compiler/tensorrt/test/batch_matmul_test.py,50,class,Testing BatchMatMulV2: one operand is weight and both have same rank. 1608,BatchMatMulWeightBroadcastDims2Test,tensorflow/tensorflow/python/compiler/tensorrt/test/batch_matmul_test.py,69,class,Testing BatchMatMulV2: weight operand must be broadcasted. 1609,BiasaddMatMulTest,tensorflow/tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py,33,class,Testing conversion of BiasAdd MatMul in TF-TRT conversion. 1610,BinaryTensorWeightBroadcastTest,tensorflow/tensorflow/python/compiler/tensorrt/test/binary_tensor_weight_broadcast_test.py,30,class,Tests for scale & elementwise layers in TF-TRT. 1611,CastInt32ToFp32Test,tensorflow/tensorflow/python/compiler/tensorrt/test/cast_test.py,31,class,Tests that casts to FP32 are split in FP16 mode. 1612,CombinedNmsTest,tensorflow/tensorflow/python/compiler/tensorrt/test/combined_nms_test.py,30,class,Test for CombinedNMS op in TF-TRT. 1613,ConcatenationTest,tensorflow/tensorflow/python/compiler/tensorrt/test/concatenation_test.py,32,class,Testing Concatenation in TF-TRT conversion. 1614,ConstBroadcastTest,tensorflow/tensorflow/python/compiler/tensorrt/test/const_broadcast_test.py,28,class,Test for Constant broadcasting in TF-TRT. 
1615,conv2d_layer,tensorflow/tensorflow/python/compiler/tensorrt/test/conv2d_test.py,32,function, 1616,div_round_up,tensorflow/tensorflow/python/compiler/tensorrt/test/conv2d_test.py,62,function, 1617,build_graph,tensorflow/tensorflow/python/compiler/tensorrt/test/conv2d_test.py,66,function, 1618,Conv2DNCHWTest,tensorflow/tensorflow/python/compiler/tensorrt/test/conv2d_test.py,83,class,Testing conversion of Conv2D (data_format=NCHW) in TF-TRT conversion. 1619,Conv2DNHWCTest,tensorflow/tensorflow/python/compiler/tensorrt/test/conv2d_test.py,118,class,Testing conversion of Conv2D (data_format=NHWC) in TF-TRT conversion. 1620,Conv2DStridedNCHWTest,tensorflow/tensorflow/python/compiler/tensorrt/test/conv2d_test.py,141,class,Testing conversion of strided Conv2D (data_format=NCHW). 1621,Conv2DTranposeTest,tensorflow/tensorflow/python/compiler/tensorrt/test/conv2d_test.py,172,class,Testing conversion of conv2d_transpose (AKA Conv2DBackpropInput). 1622,DynamicInputShapesTest,tensorflow/tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py,32,class, 1623,IdentityTest,tensorflow/tensorflow/python/compiler/tensorrt/test/identity_output_test.py,36,class,Testing engine with the same tensor repeated as output via identity. 1624,ExcludeUnsupportedInt32Test,tensorflow/tensorflow/python/compiler/tensorrt/test/int32_test.py,32,class,Test exclusion of ops which are not supported in INT32 mode by TF-TRT. 1625,CalibrationInt32Support,tensorflow/tensorflow/python/compiler/tensorrt/test/int32_test.py,68,class,Test execution of calibration with int32 input. 1626,LRUCacheTest,tensorflow/tensorflow/python/compiler/tensorrt/test/lru_cache_test.py,33,class, 1627,MemoryAlignmentTest,tensorflow/tensorflow/python/compiler/tensorrt/test/memory_alignment_test.py,31,class,Testing conversion of BatchMatMul in TF-TRT conversion. 1628,MultiConnectionNeighborEngineTest,tensorflow/tensorflow/python/compiler/tensorrt/test/multi_connection_neighbor_engine_test.py,31,class,Test for multi connection neighboring nodes wiring tests in TF-TRT. 1629,NeighboringEngineTest,tensorflow/tensorflow/python/compiler/tensorrt/test/neighboring_engine_test.py,32,class,Neighboring node wiring tests in TF-TRT conversion. 1630,QuantizationAwareTrainingMNISTTest,tensorflow/tensorflow/python/compiler/tensorrt/test/quantization_mnist_test.py,59,class,Testing usage of quantization ranges inserted in graph. 1631,_GraphFn,tensorflow/tensorflow/python/compiler/tensorrt/test/quantization_test.py,33,function, 1632,_GetParams,tensorflow/tensorflow/python/compiler/tensorrt/test/quantization_test.py,53,function, 1633,QuantizationMissingAllRangesTest,tensorflow/tensorflow/python/compiler/tensorrt/test/quantization_test.py,57,class,Create a graph containing single segment with no quantization ranges. 1634,QuantizationWithRangesTest,tensorflow/tensorflow/python/compiler/tensorrt/test/quantization_test.py,82,class,Create a graph containing single segment with quantization ranges. 1635,NonQuantizedPrecisionsWithRangesTest,tensorflow/tensorflow/python/compiler/tensorrt/test/quantization_test.py,110,class,Create a graph containing single segment with quantization ranges. 1636,RankTwoTest,tensorflow/tensorflow/python/compiler/tensorrt/test/rank_two_test.py,30,class,Test for rank 2 input in TF-TRT. 
1637,ReshapeTest,tensorflow/tensorflow/python/compiler/tensorrt/test/reshape_transpose_test.py,28,class, 1638,TransposeTest,tensorflow/tensorflow/python/compiler/tensorrt/test/reshape_transpose_test.py,79,class, 1639,IncompatibleTransposeTest,tensorflow/tensorflow/python/compiler/tensorrt/test/reshape_transpose_test.py,108,class, 1640,IsQuantizationMode,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,95,function, 1641,IsQuantizationWithCalibration,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,99,function, 1642,GraphState,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,103,class, 1643,TfTrtIntegrationTestBase,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,109,class,Class to test Tensorflow-TensorRT integration. 1644,_GetTestConfigsV1,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,883,function,Returns the config combinations to run the test. 1645,_GetTestConfigsV2,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,902,function,Returns the config combinations to run the test. 1646,_GetTest,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,928,function,Gets a single test method based on the parameters. 1647,_AddTestsFor,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,942,function,Adds test methods to TfTrtIntegrationTestBase for specific TF version. 1648,_AddTests,tensorflow/tensorflow/python/compiler/tensorrt/test/tf_trt_integration_test_base.py,967,function,Adds test methods to TfTrtIntegrationTestBase. 1649,TopKTest,tensorflow/tensorflow/python/compiler/tensorrt/test/topk_test.py,29,class,Testing Top-K in TF-TRT conversion. 1650,TopKOutputTypeTest,tensorflow/tensorflow/python/compiler/tensorrt/test/topk_test.py,50,class,Testing that output type of engine using Top-K is set correctly. 1651,TrtModeTestBase,tensorflow/tensorflow/python/compiler/tensorrt/test/trt_mode_test.py,31,class,Test squeeze on batch dim and some unary operations in TF-TRT. 1652,ImplicitBatchTest,tensorflow/tensorflow/python/compiler/tensorrt/test/trt_mode_test.py,81,class, 1653,ExplicitBatchTest,tensorflow/tensorflow/python/compiler/tensorrt/test/trt_mode_test.py,104,class, 1654,DynamicShapesTest,tensorflow/tensorflow/python/compiler/tensorrt/test/trt_mode_test.py,140,class,"Test with dynamic input shapes. DynamicShapesTest is different from ExplicitBatchTest in that it uses input and output masks to change the input and output shapes to unknown shapes." 1655,UnaryTest,tensorflow/tensorflow/python/compiler/tensorrt/test/unary_test.py,33,class,Test for unary operations in TF-TRT. 1656,VGGBlockNCHWTest,tensorflow/tensorflow/python/compiler/tensorrt/test/vgg_block_nchw_test.py,35,class,Single vgg layer in NCHW unit tests in TF-TRT. 1657,VGGBlockTest,tensorflow/tensorflow/python/compiler/tensorrt/test/vgg_block_test.py,35,class,Single vgg layer test in TF-TRT conversion. 1658,GetGraph,tensorflow/tensorflow/python/compiler/tensorrt/test/testdata/gen_tftrt_model.py,49,function,Define graph. 1659,GenerateModelV2,tensorflow/tensorflow/python/compiler/tensorrt/test/testdata/gen_tftrt_model.py,59,function,Generate and convert a model using TFv2 API. 1660,GenerateModelV1,tensorflow/tensorflow/python/compiler/tensorrt/test/testdata/gen_tftrt_model.py,90,function,Generate and convert a model using TFv1 API. 
1661,ExperimentalCompileTest,tensorflow/tensorflow/python/compiler/xla/experimental_compile_test.py,30,class, 1662,_XlaScope,tensorflow/tensorflow/python/compiler/xla/jit.py,32,class,"Keeps track of previous XLA scope calls, and depth of current call." 1663,experimental_jit_scope,tensorflow/tensorflow/python/compiler/xla/jit.py,42,function,"Enable or disable JIT compilation of operators within the scope. NOTE: This is an experimental feature. The compilation is a hint and only supported on a best-effort basis. Example usage: ```python with tf.xla.experimental.jit_scope(): c = tf.matmul(a, b) # compiled with tf.xla.experimental.jit_scope(compile_ops=False): d = tf.matmul(a, c) # not compiled with tf.xla.experimental.jit_scope( compile_ops=lambda node_def: 'matmul' in node_def.op.lower()): e = tf.matmul(a, b) + d # matmul is compiled, the addition is not. ``` Example of `separate_compiled_gradients`: ```python # In the example below, the computations for f, g and h will all be compiled # in separate scopes. with tf.xla.experimental.jit_scope( separate_compiled_gradients=True): f = tf.matmul(a, b) g = tf.gradients([f], [a, b], name='mygrads1') h = tf.gradients([f], [a, b], name='mygrads2') ``` Args: compile_ops: Whether to enable or disable compilation in the scope. Either a Python bool, or a callable that accepts the parameter `node_def` and returns a python bool. separate_compiled_gradients: If true, put each gradient subgraph into a separate compilation scope. This gives fine-grained control over which portions of the graph will be compiled as a single unit. Compiling gradients separately may yield better performance for some graphs. The scope is named based on the scope of the forward computation as well as the name of the gradients. As a result, the gradients will be compiled in a scope that is separate from both the forward computation, and from other gradients. Raises: RuntimeError: if called when eager execution is enabled. Yields: The current scope, enabling or disabling compilation." 1664,enable_jit_nonstateful,tensorflow/tensorflow/python/compiler/xla/jit_test.py,39,function, 1665,JITTest,tensorflow/tensorflow/python/compiler/xla/jit_test.py,47,class, 1666,CompilationEnabledInGradientTest,tensorflow/tensorflow/python/compiler/xla/jit_test.py,187,class, 1667,compile,tensorflow/tensorflow/python/compiler/xla/xla.py,67,function,"Builds an operator that compiles and runs `computation` with XLA. NOTE: In eager mode, `computation` will have `@tf.function` semantics. Args: computation: A Python function that builds a computation to apply to the input. If the function takes n inputs, 'inputs' should be a list of n tensors. `computation` may return a list of operations and tensors. Tensors must come before operations in the returned list. The return value of `compile` is a list of tensors corresponding to the tensors from the output of `computation`. All `Operation`s returned from `computation` will be executed when evaluating any of the returned output tensors. inputs: A list of inputs or `None` (equivalent to an empty list). Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in an N-dimension list of scalar tensors rather than a single Rank-N tensor. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. Returns: Same data structure as if computation(*inputs) is called directly with some exceptions for correctness. 
Exceptions include: 1) None output: a NoOp would be returned which control-depends on computation. 2) Single value output: A tuple containing the value would be returned. 3) Operation-only outputs: a NoOp would be returned which control-depends on computation. TODO(b/121383831): Investigate into removing these special cases. Raises: RuntimeError: if called when eager execution is enabled. Known issues: When a tf.random operation is built with XLA, the implementation doesn't pass the user provided seed to the XLA compiler. As such, the XLA compiler generates a random number and uses it as a seed when compiling the operation. This implementation causes a violation of the Tensorflow defined semantics in two aspects. First, changing the value of the user defined seed doesn't change the numbers generated by the operation. Second, when a seed is not specified, running the program multiple times will generate the same numbers." 1668,XLACompileContext,tensorflow/tensorflow/python/compiler/xla/xla.py,125,class,"A `ControlFlowContext` for nodes inside an XLA computation cluster. THIS IS ONLY FOR TENSORFLOW INTERNAL IMPLEMENTATION, DO NOT USE DIRECTLY. The primary role of `XLACompileContext` is to mark operators inside an xla.compile() computation with attribute ""_xla_compile_id=XYZ"", where XYZ is a unique name. `ControlFlowContext` is used to perform the annotation since it integrates with Tensorflow constructs like ResourceVariables. For example, if a `ResourceVariable` is constructed inside an xla.compile() block, the `ResourceVariable` implementation can use `with ops.control_dependencies(None)` to build the variable's definition outside the compiled computation." 1669,_compile_internal,tensorflow/tensorflow/python/compiler/xla/xla.py,306,function,"Builds graph operators that compile and symbolically execute computation. Args: computation: A Python function that builds the computation to compile and execute. inputs: A list of inputs or `None` (equivalent to an empty list). Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in an N-dimension list of scalar tensors rather than a single Rank-N tensor. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. Returns: Same data structure as if computation(*inputs) is called directly with some exceptions for correctness. Exceptions include: 1) None output 2) Single value output 3) Operation-only outputs Raises: ValueError: If any element in computation outputs is neither an operation nor a value that can be converted to a tensor. ValueError: If computation outputs is non-flat and contains any Operations. TypeError: If `inputs` is not a list or tuple." 1670,is_flat,tensorflow/tensorflow/python/compiler/xla/xla.py,409,function,"Checks if outputs is a flat structure. Following structures and values are considered flat: 1) None 2) A single object 3) A list or tuple of Tensors/Operations The only structures that this function understands are sequences, dictionaries and types defined using the attrs library. E.g. this means that if outputs contains a single user-defined Object, it is considered to be flat. Errors are raised later on if that Object cannot be converted to a Tensor. Args: outputs: Output from `computation` inside `xla.compile`. Returns: A boolean indicating whether outputs is flat." 
1671,_postprocess_flat_outputs,tensorflow/tensorflow/python/compiler/xla/xla.py,451,function,"Validates flat outputs and adds back device assignments. Args: outputs: Output from `computation` inside `xla.compile`. Returns: Tensors and Operations extracted from outputs." 1672,_postprocess_non_flat_outputs,tensorflow/tensorflow/python/compiler/xla/xla.py,503,function,"Validates non-flat outputs and adds back device assignments. Args: outputs: Output from `computation` inside `xla.compile`. Returns: Tensors extracted from outputs and an empty list, because Operations are not allowed in non-flat outputs." 1673,_disable_summary_context,tensorflow/tensorflow/python/compiler/xla/xla.py,539,function,"Enters a context where all summary ops are skipped. Summaries are not yet supported in xla.compile(), so this context manager skips creating summary ops as a temporary workaround. Yields: None." 1674,_CapturedObject,tensorflow/tensorflow/python/compiler/xla/xla.py,558,class,A placeholder to capture an object. 1675,_get_scaffold,tensorflow/tensorflow/python/compiler/xla/xla.py,576,function,Retrieves the Scaffold from `captured_scaffold_fn`. 1676,check_function_argument_count,tensorflow/tensorflow/python/compiler/xla/xla.py,591,function,"Validate the number of input arguments to an XLA function. Args: func: the Python function that will be called to generate the body of an XLA computation graph. input_arity: the number of explicit arguments supplied by the caller. infeed_queue: if not None, the infeed queue that will supply additional arguments to the function. Returns: None if function can be called with the supplied number of arguments, or an error string if it cannot." 1677,XLACompileContextTest,tensorflow/tensorflow/python/compiler/xla/xla_test.py,47,class, 1678,XlaCompileTest,tensorflow/tensorflow/python/compiler/xla/xla_test.py,217,class, 1679,CheckFunctionArgumentCountTest,tensorflow/tensorflow/python/compiler/xla/xla_test.py,260,class, 1680,BatchBenchmark,tensorflow/tensorflow/python/data/benchmarks/batch_benchmark.py,27,class,Benchmarks for `tf.data.Dataset.batch()`. 1681,DatasetBenchmarkBase,tensorflow/tensorflow/python/data/benchmarks/benchmark_base.py,31,class,Base class for dataset benchmarks. 1682,FilterBenchmark,tensorflow/tensorflow/python/data/benchmarks/filter_benchmark.py,26,class,Benchmarks for `tf.data.Dataset.filter()`. 1683,SingleThreadedFlatMapDataset,tensorflow/tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py,30,class,A `Dataset` that maps a function over its input and flattens the result. 1684,FromTensorSlicesBenchmark,tensorflow/tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py,62,class,Benchmarks for `tf.data.Dataset.from_tensor_slices()`. 1685,ListFilesBenchmark,tensorflow/tensorflow/python/data/benchmarks/list_files_benchmark.py,35,class,Benchmarks for `tf.data.Dataset.list_files()`. 1686,MapBenchmark,tensorflow/tensorflow/python/data/benchmarks/map_benchmark.py,32,class,Benchmarks for `tf.data.Dataset.map()`. 1687,MetaBenchmark,tensorflow/tensorflow/python/data/benchmarks/meta_benchmark.py,31,class,Benchmark that compares various ways of running tf.data benchmarks. 1688,PrefetchBenchmark,tensorflow/tensorflow/python/data/benchmarks/prefetch_benchmark.py,24,class,Benchmarks for `tf.data.Dataset.prefetch()`. 1689,RangeBenchmark,tensorflow/tensorflow/python/data/benchmarks/range_benchmark.py,24,class,Benchmarks for `tf.data.Dataset.range()`. 
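The benchmark classes above are internal to the test suite, but the core measurement they perform can be approximated with plain timing. A rough sketch under that assumption (`time_dataset` is an illustrative helper, not the actual `DatasetBenchmarkBase` API):

```python
import time

import tensorflow as tf

def time_dataset(dataset, num_elements):
  """Returns average seconds per element when iterating `dataset`."""
  iterator = iter(dataset)
  next(iterator)  # Warm-up: keep one-time setup cost out of the measurement.
  start = time.perf_counter()
  for _ in range(num_elements):
    next(iterator)
  return (time.perf_counter() - start) / num_elements

ds = tf.data.Dataset.range(100000).map(lambda x: x * x).batch(128)
print("sec/batch:", time_dataset(ds, num_elements=500))
```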
1690,AutotuneBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/autotune_benchmark.py,31,class,Benchmarks for autotuning performance knobs. 1691,ChooseFastestBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/choose_fastest_benchmark.py,31,class,Benchmarks for static optimizations. 1692,ChooseFastestBranchBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/choose_fastest_branch_benchmark.py,26,class,Benchmarks for ChooseFastestBranchDataset. 1693,CsvDatasetBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/csv_dataset_benchmark.py,38,class,Benchmarks for `tf.data.experimental.CsvDataset`. 1694,MapAndBatchBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/map_and_batch_benchmark.py,40,class,Benchmarks for `tf.data.experimental.map_and_batch()`. 1695,MapDefunBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py,34,class,Benchmarks for MapDefunOp. 1696,_generate_csv_test_case,tensorflow/tensorflow/python/data/experimental/benchmarks/map_vectorization_benchmark.py,37,function,Generates a `decode_csv()` test case. 1697,_generate_parse_single_example_test_case,tensorflow/tensorflow/python/data/experimental/benchmarks/map_vectorization_benchmark.py,57,function,Generates a `parse_single_example()` test case. 1698,MapVectorizationBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/map_vectorization_benchmark.py,97,class,Benchmarks for the `MapVectorization` optimization. 1699,MatchingFilesBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/matching_files_benchmark.py,35,class,Benchmark for the experimental `MatchingFilesDataset`. 1700,OptimizationBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/optimize_benchmark.py,32,class,Benchmarks for static optimizations. 1701,_make_fake_dataset_fn,tensorflow/tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py,36,function,"Returns a dataset that emulates a remote storage data source. Returns a dataset factory which creates a dataset with 100 elements that emulates the performance characteristic of a file-based dataset stored in remote storage. In particular, the first element will take an order of magnitude longer to produce than the remaining elements (100ms vs. 1ms). Args: initial_delay_us: How long to wait before producing the first element. remainder_delay_us: How long to wait before producing subsequent elements." 1702,ParallelInterleaveBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py,68,class,Benchmarks for `tf.data.experimental.parallel_interleave()`. 1703,_time_resampling,tensorflow/tensorflow/python/data/experimental/benchmarks/rejection_resample_benchmark.py,31,function, 1704,RejectionResampleBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/rejection_resample_benchmark.py,56,class,Benchmarks for `tf.data.experimental.rejection_resample()`. 1705,SnapshotDatasetBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/snapshot_dataset_benchmark.py,34,class,Benchmarks for `tf.data.experimental.snapshot()`. 1706,UnbatchBenchmark,tensorflow/tensorflow/python/data/experimental/benchmarks/unbatch_benchmark.py,32,class,Benchmarks for `tf.data.Dataset.unbatch()`. 1707,AssertCardinalityTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py,30,class,Tests for `tf.data.experimental.assert_cardinality()`. 
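A rough approximation of the fake remote-storage source described for `_make_fake_dataset_fn` above; the real helper uses internal delay ops, so this sketch substitutes `tf.py_function` with `time.sleep`, and the parameter values are illustrative:

```python
import time

import tensorflow as tf

def make_fake_dataset_fn(initial_delay_s=0.1, remainder_delay_s=0.001):
  """Factory for a 100-element dataset whose first element is ~100x slower."""

  def dataset_fn():
    def delay(i):
      # The first element emulates the expensive "open" of a remote file.
      time.sleep(initial_delay_s if int(i) == 0 else remainder_delay_s)
      return i

    return tf.data.Dataset.range(100).map(
        lambda i: tf.py_function(delay, [i], tf.int64))

  return dataset_fn

# Parallel interleave hides the initial per-"file" latency.
ds = tf.data.Dataset.range(4).interleave(
    lambda _: make_fake_dataset_fn()(),
    cycle_length=4,
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
```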
1708,AssertNextTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/assert_next_test.py,30,class, 1709,chunk,tensorflow/tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py,46,function, 1710,AutoShardDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py,51,class, 1711,AutoShardTextLineDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py,509,class, 1712,_element_length_fn,tensorflow/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py,37,function, 1713,_to_sparse_tensor,tensorflow/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py,42,function, 1714,_format_record,tensorflow/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py,46,function, 1715,_get_record_type,tensorflow/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py,56,function, 1716,_get_record_shape,tensorflow/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py,66,function, 1717,BucketBySequenceLengthTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py,76,class, 1718,_test_objects,tensorflow/tensorflow/python/data/experimental/kernel_tests/compression_ops_test.py,31,function, 1719,CompressionOpsTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/compression_ops_test.py,53,class, 1720,CopyToDeviceTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/copy_to_device_test.py,40,class, 1721,CounterTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/counter_test.py,30,class, 1722,CsvDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/csv_dataset_test.py,40,class, 1723,_make_scalar_ds,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,38,function,Create a test dataset with scalar elements. 1724,_make_vector_ds,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,43,function,Create a test dataset with vector elements (of varying size). 1725,_make_matrix_ds1,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,48,function,Create a test dataset with matrix elements (of varying size). 1726,_make_matrix_ds2,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,53,function,Create a test dataset with matrix elements (of varying size). 1727,_make_matrix_ds_fully_defined,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,58,function,Create a test dataset with matrix elements (with fully defined shape). 1728,_make_5dtensor_ds,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,63,function,Create a test dataset with 5D tensor elements (of varying size). 1729,_make_ragged_ds,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,69,function,Create a test dataset with RaggedTensor elements (of varying size). 1730,_make_dict_ds,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,76,function,Create a test dataset with various element shapes. 1731,_make_tuple_ds,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,89,function,Create a test dataset with various element shapes. 
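For context on what the bucket-by-sequence-length helpers above exercise, a small self-contained usage example of the public transformation on variable-length data like that built by the `_make_*_ds` makers (the boundaries and batch sizes here are illustrative):

```python
import tensorflow as tf

# Variable-length elements: ranges of length 3, 1, 5, 2, 4.
ds = tf.data.Dataset.from_tensor_slices([3, 1, 5, 2, 4])
ds = ds.map(lambda n: tf.range(n))

ds = ds.apply(tf.data.experimental.bucket_by_sequence_length(
    element_length_func=lambda elem: tf.shape(elem)[0],
    bucket_boundaries=[3],        # two buckets: length < 3 and length >= 3
    bucket_batch_sizes=[2, 2]))   # one batch size per bucket (boundaries + 1)

for batch in ds:
  print(batch.shape)  # padded to the longest element in each batch
```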
1732,_to_list,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,98,function, 1733,RaggedBatchTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_ragged_batch_test.py,102,class, 1734,DenseToSparseBatchTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py,32,class, 1735,DirectedInterleaveDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/directed_interleave_dataset_test.py,34,class, 1736,GetSingleElementTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/get_single_element_test.py,34,class, 1737,GroupByReducerTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py,37,class, 1738,GroupByWindowTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/group_by_window_test.py,41,class, 1739,IgnoreErrorsTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/ignore_errors_test.py,40,class, 1740,IOTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/io_test.py,32,class, 1741,MakeBatchedFeaturesDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/make_batched_features_dataset_test.py,38,class, 1742,MakeCsvDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py,38,class, 1743,MakeTFRecordDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/make_tf_record_dataset_test.py,33,class, 1744,MapAndBatchTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py,43,class, 1745,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py,44,function, 1746,MapDefunTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py,48,class, 1747,MatchingFilesDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/matching_files_test.py,34,class, 1748,ModelDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/model_dataset_test.py,30,class, 1749,NonSerializableTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/non_serializable_test.py,29,class, 1750,_captured_refvar_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimize_dataset_test.py,44,function, 1751,OptimizeDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimize_dataset_test.py,106,class, 1752,OverrideThreadpoolTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/override_threadpool_test.py,38,class, 1753,ParallelInterleaveTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/parallel_interleave_test.py,42,class, 1754,ParseExampleDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/parse_example_dataset_test.py,54,class, 1755,PrefetchToDeviceTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/prefetch_to_device_test.py,37,class, 1756,PrefetchWithSlackTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/prefetch_with_slack_test.py,33,class, 1757,RandomDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/random_dataset_test.py,29,class, 1758,FixedLengthRecordDatasetTestBase,tensorflow/tensorflow/python/data/experimental/kernel_tests/reader_dataset_ops_test_base.py,36,class,Base class for setting up and testing FixedLengthRecordDataset. 
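`FixedLengthRecordDatasetTestBase` above targets `tf.data.FixedLengthRecordDataset`; a minimal end-to-end sketch of that reader (the file path and contents are illustrative):

```python
import tensorflow as tf

# Write three 4-byte records, then read them back as fixed-length records.
path = "/tmp/fixed_len_records.bin"
with open(path, "wb") as f:
  f.write(b"aaaabbbbcccc")

ds = tf.data.FixedLengthRecordDataset([path], record_bytes=4)
for record in ds:
  print(record.numpy())  # b'aaaa', then b'bbbb', then b'cccc'
```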
1759,MakeBatchedFeaturesDatasetTestBase,tensorflow/tensorflow/python/data/experimental/kernel_tests/reader_dataset_ops_test_base.py,63,class,Base class for setting up and testing `make_batched_features_dataset`. 1760,TextLineDatasetTestBase,tensorflow/tensorflow/python/data/experimental/kernel_tests/reader_dataset_ops_test_base.py,271,class,Base class for setting up and testing TextLineDataset. 1761,TFRecordDatasetTestBase,tensorflow/tensorflow/python/data/experimental/kernel_tests/reader_dataset_ops_test_base.py,311,class,Base class for setting up and testing TFRecordDataset. 1762,BatchSizesForWorkerTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py,35,class, 1763,_flat_shapes,tensorflow/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py,113,function, 1764,RebatchDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py,120,class, 1765,LegacyRebatchDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py,325,class, 1766,ComputeBatchSizeTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py,500,class, 1767,RejectionResampleTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/rejection_resample_test.py,36,class, 1768,LocalReplicateTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/replicate_test.py,44,class, 1769,_get_server_def,tensorflow/tensorflow/python/data/experimental/kernel_tests/replicate_test.py,225,function,Returns a server def with a single job + multiple tasks. 1770,EagerClusterReplicateTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/replicate_test.py,245,class, 1771,GraphClusterReplicateTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/replicate_test.py,327,class, 1772,ScanTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/scan_test.py,46,class, 1773,ShuffleAndRepeatTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/shuffle_and_repeat_test.py,32,class, 1774,SleepTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/sleep_test.py,32,class, 1775,SnapshotDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/snapshot_test.py,40,class, 1776,LegacySnapshotDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/snapshot_test.py,318,class, 1777,SqlDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/sql_dataset_test.py,32,class, 1778,SqlDatasetTestBase,tensorflow/tensorflow/python/data/experimental/kernel_tests/sql_dataset_test_base.py,30,class,Base class for setting up and testing SqlDataset. 1779,StatsDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py,39,class, 1780,ThreadUtilizationStatsTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py,334,class, 1781,FeatureStatsDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py,399,class, 1782,StatsDatasetTestBase,tensorflow/tensorflow/python/data/experimental/kernel_tests/stats_dataset_test_base.py,37,class,Base class for testing statistics gathered in `StatsAggregator`. 1783,_events_from_file,tensorflow/tensorflow/python/data/experimental/kernel_tests/stats_dataset_test_base.py,311,function,"Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.Event protos in the event file." 
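The `_events_from_file` helper above (and `_events_from_logdir`, which follows) can be approximated with the public summary iterator; this is a sketch of the idea, not the test base's actual implementation:

```python
import os

import tensorflow as tf

def events_from_file(filepath):
  """Returns all tf.Event protos stored in a single event file."""
  return list(tf.compat.v1.train.summary_iterator(filepath))

def events_from_logdir(logdir):
  """Returns all tf.Event protos from the single event file in `logdir`."""
  files = tf.io.gfile.listdir(logdir)
  assert len(files) == 1, "expected exactly one event file in %s" % logdir
  return events_from_file(os.path.join(logdir, files[0]))
```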
1784,_events_from_logdir,tensorflow/tensorflow/python/data/experimental/kernel_tests/stats_dataset_test_base.py,329,function,"Returns all events in the single event file in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file." 1785,TakeWhileTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/take_while_test.py,34,class, 1786,TFRecordWriterTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/tf_record_writer_test.py,39,class, 1787,UniqueTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/unique_test.py,32,class, 1788,VariantTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/variant_test.py,28,class, 1789,WrapDatasetVariantTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/wrap_unwrap_test.py,31,class, 1790,ChooseFastestBranchDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_branch_dataset_test.py,34,class, 1791,ChooseFastestDatasetTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_dataset_test.py,31,class, 1792,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/filter_fusion_test.py,34,function, 1793,FilterFusionTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/filter_fusion_test.py,62,class, 1794,FilterWithRandomUniformFusionTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/filter_with_random_uniform_fusion_test.py,30,class, 1795,GrapplerTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/grappler_test.py,37,class, 1796,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py,38,function, 1797,HoistRandomUniformTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py,68,class, 1798,InjectPrefetchTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/inject_prefetch_test.py,29,class, 1799,LatencyAllEdgesTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py,31,class, 1800,MapAndBatchFusionTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_and_batch_fusion_test.py,29,class, 1801,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py,34,function, 1802,MapAndFilterFusionTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py,77,class, 1803,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_fusion_test.py,34,function, 1804,MapFusionTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_fusion_test.py,66,class, 1805,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py,37,function, 1806,MapParallelizationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py,58,class, 1807,_generate_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,51,function, 
1808,_unary_bitwise_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,60,function, 1809,_unary_logical_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,65,function, 1810,_unary_complex_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,70,function, 1811,_unary_real_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,81,function, 1812,_binary_bitwise_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,135,function, 1813,_binary_logical_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,144,function, 1814,_binary_real_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,150,function, 1815,MapVectorizationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py,192,class, 1816,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py,34,function, 1817,NoopEliminationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py,90,class, 1818,ReorderDataDiscardingOpsTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/reorder_data_discarding_ops_test.py,29,class, 1819,ShuffleAndRepeatFusionTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py,30,class, 1820,AssertCardinalityDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/assert_cardinality_dataset_serialization_test.py,30,class, 1821,AutoShardDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/auto_shard_dataset_serialization_test.py,36,class, 1822,BatchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/batch_dataset_serialization_test.py,33,class, 1823,CacheDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/cache_dataset_serialization_test.py,32,class, 1824,_test_combinations,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/checkpoint_input_pipeline_hook_test.py,40,function, 1825,CheckpointInputPipelineHookTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/checkpoint_input_pipeline_hook_test.py,44,class, 1826,ChooseFastestBranchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_branch_dataset_serialization_test.py,33,class, 1827,ChooseFastestDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_dataset_serialization_test.py,30,class, 1828,ConcatenateDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/concatenate_dataset_serialization_test.py,30,class, 1829,CsvDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/csv_dataset_serialization_test.py,32,class, 1830,FromTensorsSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/dataset_constructor_serialization_test.py,31,class, 
1831,FromTensorSlicesSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/dataset_constructor_serialization_test.py,49,class, 1832,FromSparseTensorSlicesSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/dataset_constructor_serialization_test.py,71,class, 1833,remove_variants,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py,41,function,"Remove variants from a nest structure, so sess.run will execute." 1834,DatasetSerializationTestBase,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py,55,class,Base class for testing serializable datasets. 1835,FilterDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/filter_dataset_serialization_test.py,31,class, 1836,FixedLengthRecordDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/fixed_length_record_dataset_serialization_test.py,30,class, 1837,FlatMapDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/flat_map_dataset_serialization_test.py,38,class, 1838,GroupByReducerSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/group_by_reducer_serialization_test.py,31,class, 1839,GroupByWindowSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/group_by_window_serialization_test.py,31,class, 1840,IgnoreErrorsSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/ignore_errors_serialization_test.py,31,class, 1841,InterleaveDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/interleave_dataset_serialization_test.py,32,class, 1842,MapAndBatchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/map_and_batch_dataset_serialization_test.py,34,class, 1843,MapDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/map_dataset_serialization_test.py,38,class, 1844,MatchingFilesDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/matching_files_dataset_serialization_test.py,33,class, 1845,OptimizeDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/optimize_dataset_serialization_test.py,30,class, 1846,PaddedBatchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/padded_batch_dataset_serialization_test.py,32,class, 1847,ParallelInterleaveDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/parallel_interleave_dataset_serialization_test.py,33,class, 1848,ParallelMapDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/parallel_map_dataset_serialization_test.py,37,class, 1849,ParseExampleDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/parse_example_dataset_serialization_test.py,29,class, 1850,PrefetchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/prefetch_dataset_serialization_test.py,29,class, 1851,RangeDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/range_dataset_serialization_test.py,38,class, 
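`remove_variants` above strips DT_VARIANT tensors so the remaining structure can be passed to `sess.run`; a hedged sketch of the idea using `tf.nest`, not necessarily the test base's exact logic:

```python
import tensorflow as tf

def remove_variants(structure):
  """Replaces variant-dtype tensors in a nest with None, keeping the rest."""

  def _maybe_remove(value):
    if isinstance(value, tf.Tensor) and value.dtype == tf.variant:
      return None
    return value

  return tf.nest.map_structure(_maybe_remove, structure)
```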
1852,LegacyRebatchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/rebatch_dataset_serialization_test.py,30,class, 1853,RebatchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/rebatch_dataset_serialization_test.py,46,class, 1854,SampleFromDatasetsSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/sample_from_datasets_serialization_test.py,30,class, 1855,ScanDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/scan_dataset_serialization_test.py,30,class, 1856,SkipDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py,30,class, 1857,TakeDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py,61,class, 1858,RepeatDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py,91,class, 1859,SerializationIntegrationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/serialization_integration_test.py,33,class, 1860,ShardDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/shard_dataset_serialization_test.py,29,class, 1861,ShuffleAndRepeatSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/shuffle_and_repeat_dataset_serialization_test.py,30,class, 1862,ShuffleDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/shuffle_dataset_serialization_test.py,32,class, 1863,SnapshotDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/snapshot_dataset_serialization_test.py,33,class, 1864,LegacySnapshotDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/snapshot_dataset_serialization_test.py,124,class, 1865,SqlDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/sql_dataset_serialization_test.py,34,class, 1866,StatsDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/stats_dataset_serialization_test.py,36,class, 1867,TakeWhileDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/take_while_dataset_serialization_test.py,30,class, 1868,TextLineDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py,30,class, 1869,TFRecordDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/tf_record_dataset_serialization_test.py,34,class, 1870,UnbatchDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/unbatch_dataset_serialization_test.py,30,class, 1871,UniqueDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/unique_dataset_serialization_test.py,30,class, 1872,ZipDatasetSerializationTest,tensorflow/tensorflow/python/data/experimental/kernel_tests/serialization/zip_dataset_serialization_test.py,30,class, 1873,dense_to_ragged_batch,tensorflow/tensorflow/python/data/experimental/ops/batching.py,36,function,"A transformation that batches ragged elements into `tf.RaggedTensor`s. 
This transformation combines multiple consecutive elements of the input dataset into a single element. Like `tf.data.Dataset.batch`, the components of the resulting element will have an additional outer dimension, which will be `batch_size` (or `N % batch_size` for the last element if `batch_size` does not divide the number of input elements `N` evenly and `drop_remainder` is `False`). If your program depends on the batches having the same outer dimension, you should set the `drop_remainder` argument to `True` to prevent the smaller batch from being produced. Unlike `tf.data.Dataset.batch`, the input elements to be batched may have different shapes: * If an input element is a `tf.Tensor` whose static `tf.TensorShape` is fully defined, then it is batched as normal. * If an input element is a `tf.Tensor` whose static `tf.TensorShape` contains one or more axes with unknown size (i.e., `shape[i]=None`), then the output will contain a `tf.RaggedTensor` that is ragged up to any such dimension. * If an input element is a `tf.RaggedTensor` or any other type, then it is batched as normal. Example: >>> dataset = tf.data.Dataset.from_tensor_slices(np.arange(6)) >>> dataset = dataset.map(lambda x: tf.range(x)) >>> dataset.element_spec.shape TensorShape([None]) >>> dataset = dataset.apply( ... tf.data.experimental.dense_to_ragged_batch(batch_size=2)) >>> for batch in dataset: ... print(batch) Args: batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements of this dataset to combine in a single batch. drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing whether the last batch should be dropped in the case it has fewer than `batch_size` elements; the default behavior is not to drop the smaller batch. row_splits_dtype: The dtype that should be used for the `row_splits` of any new ragged tensors. Existing `tf.RaggedTensor` elements do not have their row_splits dtype changed. Returns: Dataset: A `Dataset`." 1874,dense_to_sparse_batch,tensorflow/tensorflow/python/data/experimental/ops/batching.py,102,function,"A transformation that batches ragged elements into `tf.sparse.SparseTensor`s. Like `Dataset.padded_batch()`, this transformation combines multiple consecutive elements of the dataset, which might have different shapes, into a single element. The resulting element has three components (`indices`, `values`, and `dense_shape`), which comprise a `tf.sparse.SparseTensor` that represents the same data. The `row_shape` represents the dense shape of each row in the resulting `tf.sparse.SparseTensor`, to which the effective batch size is prepended. For example: ```python # NOTE: The following examples use `{ ... }` to represent the # contents of a dataset. a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] } a.apply(tf.data.experimental.dense_to_sparse_batch( batch_size=2, row_shape=[6])) == { ([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices ['a', 'b', 'c', 'a', 'b'], # values [2, 6]), # dense_shape ([[0, 0], [0, 1], [0, 2], [0, 3]], ['a', 'b', 'c', 'd'], [1, 6]) } ``` Args: batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements of this dataset to combine in a single batch. row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object representing the equivalent dense shape of a row in the resulting `tf.sparse.SparseTensor`. Each element of this dataset must have the same rank as `row_shape`, and must have size less than or equal to `row_shape` in each dimension. 
Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1875,map_and_batch_with_legacy_function,tensorflow/tensorflow/python/data/experimental/ops/batching.py,153,function,"Fused implementation of `map` and `batch`. NOTE: This is an escape hatch for existing uses of `map_and_batch` that do not work with V2 functions. New uses are strongly discouraged and existing uses should migrate to `map_and_batch` as this method will be removed in V2. Args: map_func: A function mapping a nested structure of tensors to another nested structure of tensors. batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements of this dataset to combine in a single batch. num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the number of batches to create in parallel. On one hand, higher values can help mitigate the effect of stragglers. On the other hand, higher values can increase contention if CPU is scarce. drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing whether the last batch should be dropped in case its size is smaller than desired; the default behavior is not to drop the smaller batch. num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`, representing the number of elements to process in parallel. If not specified, `batch_size * num_parallel_batches` elements will be processed in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then the number of parallel calls is set dynamically based on available CPU. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. Raises: ValueError: If both `num_parallel_batches` and `num_parallel_calls` are specified." 1876,map_and_batch,tensorflow/tensorflow/python/data/experimental/ops/batching.py,213,function,"Fused implementation of `map` and `batch`. Maps `map_func` across `batch_size` consecutive elements of this dataset and then combines them into a batch. Functionally, it is equivalent to `map` followed by `batch`. This API is temporary and deprecated since input pipeline optimization now fuses consecutive `map` and `batch` operations automatically. Args: map_func: A function mapping a nested structure of tensors to another nested structure of tensors. batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements of this dataset to combine in a single batch. num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the number of batches to create in parallel. On one hand, higher values can help mitigate the effect of stragglers. On the other hand, higher values can increase contention if CPU is scarce. drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing whether the last batch should be dropped in case its size is smaller than desired; the default behavior is not to drop the smaller batch. num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`, representing the number of elements to process in parallel. If not specified, `batch_size * num_parallel_batches` elements will be processed in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then the number of parallel calls is set dynamically based on available CPU. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. Raises: ValueError: If both `num_parallel_batches` and `num_parallel_calls` are specified." 
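A runnable counterpart to the `{ ... }` pseudo-example in the `dense_to_sparse_batch` docstring above (the element values here are illustrative):

```python
import tensorflow as tf

# Rows of varying length: 3, 2, and 4 string elements.
ds = tf.data.Dataset.from_tensor_slices([3, 2, 4])
ds = ds.map(lambda n: tf.strings.as_string(tf.range(n)))

ds = ds.apply(tf.data.experimental.dense_to_sparse_batch(
    batch_size=2, row_shape=[6]))

for batch in ds:  # each element is a tf.sparse.SparseTensor
  print(batch.indices.numpy(), batch.values.numpy(), batch.dense_shape.numpy())
```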
1877,unbatch,tensorflow/tensorflow/python/data/experimental/ops/batching.py,269,function,"Splits elements of a dataset into multiple elements on the batch dimension. For example, if elements of the dataset are shaped `[B, a0, a1, ...]`, where `B` may vary for each input element, then for each element in the dataset, the unbatched dataset will contain `B` consecutive elements of shape `[a0, a1, ...]`. ```python # NOTE: The following example uses `{ ... }` to represent the contents # of a dataset. a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] } a.unbatch() == { 'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'} ``` Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1878,_DenseToSparseBatchDataset,tensorflow/tensorflow/python/data/experimental/ops/batching.py,297,class,A `Dataset` that batches ragged dense elements into `tf.sparse.SparseTensor`s. 1879,_MapAndBatchDataset,tensorflow/tensorflow/python/data/experimental/ops/batching.py,327,class,A `Dataset` that maps a function over a batch of elements. 1880,_DenseToRaggedDataset,tensorflow/tensorflow/python/data/experimental/ops/batching.py,380,class,"A `Dataset` that encodes dense inputs as ragged (w/ ragged_rank=0). In particular: * Any tf.Tensor elements with rank>0 are encoded as ragged tensors with ragged_rank=0. This allows tensors with varying shape to be batched together. * Any other elements are left as-is." 1881,cardinality,tensorflow/tensorflow/python/data/experimental/ops/cardinality.py,38,function,"Returns the cardinality of `dataset`, if known. The operation returns the cardinality of `dataset`. The operation may return `tf.data.experimental.INFINITE_CARDINALITY` if `dataset` contains an infinite number of elements or `tf.data.experimental.UNKNOWN_CARDINALITY` if the analysis fails to determine the number of elements in `dataset` (e.g. when the dataset source is a file). >>> dataset = tf.data.Dataset.range(42) >>> print(tf.data.experimental.cardinality(dataset).numpy()) 42 >>> dataset = dataset.repeat() >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.INFINITE_CARDINALITY).numpy()) True >>> dataset = dataset.filter(lambda x: True) >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) True Args: dataset: A `tf.data.Dataset` for which to determine cardinality. Returns: A scalar `tf.int64` `Tensor` representing the cardinality of `dataset`. If the cardinality is infinite or unknown, the operation returns the named constant `INFINITE_CARDINALITY` and `UNKNOWN_CARDINALITY` respectively." 1882,assert_cardinality,tensorflow/tensorflow/python/data/experimental/ops/cardinality.py,72,function,"Asserts the cardinality of the input dataset. NOTE: The following assumes that ""examples.tfrecord"" contains 42 records. >>> dataset = tf.data.TFRecordDataset(""examples.tfrecord"") >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) True >>> dataset = dataset.apply(tf.data.experimental.assert_cardinality(42)) >>> print(tf.data.experimental.cardinality(dataset).numpy()) 42 Args: expected_cardinality: The expected cardinality of the input dataset. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. 
Raises: FailedPreconditionError: The assertion is checked at runtime (when iterating the dataset) and an error is raised if the actual and expected cardinality differ." 1883,_AssertCardinalityDataset,tensorflow/tensorflow/python/data/experimental/ops/cardinality.py,103,class,A `Dataset` that asserts the cardinality of its input. 1884,compress,tensorflow/tensorflow/python/data/experimental/ops/compression_ops.py,24,function,"Compress a dataset element. Args: element: A nested structure of types supported by TensorFlow. Returns: A variant tensor representing the compressed element. This variant can be passed to `uncompress` to get back the original element." 1885,uncompress,tensorflow/tensorflow/python/data/experimental/ops/compression_ops.py,39,function,"Uncompress a compressed dataset element. Args: element: A scalar variant tensor to uncompress. The element should have been created by calling `compress`. output_spec: A nested structure of `tf.TypeSpec` representing the type(s) of the uncompressed element. Returns: The uncompressed element." 1886,CounterV2,tensorflow/tensorflow/python/data/experimental/ops/counter.py,29,function,"Creates a `Dataset` that counts from `start` in steps of size `step`. For example: ```python Dataset.count() == [0, 1, 2, ...) Dataset.count(2) == [2, 3, ...) Dataset.count(2, 5) == [2, 7, 12, ...) Dataset.count(0, -1) == [0, -1, -2, ...) Dataset.count(10, -1) == [10, 9, ...) ``` Args: start: (Optional.) The starting value for the counter. Defaults to 0. step: (Optional.) The step size for the counter. Defaults to 1. dtype: (Optional.) The data type for counter elements. Defaults to `tf.int64`. Returns: A `Dataset` of scalar `dtype` elements." 1887,CounterV1,tensorflow/tensorflow/python/data/experimental/ops/counter.py,59,function, 1888,ProcessingMode,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,36,class, 1889,_DataServiceDatasetV2,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,49,class,A `Dataset` that reads elements from the tf.data service. 1890,_DataServiceDatasetV1,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,124,class,A `Dataset` that executes its input through the tf.data service. 1891,_parse_service,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,148,function,"Parses a tf.data service string into a (protocol, address) tuple. Args: service: A string in the format ""protocol://address"". Returns: The parsed (protocol, address) tuple." 1892,_from_dataset_id,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,175,function,"Creates a dataset which reads data from the tf.data service. This transformation is similar to `from_dataset_id`, but supports additional parameters which we do not yet want to add to the public Python API. Args: processing_mode: A string specifying the policy for how data should be processed by tf.data workers. Currently, the only supported value is ""parallel_epochs"". service: A string indicating how to connect to the tf.data service. The string should be in the format ""protocol://address"", e.g. ""grpc://localhost:5000"". dataset_id: The id of the dataset to read from. This id is returned by `register_dataset` when the dataset is registered with the tf.data service. element_spec: A nested structure of `tf.TypeSpec`s representing the type of elements produced by the dataset. Use `tf.data.Dataset.element_spec` to see the element spec for a given dataset. job_name: (Optional.) The name of the job. This argument makes it possible for multiple datasets to share the same job. The default behavior is that the dataset creates anonymous, exclusively owned jobs. max_outstanding_requests: (Optional.) A limit on how many elements may be requested at the same time. You can use this option to control the amount of memory used, since `distribute` won't use more than `element_size` * `max_outstanding_requests` of memory. task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the dispatcher for task changes. Returns: A `tf.data.Dataset` which reads from the tf.data service." 1893,_distribute,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,248,function,"A transformation that moves dataset processing to the tf.data service. This transformation is similar to `distribute`, but supports additional parameters which we do not yet want to add to the public Python API. Args: processing_mode: A string specifying the policy for how data should be processed by tf.data workers. Currently, the only supported value is ""parallel_epochs"". service: A string indicating how to connect to the tf.data service. The string should be in the format ""protocol://address
"", e.g. ""grpc://localhost:5000"". job_name: (Optional.) The name of the job. This argument makes it possible for multiple datasets to share the same job. The default behavior is that the dataset creates anonymous, exclusively owned jobs. max_outstanding_requests: (Optional.) A limit on how many elements may be requested at the same time. You can use this option to control the amount of memory used, since `distribute` won't use more than `element_size` * `max_outstanding_requests` of memory. task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the dispatcher for task changes. Returns: Dataset: A `Dataset` of the elements produced by the data service." 1894,distribute,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,295,function,"A transformation that moves dataset processing to the tf.data service. When you iterate over a dataset containing the `distribute` transformation, the tf.data service creates a ""job"" which produces data for the dataset iteration. The `processing_mode` argument controls what data is produced by a tf.data service job. Currently, the only supported mode is ""parallel_epochs"". processing_mode=""parallel_epochs"" means that multiple tf.data workers will iterate through the dataset in parallel, each producing all elements of the dataset. For example, if the dataset contains {0, 1, 2}, every tf.data worker used for execution will produce {0, 1, 2}. If there are 3 workers, the job will produce the elements {0, 0, 0, 1, 1, 1, 2, 2, 2} (though not necessarily in that order). To account for this, it is recommended to randomly shuffle your dataset, so that different tf.data workers will iterate through the dataset in different orders. In the future, there will be additional processing modes. For example, a ""one_epoch"" mode which partitions the dataset across the tf.data workers, so that the consumers see each element of the dataset only once. ``` dataset = tf.data.Dataset.range(5) dataset = dataset.map(lambda x: x*x) dataset = dataset.apply( tf.data.experimental.service.distribute(""parallel_epochs"", ""grpc://dataservice:5000"")) dataset = dataset.map(lambda x: x+1) for element in dataset: print(element) # prints { 1, 2, 5, 10, 17 } ``` In the above example, the first two lines (before the call to `distribute`) will be executed on tf.data workers, and the elements provided over RPC. The remaining transformations (after the call to `distribute`) will be executed locally. The `job_name` argument allows jobs to be shared across multiple datasets. Instead of each dataset creating its own job, all datasets with the same `job_name` will consume from the same job. A new job will be created for each iteration of the dataset (with each repetition of `Dataset.repeat` counting as a new iteration). Suppose two training workers (in either a single client or multi-client setup) iterate over the below dataset, and there is a single tf.data worker: ``` range5_dataset = tf.data.Dataset.range(5) dataset = range5_dataset.apply(tf.data.experimental.service.distribute( ""parallel_epochs"", ""grpc://dataservice:5000"", job_name=""my_job_name"")) for iteration in range(3): print(list(dataset)) ``` The elements of each job will be split between the two processes, with elements being consumed by the processes on a first-come first-served basis. 
One possible result is that process 1 prints ``` [0, 2, 4] [0, 1, 3] [1] ``` and process 2 prints ``` [1, 3] [2, 4] [0, 2, 3, 4] ``` Job names must not be re-used across different training jobs within the lifetime of the tf.data service. In general, the tf.data service is expected to live for the duration of a single training job. To use the tf.data service with multiple training jobs, make sure to use different job names to avoid conflicts. For example, suppose a training job calls `distribute` with `job_name=""job""` and reads until end of input. If another independent job connects to the same tf.data service and tries to read from `job_name=""job""`, it will immediately receive end of input, without getting any data. **Keras and Distribution Strategies** The dataset produced by the `distribute` transformation can be passed to Keras' `Model.fit` or Distribution Strategy's `tf.distribute.Strategy.experimental_distribute_dataset` like any other `tf.data.Dataset`. We recommend setting a `job_name` on the call to `distribute` so that if there are multiple workers, they read data from the same job. Note that the autosharding normally performed by `experimental_distribute_dataset` will be disabled when setting a `job_name`, since sharing the job already results in splitting data across the workers. When using a shared job, data will be dynamically balanced across workers, so that they reach end of input about the same time. This results in better worker utilization than with autosharding, where each worker processes an independent set of files, and some workers may run out of data earlier than others. Args: processing_mode: A string specifying the policy for how data should be processed by tf.data workers. Currently, the only supported value is ""parallel_epochs"". service: A string indicating how to connect to the tf.data service. The string should be in the format ""protocol://address"", e.g. ""grpc://localhost:5000"". job_name: (Optional.) The name of the job. This argument makes it possible for multiple datasets to share the same job. The default behavior is that the dataset creates anonymous, exclusively owned jobs. max_outstanding_requests: (Optional.) A limit on how many elements may be requested at the same time. You can use this option to control the amount of memory used, since `distribute` won't use more than `element_size` * `max_outstanding_requests` of memory. Returns: Dataset: A `Dataset` of the elements produced by the data service." 1895,register_dataset,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,424,function,"Registers a dataset with the tf.data service. `register_dataset` registers a dataset with the tf.data service so that datasets can be created later with `tf.data.experimental.service.from_dataset_id`. This is useful when the dataset is registered by one process, then used in another process. When the same process is both registering and reading from the dataset, it is simpler to use `tf.data.experimental.service.distribute` instead. If the dataset is already registered with the tf.data service, `register_dataset` returns the already-registered dataset's id. >>> dispatcher = tf.data.experimental.service.DispatchServer(port=0) >>> dispatcher_address = dispatcher.target.split(""://"")[1] >>> worker = tf.data.experimental.service.WorkerServer( ... port=0, dispatcher_address=dispatcher_address) >>> dataset = tf.data.Dataset.range(10) >>> dataset_id = tf.data.experimental.service.register_dataset( ... 
dispatcher.target, dataset) >>> dataset = tf.data.experimental.service.from_dataset_id( ... processing_mode=""parallel_epochs"", ... service=dispatcher.target, ... dataset_id=dataset_id, ... element_spec=dataset.element_spec) >>> print(list(dataset.as_numpy_iterator())) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] Args: service: A string indicating how to connect to the tf.data service. The string should be in the format ""protocol://address"", e.g. ""grpc://localhost:5000"". dataset: A `tf.data.Dataset` to register with the tf.data service. Returns: A scalar int64 tensor of the registered dataset's id." 1896,from_dataset_id,tensorflow/tensorflow/python/data/experimental/ops/data_service_ops.py,491,function,"Creates a dataset which reads data from the tf.data service. This is useful when the dataset is registered by one process, then used in another process. When the same process is both registering and reading from the dataset, it is simpler to use `tf.data.experimental.service.distribute` instead. Before using `from_dataset_id`, the dataset must have been registered with the tf.data service using `tf.data.experimental.service.register_dataset`. `register_dataset` returns a dataset id for the registered dataset. That is the `dataset_id` which should be passed to `from_dataset_id`. The `element_spec` argument indicates the `tf.TypeSpec`s for the elements produced by the dataset. Currently `element_spec` must be explicitly specified, and match the dataset registered under `dataset_id`. `element_spec` defaults to `None` so that in the future we can support automatically discovering the `element_spec` by querying the tf.data service. `tf.data.experimental.service.distribute` is a convenience method which combines `register_dataset` and `from_dataset_id` into a dataset transformation. See the documentation for `tf.data.experimental.service.distribute` for more detail about how `from_dataset_id` works. >>> dispatcher = tf.data.experimental.service.DispatchServer(port=0) >>> dispatcher_address = dispatcher.target.split(""://"")[1] >>> worker = tf.data.experimental.service.WorkerServer( ... port=0, dispatcher_address=dispatcher_address) >>> dataset = tf.data.Dataset.range(10) >>> dataset_id = tf.data.experimental.service.register_dataset( ... dispatcher.target, dataset) >>> dataset = tf.data.experimental.service.from_dataset_id( ... processing_mode=""parallel_epochs"", ... service=dispatcher.target, ... dataset_id=dataset_id, ... element_spec=dataset.element_spec) >>> print(list(dataset.as_numpy_iterator())) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] Args: processing_mode: A string specifying the policy for how data should be processed by tf.data workers. Currently, the only supported value is ""parallel_epochs"". service: A string indicating how to connect to the tf.data service. The string should be in the format ""protocol://address"", e.g. ""grpc://localhost:5000"". dataset_id: The id of the dataset to read from. This id is returned by `register_dataset` when the dataset is registered with the tf.data service. element_spec: A nested structure of `tf.TypeSpec`s representing the type of elements produced by the dataset. Use `tf.data.Dataset.element_spec` to see the element spec for a given dataset. job_name: (Optional.) The name of the job. This argument makes it possible for multiple datasets to share the same job. The default behavior is that the dataset creates anonymous, exclusively owned jobs. max_outstanding_requests: (Optional.) A limit on how many elements may be requested at the same time. 
You can use this option to control the amount of memory used, since `distribute` won't use more than `element_size` * `max_outstanding_requests` of memory. Returns: A `tf.data.Dataset` which reads from the tf.data service." 1897,_AutoShardDataset,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,34,class,"A `Dataset` that shards the `Dataset` automatically. This dataset takes in an existing dataset and tries to automatically figure out how to shard the dataset in a multi-worker scenario. Currently, it uses Grappler to walk up the dataset graph until it finds a reader dataset (e.g. CSVDataset, TFRecordDataset), then inserts a ShardDataset op before that node so that each worker only sees some files. Args: num_workers: Total number of workers to shard this dataset across. index: The current worker index (out of the total number of workers) this dataset is for. Raises: NotFoundError: If we cannot find a suitable reader dataset to begin automatically sharding the dataset." 1898,_AutoShardDatasetV1,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,71,function, 1899,_RebatchDataset,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,76,class,"A `Dataset` that rebatches elements from its input into new batch sizes. `_RebatchDataset(input_dataset, batch_sizes)` is functionally equivalent to `input_dataset.unbatch().batch(N)`, where the value of N cycles through the `batch_sizes` input list. The elements produced by this dataset have the same rank as the elements of the input dataset. For example: ```python ds = tf.data.Dataset.range(8) ds = ds.batch(4) ds = _RebatchDataset(ds, batch_sizes=[2, 1, 1]) for elem in ds: print(elem) >> [0, 1], [2], [3], [4, 5], [6], [7] ds = tf.data.Dataset.range(16) ds = ds.batch(4) ds = _RebatchDataset(ds, batch_sizes=[6]) for elem in ds: print(elem) >> [0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15] ```" 1900,_LegacyRebatchDataset,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,209,class,"A `Dataset` that divides its input batches into `num_replicas` sub-batches. For each batch in the input dataset, _LegacyRebatchDataset will produce `num_replicas` smaller batches whose sizes add up to the original batch size. For example: ```python ds = tf.data.Dataset.range(8) ds = ds.batch(4) ds = _LegacyRebatchDataset(ds, num_replicas=3) for elem in ds: print(elem) >> [0, 1], [2, 3], [], [4, 5], [6, 7], [] ```" 1901,_RemoteDataset,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,280,class,Creates a dataset on a given `device` given a graph def. 1902,replicate,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,294,function,"A transformation that replicates `dataset` onto a list of devices. Args: dataset: A `tf.data.Dataset` object. devices: A list of devices to replicate the dataset on. Returns: A dictionary mapping device name to a dataset on that device." 1903,batch_sizes_for_worker,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,328,function,"Determines how to rebatch a dataset for the given worker. Given the global batch size, number of workers, number of replicas per worker, and worker index, returns the correct batch sizes for rebatching a dataset on worker `worker_index` of `num_workers`, such that each global step (across all workers and replicas) will consume global_batch_size elements. The returned value should be passed as the `batch_sizes` input parameter to `tf.data.experimental.rebatch()`. 
The returned batch sizes meet the following constraints: Let G = global_batch_size, W = num_workers, R = num_replicas_per_worker (A) for any worker, len(batch_sizes) = W * R (B) for any worker, sum(batch_sizes) == G (C) for any global step (i.e. R iterations on each worker), the sum of batches consumed by replicas across all workers is G. (D) any two batch sizes of any two replicas differ by at most one. For example, suppose we have G = 7, W = 2, R = 2, and suppose we have two files which each contain 7 elements: ```python # WORKER 0 batch_sizes_0 = batch_sizes_for_worker(global_batch_size=7, num_workers=2, num_replicas_per_worker=2, worker_index=0) print(batch_sizes_0) >> [2, 2, 2, 1] dataset_0 = tf.data.Dataset.from_tensor_slices([""file_a"", ""file_b""]) dataset_0 = dataset_0.shard(num_shards=2, index=0) dataset_0 = dataset_0.batch(7) dataset_0 = dataset_0.apply(tf.data.experimental.rebatch(batch_sizes_0)) for elem in dataset_0: print(elem) >> [[A0, A1], [A2, A3], [A4, A5], [A6]] # WORKER 1 batch_sizes_1 = batch_sizes_for_worker(global_batch_size=7, num_workers=2, num_replicas_per_worker=2, worker_index=1) print(batch_sizes_1) >> [2, 1, 2, 2] dataset_1 = tf.data.Dataset.from_tensor_slices([""file_a"", ""file_b""]) dataset_1 = dataset_1.shard(num_shards=2, index=1) dataset_1 = dataset_1.batch(7) dataset_1 = dataset_1.apply(tf.data.experimental.rebatch(batch_sizes_1)) for elem in dataset_1: print(elem) >> [[B0, B1], [B2], [B3, B4], [B5, B6]] ``` The above example will produce the following elements: Step 1: Worker 0 Replica 0: [A0, A1] Worker 0 Replica 1: [A2, A3] Worker 1 Replica 0: [B0, B1] Worker 1 Replica 1: [B2] Total batch size = 7 Step 2: Worker 0 Replica 0: [A4, A5] Worker 0 Replica 1: [A6] Worker 1 Replica 0: [B3, B4] Worker 1 Replica 1: [B5, B6] Total batch size = 7 Args: global_batch_size: A `tf.int64` scalar, representing the global batch size. num_workers: An integer representing the number of workers the dataset will be distributed across. num_replicas_per_worker: An integer representing the number of replicas per worker. All workers are assumed to have the same number of replicas. worker_index: An integer index of the worker to be rebatched. Returns: A `tf.int64` vector, representing the batch sizes to rebatch the dataset into." 1904,compute_batch_size,tensorflow/tensorflow/python/data/experimental/ops/distribute.py,436,function,"An operation that returns the batch size of the dataset. This op tries to infer the batch size statically by walking up the dataset tree from the final dataset node and returning the batch size of the first batching dataset (such as from .batch() and .padded_batch()) that it encounters. This differs from using the `element_spec` of a dataset in that it does not account for partial batches. This operation may fail if it encounters contradictory batch sizes (for example, if the dataset is created by zipping together two datasets with different batch sizes), if there are no explicit batching transformations, or if there are operations downstream from the batching transformation that may modify its batch size. In these cases, it returns -1. Args: dataset: A `tf.data.Dataset` object. Returns: A `tf.int64` Tensor representing the batch size of the dataset sans partial batches. If this cannot be inferred statically, the value of this tensor will be -1." 1905,AutoShardPolicy,tensorflow/tensorflow/python/data/experimental/ops/distribute_options.py,27,class,"Represents the type of auto-sharding we enable. 
Please see the DistributeOptions.auto_shard_policy documentation for more information on each type of auto-sharding." 1906,ExternalStatePolicy,tensorflow/tensorflow/python/data/experimental/ops/distribute_options.py,39,class, 1907,DistributeOptions,tensorflow/tensorflow/python/data/experimental/ops/distribute_options.py,46,class,"Represents options for distributed data processing. You can set the distribution options of a dataset through the `experimental_distribute` property of `tf.data.Options`; the property is an instance of `tf.data.experimental.DistributeOptions`. ```python options = tf.data.Options() options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF dataset = dataset.with_options(options) ```" 1908,enumerate_dataset,tensorflow/tensorflow/python/data/experimental/ops/enumerate_ops.py,26,function,"A transformation that enumerates the elements of a dataset. It is similar to Python's `enumerate`. For example: ```python # NOTE: The following examples use `{ ... }` to represent the # contents of a dataset. a = { 1, 2, 3 } b = { (7, 8), (9, 10) } # The nested structure of the `datasets` argument determines the # structure of elements in the resulting dataset. a.apply(tf.data.experimental.enumerate_dataset(start=5)) => { (5, 1), (6, 2), (7, 3) } b.apply(tf.data.experimental.enumerate_dataset()) => { (0, (7, 8)), (1, (9, 10)) } ``` Args: start: A `tf.int64` scalar `tf.Tensor`, representing the start value for enumeration. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1909,ignore_errors,tensorflow/tensorflow/python/data/experimental/ops/error_ops.py,26,function,"Creates a `Dataset` from another `Dataset` and silently ignores any errors. Use this transformation to produce a dataset that contains the same elements as the input, but silently drops any elements that caused an error. For example: ```python dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.]) # Computing `tf.debugging.check_numerics(1. / 0.)` will raise an InvalidArgumentError. dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, ""error"")) # Using `ignore_errors()` will drop the element that causes an error. dataset = dataset.apply(tf.data.experimental.ignore_errors()) # ==> {1., 0.5, 0.25} ``` Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1910,_IgnoreErrorsDataset,tensorflow/tensorflow/python/data/experimental/ops/error_ops.py,56,class,A `Dataset` that silently ignores errors when computing its input. 1911,get_single_element,tensorflow/tensorflow/python/data/experimental/ops/get_single_element.py,27,function,"Returns the single element in `dataset` as a nested structure of tensors. This function enables you to use a `tf.data.Dataset` in a stateless ""tensor-in tensor-out"" expression, without creating an iterator. This can be useful when your preprocessing transformations are expressed as a `Dataset`, and you want to use the transformation at serving time. For example: ```python def preprocessing_fn(input_str): # ... return image, label input_batch = ... # input batch of BATCH_SIZE elements dataset = (tf.data.Dataset.from_tensor_slices(input_batch) .map(preprocessing_fn, num_parallel_calls=BATCH_SIZE) .batch(BATCH_SIZE)) image_batch, label_batch = tf.data.experimental.get_single_element(dataset) ``` Args: dataset: A `tf.data.Dataset` object containing a single element. Returns: A nested structure of `tf.Tensor` objects, corresponding to the single element of `dataset`. 
Raises: TypeError: if `dataset` is not a `tf.data.Dataset` object. InvalidArgumentError (at runtime): if `dataset` does not contain exactly one element." 1912,group_by_reducer,tensorflow/tensorflow/python/data/experimental/ops/grouping.py,38,function,"A transformation that groups elements and performs a reduction. This transformation maps each element of a dataset to a key using `key_func` and groups the elements by key. The `reducer` is used to process each group; its `init_func` is used to initialize state for each group when it is created, the `reduce_func` is used to update the state every time an element is mapped to the matching group, and the `finalize_func` is used to map the final state to an output value. Args: key_func: A function mapping a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a scalar `tf.int64` tensor. reducer: An instance of `Reducer`, which captures the reduction logic using the `init_func`, `reduce_func`, and `finalize_func` functions. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1913,group_by_window,tensorflow/tensorflow/python/data/experimental/ops/grouping.py,68,function,"A transformation that groups windows of elements by key and reduces them. This transformation maps each consecutive element in a dataset to a key using `key_func` and groups the elements by key. It then applies `reduce_func` to at most `window_size_func(key)` elements matching the same key. All except the final window for each key will contain `window_size_func(key)` elements; the final window may be smaller. You may provide either a constant `window_size` or a window size determined by the key through `window_size_func`. Args: key_func: A function mapping a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a scalar `tf.int64` tensor. reduce_func: A function mapping a key and a dataset of up to `window_size` consecutive elements matching that key to another dataset. window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements matching the same key to combine in a single batch, which will be passed to `reduce_func`. Mutually exclusive with `window_size_func`. window_size_func: A function mapping a key to a `tf.int64` scalar `tf.Tensor`, representing the number of consecutive elements matching the same key to combine in a single batch, which will be passed to `reduce_func`. Mutually exclusive with `window_size`. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. Raises: ValueError: if neither or both of {`window_size`, `window_size_func`} are passed." 1914,bucket_by_sequence_length,tensorflow/tensorflow/python/data/experimental/ops/grouping.py,128,function,"A transformation that buckets elements in a `Dataset` by length. Elements of the `Dataset` are grouped together by length and then are padded and batched. This is useful for sequence tasks in which the elements have variable length. Grouping together elements that have similar lengths reduces the total fraction of padding in a batch which increases training step efficiency. Args: element_length_func: function from element in `Dataset` to `tf.int32`, determines the length of the element, which will determine the bucket it goes into. bucket_boundaries: `list`, upper length boundaries of the buckets. bucket_batch_sizes: `list`, batch size per bucket. Length should be `len(bucket_boundaries) + 1`. 
padded_shapes: Nested structure of `tf.TensorShape` to pass to `tf.data.Dataset.padded_batch`. If not provided, will use `dataset.output_shapes`, which will result in variable length dimensions being padded out to the maximum length in each batch. padding_values: Values to pad with, passed to `tf.data.Dataset.padded_batch`. Defaults to padding with 0. pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown size to maximum length in batch. If `True`, will pad dimensions with unknown size to bucket boundary minus 1 (i.e., the maximum length in each bucket), and caller must ensure that the source `Dataset` does not contain any elements with length longer than `max(bucket_boundaries)`. no_padding: `bool`, indicates whether to pad the batch features (features need to be either of type `tf.sparse.SparseTensor` or of same shape). drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing whether the last batch should be dropped in the case it has fewer than `batch_size` elements; the default behavior is not to drop the smaller batch. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. Raises: ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`." 1915,_GroupByReducerDataset,tensorflow/tensorflow/python/data/experimental/ops/grouping.py,247,class,A `Dataset` that groups its input and performs a reduction. 1916,_GroupByWindowDataset,tensorflow/tensorflow/python/data/experimental/ops/grouping.py,370,class,A `Dataset` that groups its input and performs a windowed reduction. 1917,Reducer,tensorflow/tensorflow/python/data/experimental/ops/grouping.py,443,class,"A reducer is used for reducing a set of elements. A reducer is represented as a tuple of the three functions: 1) initialization function: key => initial state 2) reduce function: (old state, input) => new state 3) finalization function: state => result" 1918,parallel_interleave,tensorflow/tensorflow/python/data/experimental/ops/interleave_ops.py,43,function,"A parallel version of the `Dataset.interleave()` transformation. `parallel_interleave()` maps `map_func` across its input to produce nested datasets, and outputs their elements interleaved. Unlike `tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested datasets in parallel, which increases the throughput, especially in the presence of stragglers. Furthermore, the `sloppy` argument can be used to improve performance, by relaxing the requirement that the outputs are produced in a deterministic order, and allowing the implementation to skip over nested datasets whose elements are not readily available when requested. Example usage: ```python # Preprocess 4 files concurrently. filenames = tf.data.Dataset.list_files(""/path/to/data/train*.tfrecords"") dataset = filenames.apply( tf.data.experimental.parallel_interleave( lambda filename: tf.data.TFRecordDataset(filename), cycle_length=4)) ``` WARNING: If `sloppy` is `True`, the order of produced elements is not deterministic. Args: map_func: A function mapping a nested structure of tensors to a `Dataset`. cycle_length: The number of input `Dataset`s to interleave from in parallel. block_length: The number of consecutive elements to pull from an input `Dataset` before advancing to the next input `Dataset`. sloppy: A boolean controlling whether determinism should be traded for performance by allowing elements to be produced out of order. 
If `sloppy` is `None`, the `tf.data.Options.experimental_deterministic` dataset option (`True` by default) is used to decide whether to enforce a deterministic order. buffer_output_elements: The number of elements each iterator being interleaved should buffer (similar to the `.prefetch()` transformation for each interleaved iterator). prefetch_input_elements: The number of input elements to transform to iterators before they are needed for interleaving. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1919,_DirectedInterleaveDataset,tensorflow/tensorflow/python/data/experimental/ops/interleave_ops.py,104,class,A substitute for `Dataset.interleave()` on a fixed list of datasets. 1920,sample_from_datasets_v2,tensorflow/tensorflow/python/data/experimental/ops/interleave_ops.py,146,function,"Samples elements at random from the datasets in `datasets`. Args: datasets: A list of `tf.data.Dataset` objects with compatible structure. weights: (Optional.) A list of `len(datasets)` floating-point values where `weights[i]` represents the probability with which an element should be sampled from `datasets[i]`, or a `tf.data.Dataset` object where each element is such a list. Defaults to a uniform distribution across `datasets`. seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random seed that will be used to create the distribution. See `tf.random.set_seed` for behavior. Returns: A dataset that interleaves elements from `datasets` at random, according to `weights` if provided, otherwise with uniform probability. Raises: TypeError: If the `datasets` or `weights` arguments have the wrong type. ValueError: If the `weights` argument is specified and does not match the length of the `datasets` element." 1921,sample_from_datasets_v1,tensorflow/tensorflow/python/data/experimental/ops/interleave_ops.py,230,function, 1922,choose_from_datasets_v2,tensorflow/tensorflow/python/data/experimental/ops/interleave_ops.py,237,function,"Creates a dataset that deterministically chooses elements from `datasets`. For example, given the following datasets: ```python datasets = [tf.data.Dataset.from_tensors(""foo"").repeat(), tf.data.Dataset.from_tensors(""bar"").repeat(), tf.data.Dataset.from_tensors(""baz"").repeat()] # Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`. choice_dataset = tf.data.Dataset.range(3).repeat(3) result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset) ``` The elements of `result` will be: ``` ""foo"", ""bar"", ""baz"", ""foo"", ""bar"", ""baz"", ""foo"", ""bar"", ""baz"" ``` Args: datasets: A list of `tf.data.Dataset` objects with compatible structure. choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between `0` and `len(datasets) - 1`. Returns: A dataset that interleaves elements from `datasets` according to the values of `choice_dataset`. Raises: TypeError: If the `datasets` or `choice_dataset` arguments have the wrong type." 1923,choose_from_datasets_v1,tensorflow/tensorflow/python/data/experimental/ops/interleave_ops.py,280,function, 1924,save,tensorflow/tensorflow/python/data/experimental/ops/io.py,34,function,"Saves the content of the given dataset. Example usage: >>> import os >>> import tempfile >>> path = os.path.join(tempfile.gettempdir(), ""saved_data"") >>> # Save a dataset >>> dataset = tf.data.Dataset.range(2) >>> tf.data.experimental.save(dataset, path) >>> new_dataset = tf.data.experimental.load(path, ... tf.TensorSpec(shape=(), dtype=tf.int64)) >>> for elem in new_dataset: ... 
print(elem) tf.Tensor(0, shape=(), dtype=int64) tf.Tensor(1, shape=(), dtype=int64) The saved dataset is saved in multiple file ""shards"". By default, the dataset output is divided into shards in a round-robin fashion but custom sharding can be specified via the `shard_func` function. For example, you can save the dataset using a single shard as follows: ```python dataset = make_dataset() def custom_shard_func(element): return 0 tf.data.experimental.save( dataset, path=""/path/to/data"", ..., shard_func=custom_shard_func) ``` NOTE: The directory layout and file format used for saving the dataset are considered an implementation detail and may change. For this reason, datasets saved through `tf.data.experimental.save` should only be consumed through `tf.data.experimental.load`, which is guaranteed to be backwards compatible. Args: dataset: The dataset to save. path: Required. A directory to use for saving the dataset. compression: Optional. The algorithm to use to compress data when writing it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`. shard_func: Optional. A function to control the mapping of dataset elements to file shards. The function is expected to map elements of the input dataset to int64 shard IDs. If present, the function will be traced and executed as graph computation." 1925,_LoadDataset,tensorflow/tensorflow/python/data/experimental/ops/io.py,107,class,A dataset that loads a previously saved dataset. 1926,load,tensorflow/tensorflow/python/data/experimental/ops/io.py,146,function,"Loads a previously saved dataset. Example usage: >>> import os >>> import tempfile >>> path = os.path.join(tempfile.gettempdir(), ""saved_data"") >>> # Save a dataset >>> dataset = tf.data.Dataset.range(2) >>> tf.data.experimental.save(dataset, path) >>> new_dataset = tf.data.experimental.load(path, ... tf.TensorSpec(shape=(), dtype=tf.int64)) >>> for elem in new_dataset: ... print(elem) tf.Tensor(0, shape=(), dtype=int64) tf.Tensor(1, shape=(), dtype=int64) Note that to load a previously saved dataset, you need to specify `element_spec` -- a type signature of the elements of the saved dataset, which can be obtained via `tf.data.Dataset.element_spec`. This requirement exists so that shape inference of the loaded dataset does not need to perform I/O. If the default option of sharding the saved dataset was used, the element order of the saved dataset will be preserved when loading it. The `reader_func` argument can be used to specify a custom order in which elements should be loaded from the individual shards. The `reader_func` is expected to take a single argument -- a dataset of datasets, each containing elements of one of the shards -- and return a dataset of elements. For example, the order of shards can be shuffled when loading them as follows: ```python def custom_reader_func(datasets): datasets = datasets.shuffle(NUM_SHARDS) return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE) dataset = tf.data.experimental.load( path=""/path/to/data"", ..., reader_func=custom_reader_func) ``` Args: path: Required. A path pointing to a previously saved dataset. element_spec: Required. A nested structure of `tf.TypeSpec` objects matching the structure of an element of the saved dataset and specifying the type of individual element components. compression: Optional. The algorithm to use to decompress the data when reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`. reader_func: Optional. A function to control how to read data from shards. 
If present, the function will be traced and executed as graph computation. Returns: A `tf.data.Dataset` instance." 1927,_convert_external_state_policy_to_enum,tensorflow/tensorflow/python/data/experimental/ops/iterator_ops.py,32,function, 1928,make_saveable_from_iterator,tensorflow/tensorflow/python/data/experimental/ops/iterator_ops.py,49,function,"Returns a SaveableObject for saving/restoring iterator state using Saver. Args: iterator: Iterator. external_state_policy: A string that identifies how to handle input pipelines that depend on external state. Possible values are 'ignore': The external state is silently ignored. 'warn': The external state is ignored, logging a warning. 'fail': The operation fails upon encountering external state. By default we set it to 'fail'. Returns: A SaveableObject for saving/restoring iterator state using Saver. Raises: ValueError: If iterator does not support checkpointing. ValueError: If `external_state_policy` is not one of 'warn', 'ignore' or 'fail'. For example: ```python with tf.Graph().as_default(): ds = tf.data.Dataset.range(10) iterator = ds.make_initializable_iterator() # Build the iterator SaveableObject. saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator) # Add the SaveableObject to the SAVEABLE_OBJECTS collection so # it can be automatically saved using Saver. tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj) saver = tf.compat.v1.train.Saver() while continue_training: ... Perform training ... if should_save_checkpoint: saver.save() ``` Note: When restoring the iterator, the existing iterator state is completely discarded. This means that any changes you may have made to the Dataset graph will be discarded as well! This includes the new Dataset graph that you may have built during validation. So, while running validation, make sure to run the initializer for the validation input pipeline after restoring the checkpoint. Note: Not all iterators support checkpointing yet. Attempting to save the state of an unsupported iterator will throw an error." 1929,CheckpointInputPipelineHook,tensorflow/tensorflow/python/data/experimental/ops/iterator_ops.py,106,class,"Checkpoints input pipeline state every N steps or seconds. This hook saves the state of the iterators in the `Graph` so that when training is resumed the input pipeline continues from where it left off. This could potentially avoid overfitting in certain pipelines where the number of training steps per eval is small compared to the dataset size or if the training pipeline is pre-empted. Differences from `CheckpointSaverHook`: 1. Saves only the input pipelines in the ""iterators"" collection and not the global variables or other saveable objects. 2. Does not write the `GraphDef` and `MetaGraphDef` to the summary. Example of checkpointing the training pipeline: ```python est = tf.estimator.Estimator(model_fn) while True: est.train( train_input_fn, hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)], steps=train_steps_per_eval) # Note: We do not pass the hook here. metrics = est.evaluate(eval_input_fn) if should_stop_the_training(metrics): break ``` This hook should be used if the input pipeline state needs to be saved separately from the model checkpoint. Doing so may be useful for a few reasons: 1. The input pipeline checkpoint may be large, if there are large shuffle or prefetch buffers for instance, and may bloat the checkpoint size. 2. 
If the input pipeline is shared between training and validation, restoring the checkpoint during validation may override the validation input pipeline. For saving the input pipeline checkpoint alongside the model weights use `tf.data.experimental.make_saveable_from_iterator` directly to create a `SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however, that you will need to be careful not to restore the training iterator during eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS collection when building the eval graph." 1930,_CustomSaver,tensorflow/tensorflow/python/data/experimental/ops/iterator_ops.py,297,class,"`Saver` with a different default `latest_filename`. This is used in the `CheckpointInputPipelineHook` to avoid conflicts with the model ckpt saved by the `CheckpointSaverHook`." 1931,map_defun,tensorflow/tensorflow/python/data/experimental/ops/map_defun.py,26,function,"Map a function on the list of tensors unpacked from `elems` on dimension 0. Args: fn: A function (`function.defun`) that takes a list of tensors and returns another list of tensors. The output list has the same types as output_dtypes. The elements of the output list have the same dimension 0 as `elems`, and the remaining dimensions correspond to those of `output_shapes`. elems: A list of tensors. output_dtypes: A list of dtypes corresponding to the output types of the function. output_shapes: A list of `TensorShape`s corresponding to the output shapes from each invocation of the function on slices of inputs. max_intra_op_parallelism: An integer. If positive, sets the max parallelism limit of each function call to this. Raises: ValueError: if any of the inputs are malformed. Returns: A list of `Tensor` objects with the same types as `output_dtypes`." 1932,MatchingFilesDataset,tensorflow/tensorflow/python/data/experimental/ops/matching_files.py,28,class,A `Dataset` that lists the files according to the input patterns. 1933,model,tensorflow/tensorflow/python/data/experimental/ops/optimization.py,24,function,"A transformation that models performance. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1934,optimize,tensorflow/tensorflow/python/data/experimental/ops/optimization.py,39,function,"A transformation that applies optimizations. Args: optimizations: (Optional.) A `tf.string` vector `tf.Tensor` identifying optimizations to use. If not specified, the default set of optimizations is applied. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1935,_ChooseFastestDataset,tensorflow/tensorflow/python/data/experimental/ops/optimization.py,59,class,A `Dataset` that merges two input datasets. 1936,_ChooseFastestBranchDataset,tensorflow/tensorflow/python/data/experimental/ops/optimization.py,106,class,A `Dataset` that merges two input datasets. 1937,_AutotuneAlgorithm,tensorflow/tensorflow/python/data/experimental/ops/optimization_options.py,29,class,Controls what algorithm is used in the autotune implementation. 1938,MapVectorizationOptions,tensorflow/tensorflow/python/data/experimental/ops/optimization_options.py,36,class,Represents options for the MapVectorization optimization. 1939,OptimizationOptions,tensorflow/tensorflow/python/data/experimental/ops/optimization_options.py,70,class,"Represents options for dataset optimizations. 
You can set the optimization options of a dataset through the `experimental_optimization` property of `tf.data.Options`; the property is an instance of `tf.data.experimental.OptimizationOptions`. ```python options = tf.data.Options() options.experimental_optimization.noop_elimination = True options.experimental_optimization.map_vectorization.enabled = True options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) ```" 1940,_ParseExampleDataset,tensorflow/tensorflow/python/data/experimental/ops/parsing_ops.py,31,class,A `Dataset` that parses `example` dataset into a `dict` dataset. 1941,parse_example_dataset,tensorflow/tensorflow/python/data/experimental/ops/parsing_ops.py,110,function,"A transformation that parses `Example` protos into a `dict` of tensors. Parses a number of serialized `Example` protos given in `serialized`. We refer to `serialized` as a batch with `batch_size` many entries of individual `Example` protos. This op parses serialized examples into a dictionary mapping keys to `Tensor`, `SparseTensor`, and `RaggedTensor` objects. `features` is a dict from keys to `VarLenFeature`, `RaggedFeature`, `SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` and `SparseFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenFeature` is mapped to a `Tensor`. See `tf.io.parse_example` for more details about feature dictionaries. Args: features: A `dict` mapping feature keys to `FixedLenFeature`, `VarLenFeature`, `RaggedFeature`, and `SparseFeature` values. num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`, representing the number of parsing processes to call in parallel. deterministic: (Optional.) A boolean controlling whether determinism should be traded for performance by allowing elements to be produced out of order if some parsing calls complete faster than others. If `deterministic` is `None`, the `tf.data.Options.experimental_deterministic` dataset option (`True` by default) is used to decide whether to produce elements deterministically. Returns: A dataset transformation function, which can be passed to `tf.data.Dataset.apply`. Raises: ValueError: if features argument is None." 1942,prefetch_to_device,tensorflow/tensorflow/python/data/experimental/ops/prefetching_ops.py,37,function,"A transformation that prefetches dataset values to the given `device`. NOTE: Although the transformation creates a `tf.data.Dataset`, the transformation must be the final `Dataset` in the input pipeline. Args: device: A string. The name of a device to which elements will be prefetched. buffer_size: (Optional.) The number of elements to buffer on `device`. Defaults to an automatically chosen value. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1943,copy_to_device,tensorflow/tensorflow/python/data/experimental/ops/prefetching_ops.py,60,function,"A transformation that copies dataset elements to the given `target_device`. Args: target_device: The name of a device to which elements will be copied. source_device: The original device on which `input_dataset` will be placed. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1944,_CopyToDeviceDataset,tensorflow/tensorflow/python/data/experimental/ops/prefetching_ops.py,86,class,A `Dataset` that copies elements to another device. 
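A minimal usage sketch for the `prefetch_to_device` transformation indexed above (not from the indexed source; the `"/gpu:0"` device string and `buffer_size` value are illustrative assumptions about the host's hardware):

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(10)

# Per the docstring above, this must be the final transformation in the
# input pipeline: it stages a small buffer of elements on the target device.
dataset = dataset.apply(
    tf.data.experimental.prefetch_to_device("/gpu:0",  # assumed device string
                                            buffer_size=2))
```

`copy_to_device` is applied the same way via `Dataset.apply`, with `prefetch_to_device` reserved for the pipeline's final stage.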
1945,_MapOnGpuDataset,tensorflow/tensorflow/python/data/experimental/ops/prefetching_ops.py,228,class,A `Dataset` that maps a function over elements in its input using a GPU. 1946,map_on_gpu,tensorflow/tensorflow/python/data/experimental/ops/prefetching_ops.py,260,function,"Maps `map_func` across the elements of this dataset. NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs `map_func` on GPU. It must be used after applying the `tf.data.experimental.copy_to_device` transformation with a GPU device argument. Args: map_func: A function mapping a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to another nested structure of tensors. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1947,RandomDatasetV2,tensorflow/tensorflow/python/data/experimental/ops/random_ops.py,32,class,A `Dataset` of pseudorandom values. 1948,RandomDatasetV1,tensorflow/tensorflow/python/data/experimental/ops/random_ops.py,48,class,A `Dataset` of pseudorandom values. 1949,_is_valid_int32,tensorflow/tensorflow/python/data/experimental/ops/readers.py,50,function, 1950,_is_valid_int64,tensorflow/tensorflow/python/data/experimental/ops/readers.py,59,function, 1951,_is_valid_float,tensorflow/tensorflow/python/data/experimental/ops/readers.py,67,function, 1952,_infer_type,tensorflow/tensorflow/python/data/experimental/ops/readers.py,74,function,"Given a string, infers its tensor type. Infers the type of a value by picking the least 'permissive' type possible, while still allowing the previous type inference for this column to be valid. Args: str_val: String value to infer the type of. na_value: Additional string to recognize as a NA/NaN CSV value. prev_type: Type previously inferred based on values of this column that we've seen up till now. Returns: Inferred dtype." 1953,_next_csv_row,tensorflow/tensorflow/python/data/experimental/ops/readers.py,111,function,Generator that yields rows of CSV file(s) in order. 1954,_infer_column_defaults,tensorflow/tensorflow/python/data/experimental/ops/readers.py,131,function,Infers column types from the first N valid CSV records of files. 1955,_infer_column_names,tensorflow/tensorflow/python/data/experimental/ops/readers.py,158,function,Infers column names from first rows of files. 1956,_get_sorted_col_indices,tensorflow/tensorflow/python/data/experimental/ops/readers.py,183,function,Transforms select_columns argument into sorted column indices. 1957,_maybe_shuffle_and_repeat,tensorflow/tensorflow/python/data/experimental/ops/readers.py,213,function,"Optionally shuffle and repeat dataset, as requested." 1958,make_tf_record_dataset,tensorflow/tensorflow/python/data/experimental/ops/readers.py,223,function,"Reads and optionally parses TFRecord files into a dataset. Provides common functionality such as batching, optional parsing, shuffling, and performant defaults. Args: file_pattern: List of files or patterns of TFRecord file paths. See `tf.io.gfile.glob` for pattern rules. batch_size: An int representing the number of records to combine in a single batch. parser_fn: (Optional.) A function accepting string input to parse and process the record contents. This function must map records to components of a fixed shape, so they may be batched. By default, uses the record contents unmodified. num_epochs: (Optional.) An int specifying the number of times this dataset is repeated. If None (the default), cycles through the dataset forever. shuffle: (Optional.) 
A bool that indicates whether the input should be shuffled. Defaults to `True`. shuffle_buffer_size: (Optional.) Buffer size to use for shuffling. A large buffer size ensures better shuffling, but increases memory usage and startup time. shuffle_seed: (Optional.) Randomization seed to use for shuffling. prefetch_buffer_size: (Optional.) An int specifying the number of feature batches to prefetch for performance improvement. Defaults to auto-tune. Set to 0 to disable prefetching. num_parallel_reads: (Optional.) Number of threads used to read records from files. By default or if set to a value >1, the results will be interleaved. Defaults to `24`. num_parallel_parser_calls: (Optional.) Number of records to parse in parallel. Defaults to `batch_size`. drop_final_batch: (Optional.) Whether the last batch should be dropped in case its size is smaller than `batch_size`; the default behavior is not to drop the smaller batch. Returns: A dataset, where each element matches the output of `parser_fn` except it will have an additional leading `batch-size` dimension, or a `batch_size`-length 1-D tensor of strings if `parser_fn` is unspecified." 1959,make_csv_dataset_v2,tensorflow/tensorflow/python/data/experimental/ops/readers.py,322,function,"Reads CSV files into a dataset. Reads CSV files into a dataset, where each element is a (features, labels) tuple that corresponds to a batch of CSV rows. The features dictionary maps feature column names to `Tensor`s containing the corresponding feature data, and labels is a `Tensor` containing the batch's label data. Args: file_pattern: List of files or patterns of file paths containing CSV records. See `tf.io.gfile.glob` for pattern rules. batch_size: An int representing the number of records to combine in a single batch. column_names: An optional list of strings that corresponds to the CSV columns, in order. One per column of the input record. If this is not provided, infers the column names from the first row of the records. These names will be the keys of the features dict of each dataset element. column_defaults: An optional list of default values for the CSV fields. One item per selected column of the input record. Each item in the list is either a valid CSV dtype (float32, float64, int32, int64, or string), or a `Tensor` with one of the aforementioned types. The tensor can either be a scalar default value (if the column is optional), or an empty tensor (if the column is required). If a dtype is provided instead of a tensor, the column is also treated as required. If this list is not provided, tries to infer types based on reading the first num_rows_for_inference rows of files specified, and assumes all columns are optional, defaulting to `0` for numeric values and `""""` for string values. If both this and `select_columns` are specified, these must have the same lengths, and `column_defaults` is assumed to be sorted in order of increasing column index. label_name: An optional string corresponding to the label column. If provided, the data for this column is returned as a separate `Tensor` from the features dictionary, so that the dataset complies with the format expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input function. select_columns: An optional list of integer indices or string column names, that specifies a subset of columns of CSV data to select. If column names are provided, these must correspond to names provided in `column_names` or inferred from the file header lines. 
When this argument is specified, only a subset of CSV columns will be parsed and returned, corresponding to the columns specified. Using this results in faster parsing and lower memory usage. If both this and `column_defaults` are specified, these must have the same lengths, and `column_defaults` is assumed to be sorted in order of increasing column index. field_delim: An optional `string`. Defaults to `"",""`. Char delimiter to separate fields in a record. use_quote_delim: An optional bool. Defaults to `True`. If false, treats double quotation marks as regular characters inside of the string fields. na_value: Additional string to recognize as NA/NaN. header: A bool that indicates whether the first rows of provided CSV files correspond to header lines with column names, and should not be included in the data. num_epochs: An int specifying the number of times this dataset is repeated. If None, cycles through the dataset forever. shuffle: A bool that indicates whether the input should be shuffled. shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size ensures better shuffling, but increases memory usage and startup time. shuffle_seed: Randomization seed to use for shuffling. prefetch_buffer_size: An int specifying the number of feature batches to prefetch for performance improvement. Recommended value is the number of batches consumed per training step. Defaults to auto-tune. num_parallel_reads: Number of threads used to read CSV records from files. If >1, the results will be interleaved. Defaults to `1`. sloppy: If `True`, reading performance will be improved at the cost of non-deterministic ordering. If `False`, the order of elements produced is deterministic prior to shuffling (elements are still randomized if `shuffle=True`. Note that if the seed is set, then order of elements after shuffling is deterministic). Defaults to `False`. num_rows_for_inference: Number of rows of a file to use for type inference if record_defaults is not provided. If None, reads all the rows of all the files. Defaults to 100. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""""` (no compression), `""ZLIB""`, or `""GZIP""`. Defaults to no compression. ignore_errors: (Optional.) If `True`, ignores errors with CSV file parsing, such as malformed data or empty lines, and moves on to the next valid CSV record. Otherwise, the dataset raises an error and stops processing when encountering any invalid records. Defaults to `False`. Returns: A dataset, where each element is a (features, labels) tuple that corresponds to a batch of `batch_size` CSV rows. The features dictionary maps feature column names to `Tensor`s containing the corresponding column data, and labels is a `Tensor` containing the column data for the label column specified by `label_name`. Raises: ValueError: If any of the arguments is malformed." 1960,make_csv_dataset_v1,tensorflow/tensorflow/python/data/experimental/ops/readers.py,569,function, 1961,CsvDatasetV2,tensorflow/tensorflow/python/data/experimental/ops/readers.py,604,class,A Dataset comprising lines from one or more CSV files. 1962,CsvDatasetV1,tensorflow/tensorflow/python/data/experimental/ops/readers.py,783,class,A Dataset comprising lines from one or more CSV files. 1963,make_batched_features_dataset_v2,tensorflow/tensorflow/python/data/experimental/ops/readers.py,874,function,"Returns a `Dataset` of feature dictionaries from `Example` protos. 
If the label_key argument is provided, returns a `Dataset` of tuples comprising feature dictionaries and labels. Example: ``` serialized_examples = [ features { feature { key: ""age"" value { int64_list { value: [ 0 ] } } } feature { key: ""gender"" value { bytes_list { value: [ ""f"" ] } } } feature { key: ""kws"" value { bytes_list { value: [ ""code"", ""art"" ] } } } }, features { feature { key: ""age"" value { int64_list { value: [] } } } feature { key: ""gender"" value { bytes_list { value: [ ""f"" ] } } } feature { key: ""kws"" value { bytes_list { value: [ ""sports"" ] } } } } ] ``` We can use arguments: ``` features: { ""age"": FixedLenFeature([], dtype=tf.int64, default_value=-1), ""gender"": FixedLenFeature([], dtype=tf.string), ""kws"": VarLenFeature(dtype=tf.string), } ``` And the expected output is: ```python { ""age"": [[0], [-1]], ""gender"": [[""f""], [""f""]], ""kws"": SparseTensor( indices=[[0, 0], [0, 1], [1, 0]], values=[""code"", ""art"", ""sports""], dense_shape=[2, 2]), } ``` Args: file_pattern: List of files or patterns of file paths containing `Example` records. See `tf.io.gfile.glob` for pattern rules. batch_size: An int representing the number of records to combine in a single batch. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. See `tf.io.parse_example`. reader: A function or class that can be called with a `filenames` tensor and (optional) `reader_args` and returns a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`. label_key: (Optional) A string corresponding to the key under which labels are stored in `tf.Examples`. If provided, it must be one of the `features` keys, otherwise results in `ValueError`. reader_args: Additional arguments to pass to the reader class. num_epochs: Integer specifying the number of times to read through the dataset. If None, cycles through the dataset forever. Defaults to `None`. shuffle: A boolean that indicates whether the input should be shuffled. Defaults to `True`. shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity ensures better shuffling but would increase memory usage and startup time. shuffle_seed: Randomization seed to use for shuffling. prefetch_buffer_size: Number of feature batches to prefetch in order to improve performance. Recommended value is the number of batches consumed per training step. Defaults to auto-tune. reader_num_threads: Number of threads used to read `Example` records. If >1, the results will be interleaved. Defaults to `1`. parser_num_threads: Number of threads to use for parsing `Example` tensors into a dictionary of `Feature` tensors. Defaults to `2`. sloppy_ordering: If `True`, reading performance will be improved at the cost of non-deterministic ordering. If `False`, the order of elements produced is deterministic prior to shuffling (elements are still randomized if `shuffle=True`. Note that if the seed is set, then order of elements after shuffling is deterministic). Defaults to `False`. drop_final_batch: If `True`, and the batch size does not evenly divide the input dataset size, the final smaller batch will be dropped. Defaults to `False`. Returns: A dataset of `dict` elements, (or a tuple of `dict` elements and label). Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects. Raises: TypeError: If `reader` is of the wrong type. ValueError: If `label_key` is not one of the `features` keys." 
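A hedged sketch of calling the public `tf.data.experimental.make_batched_features_dataset` wrapper documented above, reusing the `age`/`gender`/`kws` feature spec from the example (the file pattern is hypothetical):

```python
import tensorflow as tf

dataset = tf.data.experimental.make_batched_features_dataset(
    file_pattern="/path/to/examples-*.tfrecord",  # hypothetical path
    batch_size=32,
    features={
        "age": tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        "gender": tf.io.FixedLenFeature([], dtype=tf.string),
        "kws": tf.io.VarLenFeature(dtype=tf.string),
    },
    label_key="age",   # must be one of the `features` keys; labels are split out
    shuffle=True,
    num_epochs=1)

# With label_key set, each element is a (features dict, labels) tuple.
for features, labels in dataset.take(1):
    print(features["kws"], labels)
```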
1964,make_batched_features_dataset_v1,tensorflow/tensorflow/python/data/experimental/ops/readers.py,1058,function, 1965,_get_file_names,tensorflow/tensorflow/python/data/experimental/ops/readers.py,1082,function,"Parse list of file names from pattern, optionally shuffled. Args: file_pattern: File glob pattern, or list of glob patterns. shuffle: Whether to shuffle the order of file names. Returns: List of file names matching `file_pattern`. Raises: ValueError: If `file_pattern` is empty, or pattern matches no files." 1966,SqlDatasetV2,tensorflow/tensorflow/python/data/experimental/ops/readers.py,1114,class,A `Dataset` consisting of the results from a SQL query. 1967,SqlDatasetV1,tensorflow/tensorflow/python/data/experimental/ops/readers.py,1160,class,A `Dataset` consisting of the results from a SQL query. 1968,rejection_resample,tensorflow/tensorflow/python/data/experimental/ops/resampling.py,37,function,"A transformation that resamples a dataset to achieve a target distribution. **NOTE** Resampling is performed via rejection sampling; some fraction of the input values will be dropped. Args: class_func: A function mapping an element of the input dataset to a scalar `tf.int32` tensor. Values should be in `[0, num_classes)`. target_dist: A floating point type tensor, shaped `[num_classes]`. initial_dist: (Optional.) A floating point type tensor, shaped `[num_classes]`. If not provided, the true class distribution is estimated live in a streaming fashion. seed: (Optional.) Python integer seed for the resampler. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1969,_get_prob_original_static,tensorflow/tensorflow/python/data/experimental/ops/resampling.py,109,function,"Returns the static probability of sampling from the original. `tensor_util.constant_value(prob_of_original)` returns `None` if it encounters an Op that it isn't defined for. We have some custom logic to avoid this. Args: initial_dist_t: A tensor of the initial distribution. target_dist_t: A tensor of the target distribution. Returns: The probability of sampling from the original distribution as a constant, if it is a constant, or `None`." 1970,_filter_ds,tensorflow/tensorflow/python/data/experimental/ops/resampling.py,132,function,"Filters a dataset based on per-class acceptance probabilities. Args: dataset: The dataset to be filtered. acceptance_dist_ds: A dataset of acceptance probabilities. initial_dist_ds: A dataset of the initial probability distribution, given or estimated. class_func: A function mapping an element of the input dataset to a scalar `tf.int32` tensor. Values should be in `[0, num_classes)`. seed: (Optional.) Python integer seed for the resampler. Returns: A dataset of (class value, data) after filtering." 1971,_estimate_initial_dist_ds,tensorflow/tensorflow/python/data/experimental/ops/resampling.py,177,function, 1972,_get_target_to_initial_ratio,tensorflow/tensorflow/python/data/experimental/ops/resampling.py,199,function, 1973,_estimate_data_distribution,tensorflow/tensorflow/python/data/experimental/ops/resampling.py,205,function,"Estimate data distribution as labels are seen. Args: c: The class labels. Type `int32`, shape `[batch_size]`. num_examples_per_class_seen: Type `int64`, shape `[num_classes]`, containing counts. Returns: num_examples_per_class_seen: Updated counts. Type `int64`, shape `[num_classes]`. dist: The updated distribution. Type `float32`, shape `[num_classes]`." 
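A usage sketch for the `rejection_resample` transformation indexed above (values illustrative, not from the source). Per the `_filter_ds` row, the transformation yields `(class value, data)` pairs, so a trailing `map` is assumed here to recover the original elements:

```python
import tensorflow as tf

# A toy labeled dataset with a 9:1 class imbalance.
features = tf.range(100)
labels = tf.concat([tf.zeros(90, tf.int32), tf.ones(10, tf.int32)], axis=0)
dataset = tf.data.Dataset.from_tensor_slices((features, labels)).shuffle(100)

resampler = tf.data.experimental.rejection_resample(
    class_func=lambda feature, label: label,  # element -> class in [0, num_classes)
    target_dist=[0.5, 0.5])                   # desired class mix

dataset = dataset.apply(resampler)
# Assumed: drop the class value the transformation prepends to each element.
dataset = dataset.map(lambda class_value, element: element)
```

Since `initial_dist` is omitted, the class distribution is estimated in a streaming fashion, which is why the sketch shuffles the input first.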
1974,_calculate_acceptance_probs_with_mixing,tensorflow/tensorflow/python/data/experimental/ops/resampling.py,230,function,"Calculates the acceptance probabilities and mixing ratio. In this case, we assume that we can *either* sample from the original data distribution with probability `m`, or sample from a reshaped distribution that comes from rejection sampling on the original distribution. This rejection sampling is done on a per-class basis, with `a_i` representing the probability of accepting data from class `i`. This method is based on solving the following analysis for the reshaped distribution: Let F be the probability of a rejection (on any example). Let p_i be the proportion of examples in the data in class i (init_probs) Let a_i be the rate the rejection sampler should *accept* class i Let t_i be the target proportion in the minibatches for class i (target_probs) ``` F = sum_i(p_i * (1-a_i)) = 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1 ``` An example with class `i` will be accepted if `k` rejections occur, then an example with class `i` is seen by the rejector, and it is accepted. This can be written as follows: ``` t_i = sum_k=0^inf(F^k * p_i * a_i) = p_i * a_i / (1 - F) using geometric series identity, since 0 <= F < 1 = p_i * a_i / sum_j(p_j * a_j) using F from above ``` Note that the following constraints hold: ``` 0 <= p_i <= 1, sum_i(p_i) = 1 0 <= a_i <= 1 0 <= t_i <= 1, sum_i(t_i) = 1 ``` A solution for a_i in terms of the other variables is the following: ```a_i = (t_i / p_i) / max_i[t_i / p_i]``` If we try to minimize the amount of data rejected, we get the following: M_max = max_i [ t_i / p_i ] M_min = min_i [ t_i / p_i ] The desired probability of accepting data if it comes from class `i`: a_i = (t_i/p_i - m) / (M_max - m) The desired probability of pulling a data element from the original dataset, rather than the filtered one: m = M_min Args: initial_probs: A Tensor of the initial probability distribution, given or estimated. target_probs: A Tensor of the target probability distribution. Returns: (A 1D Tensor with the per-class acceptance probabilities, the desired probability of pulling from the original distribution.)" 1975,_ScanDataset,tensorflow/tensorflow/python/data/experimental/ops/scan_ops.py,29,class,A dataset that scans a function across its input. 1976,scan,tensorflow/tensorflow/python/data/experimental/ops/scan_ops.py,158,function,"A transformation that scans a function across an input dataset. This transformation is a stateful relative of `tf.data.Dataset.map`. In addition to mapping `scan_func` across the elements of the input dataset, `scan()` accumulates one or more state tensors, whose initial values are `initial_state`. Args: initial_state: A nested structure of tensors, representing the initial state of the accumulator. scan_func: A function that maps `(old_state, input_element)` to `(new_state, output_element)`. It must take two arguments and return a pair of nested structures of tensors. The `new_state` must match the structure of `initial_state`. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1977,_ShuffleAndRepeatDataset,tensorflow/tensorflow/python/data/experimental/ops/shuffle_ops.py,30,class,A `Dataset` that fuses `shuffle` and `repeat`. 1978,shuffle_and_repeat,tensorflow/tensorflow/python/data/experimental/ops/shuffle_ops.py,60,function,"Shuffles and repeats a Dataset, reshuffling with each repetition. 
>>> d = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> d = d.apply(tf.data.experimental.shuffle_and_repeat(2, count=2)) >>> [elem.numpy() for elem in d] # doctest: +SKIP [2, 3, 1, 1, 3, 2] ```python dataset.apply( tf.data.experimental.shuffle_and_repeat(buffer_size, count, seed)) ``` produces the same output as ```python dataset.shuffle( buffer_size, seed=seed, reshuffle_each_iteration=True).repeat(count) ``` In each repetition, this dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, set the buffer size equal to the full size of the dataset. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1,000, then `shuffle` will initially select a random element from only the first 1,000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1,000 element buffer. Args: buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum number of elements that will be buffered when prefetching. count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the number of times the dataset should be repeated. The default behavior (if `count` is `None` or `-1`) is for the dataset to be repeated indefinitely. seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random seed that will be used to create the distribution. See `tf.random.set_seed` for behavior. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1979,_SleepDataset,tensorflow/tensorflow/python/data/experimental/ops/sleep.py,24,class,A `Dataset` that sleeps before producing each upstream element. 1980,sleep,tensorflow/tensorflow/python/data/experimental/ops/sleep.py,37,function,"Sleeps for `sleep_microseconds` before producing each input element. Args: sleep_microseconds: The number of microseconds to sleep before producing an input element. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1981,_LegacySnapshotDataset,tensorflow/tensorflow/python/data/experimental/ops/snapshot.py,36,class,A Dataset that captures a snapshot or reads from a snapshot. 1982,legacy_snapshot,tensorflow/tensorflow/python/data/experimental/ops/snapshot.py,108,function,"Writes to/reads from a snapshot of a dataset. This function attempts to determine whether a valid snapshot exists at the `path`, and reads from the snapshot if so. If not, it will run the preprocessing pipeline as usual, and write out a snapshot of the data processed for future use. Args: path: A directory where we want to save our snapshots and/or read from a previously saved snapshot. compression: The type of compression to apply to the Dataset. Currently supports ""GZIP"" or None. Defaults to None (no compression). reader_path_prefix: A prefix to add to the path when reading from snapshots. Defaults to None. writer_path_prefix: A prefix to add to the path when writing to snapshots. Defaults to None. shard_size_bytes: The size of each shard to be written by the snapshot dataset op. Defaults to 10 GiB. pending_snapshot_expiry_seconds: How long to wait (in seconds) before the snapshot op considers a previously unfinished snapshot to be stale. num_reader_threads: Number of threads to parallelize reading from snapshot. Especially useful if compression is turned on since the decompression operation tends to be intensive. Defaults to 1. 
If > 1, then this might introduce non-determinism, i.e. the order in which the elements are read from the snapshot differs from the order in which they were written. reader_buffer_size: Maximum number of elements we can prefetch reading from the snapshot. Defaults to 1. Increasing this might improve performance but will increase memory consumption. num_writer_threads: Number of threads to parallelize writing to the snapshot. We'll open up `num_writer_threads` files and write to them in parallel. Especially useful if compression is turned on since the compression operation tends to be intensive. Defaults to 1. If > 1, then this might introduce non-determinism, i.e. the order in which the elements are read from the upstream iterator differs from the order in which they are written. writer_buffer_size: Maximum number of pipeline elements to fill up the buffer before writing them out using `num_writer_threads`. shuffle_on_read: If this is True, then the order in which examples are produced when reading from a snapshot will be random. Defaults to False. shuffle_seed: Optional. If shuffle_seed is set, the random number generator used for shuffling (when shuffle_on_read is turned on) is seeded by the given seed. Otherwise, it is seeded by a random seed that differs for every run. mode: The mode in which snapshot should operate. Valid options are ""auto"", ""read"", ""write"", and ""passthrough"". The default mode is ""auto"", where the snapshot op will automatically determine what mode to operate in. snapshot_name: If set, use the supplied string as the snapshot name instead of introspecting the data pipeline and automatically generating a unique identifier for the snapshot. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1983,_SnapshotDataset,tensorflow/tensorflow/python/data/experimental/ops/snapshot.py,197,class,A dataset that allows saving and re-use of already processed data. 1984,snapshot,tensorflow/tensorflow/python/data/experimental/ops/snapshot.py,258,function,"API to persist the output of the input dataset. The snapshot API allows users to transparently persist the output of their preprocessing pipeline to disk, and materialize the pre-processed data on a different training run. This API enables repeated preprocessing steps to be consolidated, and allows re-use of already processed data, trading off disk storage and network bandwidth for freeing up more valuable CPU resources and accelerator compute time. https://github.com/tensorflow/community/blob/master/rfcs/20200107-tf-data-snapshot.md has detailed design documentation of this feature. Users can specify various options to control the behavior of snapshot, including how snapshots are read from and written to by passing in user-defined functions to the `reader_func` and `shard_func` parameters. `shard_func` is a user specified function that maps input elements to snapshot shards. Users may want to specify this function to control how snapshot files should be written to disk. Below is an example of how a potential shard_func could be written. ```python dataset = ... dataset = dataset.enumerate() dataset = dataset.apply(tf.data.experimental.snapshot(""/path/to/snapshot/dir"", shard_func=lambda x, y: x % NUM_SHARDS, ...)) dataset = dataset.map(lambda x, y: y) ``` `reader_func` is a user specified function that accepts a single argument: (1) a Dataset of Datasets, each representing a ""split"" of elements of the original dataset. 
The cardinality of the input dataset matches the number of shards specified in the `shard_func` (see above). The function should return a Dataset of elements of the original dataset. Users may want to specify this function to control how snapshot files should be read from disk, including the amount of shuffling and parallelism. Here is an example of a standard reader function a user can define. This function enables both dataset shuffling and parallel reading of datasets: ```python def user_reader_func(datasets): # shuffle the datasets splits datasets = datasets.shuffle(NUM_CORES) # read datasets in parallel and interleave their elements return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE) dataset = dataset.apply(tf.data.experimental.snapshot(""/path/to/snapshot/dir"", reader_func=user_reader_func)) ``` By default, snapshot parallelizes reads by the number of cores available on the system, but will not attempt to shuffle the data. Args: path: Required. A directory to use for storing / loading the snapshot to / from. compression: Optional. The type of compression to apply to the snapshot written to disk. Supported options are `GZIP`, `SNAPPY`, `AUTO` or None. Defaults to AUTO, which attempts to pick an appropriate compression algorithm for the dataset. reader_func: Optional. A function to control how to read data from snapshot shards. shard_func: Optional. A function to control how to shard data when writing a snapshot. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1985,StatsAggregatorV2,tensorflow/tensorflow/python/data/experimental/ops/stats_aggregator.py,31,class,"A stateful resource that aggregates statistics from one or more iterators. To record statistics, use one of the custom transformation functions defined in this module when defining your `tf.data.Dataset`. All statistics will be aggregated by the `StatsAggregator` that is associated with a particular iterator (see below). For example, to record the latency of producing each element by iterating over a dataset: ```python dataset = ... dataset = dataset.apply(tf.data.experimental.latency_stats(""total_bytes"")) ``` To associate a `StatsAggregator` with a `tf.data.Dataset` object, use the following pattern: ```python aggregator = tf.data.experimental.StatsAggregator() dataset = ... # Apply `StatsOptions` to associate `dataset` with `aggregator`. options = tf.data.Options() options.experimental_stats.aggregator = aggregator dataset = dataset.with_options(options) ``` Note: This interface is experimental and expected to change. In particular, we expect to add other implementations of `StatsAggregator` that provide different ways of exporting statistics, and add more types of statistics." 1986,StatsAggregatorV1,tensorflow/tensorflow/python/data/experimental/ops/stats_aggregator.py,82,class,"A stateful resource that aggregates statistics from one or more iterators. To record statistics, use one of the custom transformation functions defined in this module when defining your `tf.data.Dataset`. All statistics will be aggregated by the `StatsAggregator` that is associated with a particular iterator (see below). For example, to record the latency of producing each element by iterating over a dataset: ```python dataset = ... dataset = dataset.apply(tf.data.experimental.latency_stats(""total_bytes"")) ``` To associate a `StatsAggregator` with a `tf.data.Dataset` object, use the following pattern: ```python aggregator = tf.data.experimental.StatsAggregator() dataset = ...
# Apply `StatsOptions` to associate `dataset` with `aggregator`. options = tf.data.Options() options.experimental_stats.aggregator = aggregator dataset = dataset.with_options(options) ``` To get a protocol buffer summary of the currently aggregated statistics, use the `StatsAggregator.get_summary()` tensor. The easiest way to do this is to add the returned tensor to the `tf.GraphKeys.SUMMARIES` collection, so that the summaries will be included with any existing summaries. ```python aggregator = tf.data.experimental.StatsAggregator() # ... stats_summary = aggregator.get_summary() tf.compat.v1.add_to_collection(tf.GraphKeys.SUMMARIES, stats_summary) ``` Note: This interface is experimental and expected to change. In particular, we expect to add other implementations of `StatsAggregator` that provide different ways of exporting statistics, and add more types of statistics." 1987,set_stats_aggregator,tensorflow/tensorflow/python/data/experimental/ops/stats_ops.py,29,function,"Set the given `stats_aggregator` for aggregating the input dataset stats. Args: stats_aggregator: A `tf.data.experimental.StatsAggregator` object. prefix: (Optional) String, all statistics recorded for the input `dataset` will have the given `prefix` prepended to the name. counter_prefix: (Optional) String, all statistics recorded as `counters` will have the given `prefix` for the counter. Defaults to ""/tensorflow"". Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1988,bytes_produced_stats,tensorflow/tensorflow/python/data/experimental/ops/stats_ops.py,52,function,"Records the number of bytes produced by each element of the input dataset. To consume the statistics, associate a `StatsAggregator` with the output dataset. Args: tag: String. All statistics recorded by the returned transformation will be associated with the given `tag`. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1989,latency_stats,tensorflow/tensorflow/python/data/experimental/ops/stats_ops.py,75,function,"Records the latency of producing each element of the input dataset. To consume the statistics, associate a `StatsAggregator` with the output dataset. Args: tag: String. All statistics recorded by the returned transformation will be associated with the given `tag`. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1990,_StatsDataset,tensorflow/tensorflow/python/data/experimental/ops/stats_ops.py,97,class,"A `Dataset` that acts as an identity, and also records statistics." 1991,StatsOptions,tensorflow/tensorflow/python/data/experimental/ops/stats_options.py,28,class,"Represents options for collecting dataset stats using `StatsAggregator`. You can set the stats options of a dataset through the `experimental_stats` property of `tf.data.Options`; the property is an instance of `tf.data.experimental.StatsOptions`. For example, to collect latency stats on all dataset edges, use the following pattern: ```python aggregator = tf.data.experimental.StatsAggregator() options = tf.data.Options() options.experimental_stats.aggregator = aggregator options.experimental_stats.latency_all_edges = True dataset = dataset.with_options(options) ```" 1992,_TakeWhileDataset,tensorflow/tensorflow/python/data/experimental/ops/take_while_ops.py,27,class,A dataset that stops iteration when `predicate` returns false.
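Putting the stats entries above together, here is a minimal sketch of the record-then-associate pattern that `latency_stats` and the `StatsAggregator` docstrings describe; the tag string "record_latency" is an arbitrary illustrative choice:

```python
import tensorflow as tf

# Record per-element production latency under an illustrative tag.
dataset = tf.data.Dataset.range(100)
dataset = dataset.apply(tf.data.experimental.latency_stats("record_latency"))

# Associate the dataset with an aggregator via StatsOptions, as described
# in the StatsAggregator docstrings above.
aggregator = tf.data.experimental.StatsAggregator()
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)
```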
1993,take_while,tensorflow/tensorflow/python/data/experimental/ops/take_while_ops.py,56,function,"A transformation that stops dataset iteration based on a `predicate`. Args: predicate: A function that maps a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a scalar `tf.bool` tensor. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1994,assert_next,tensorflow/tensorflow/python/data/experimental/ops/testing.py,26,function,"A transformation that asserts which transformations happen next. Transformations should be referred to by their base name, not including version suffix. For example, use ""Batch"" instead of ""BatchV2"". ""Batch"" will match any of ""Batch"", ""BatchV1"", ""BatchV2"", etc. Args: transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are expected to happen next. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1995,non_serializable,tensorflow/tensorflow/python/data/experimental/ops/testing.py,49,function,"A non-serializable identity transformation. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1996,sleep,tensorflow/tensorflow/python/data/experimental/ops/testing.py,64,function,"Sleeps for `sleep_microseconds` before producing each input element. Args: sleep_microseconds: The number of microseconds to sleep before producing an input element. Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 1997,_AssertNextDataset,tensorflow/tensorflow/python/data/experimental/ops/testing.py,82,class,A `Dataset` that asserts which transformations happen next. 1998,_NonSerializableDataset,tensorflow/tensorflow/python/data/experimental/ops/testing.py,100,class,A `Dataset` that performs non-serializable identity transformation. 1999,_SleepDataset,tensorflow/tensorflow/python/data/experimental/ops/testing.py,113,class,A `Dataset` that sleeps before producing each upstream element. 2000,ThreadingOptions,tensorflow/tensorflow/python/data/experimental/ops/threading_options.py,26,class,"Represents options for dataset threading. You can set the threading options of a dataset through the `experimental_threading` property of `tf.data.Options`; the property is an instance of `tf.data.experimental.ThreadingOptions`. ```python options = tf.data.Options() options.experimental_threading.private_threadpool_size = 10 dataset = dataset.with_options(options) ```" 2001,_generate_shared_name,tensorflow/tensorflow/python/data/experimental/ops/threadpool.py,31,function, 2002,PrivateThreadPool,tensorflow/tensorflow/python/data/experimental/ops/threadpool.py,41,class,A stateful resource that represents a private thread pool. 2003,_ThreadPoolDataset,tensorflow/tensorflow/python/data/experimental/ops/threadpool.py,63,class,"A `Dataset` that acts as an identity, and sets a custom threadpool." 2004,override_threadpool,tensorflow/tensorflow/python/data/experimental/ops/threadpool.py,78,function,"Returns a new dataset that uses the given thread pool for its operations. Args: dataset: A `tf.data.Dataset` object. thread_pool: A `PrivateThreadPool` object. Returns: A dataset containing the same values as `dataset`, but which uses `thread_pool` to compute any of its parallel operations (such as `tf.data.Dataset.map`)." 
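As a quick illustration of the `take_while` entry above, this sketch stops iteration as soon as the predicate returns false; the cutoff value 5 is arbitrary:

```python
import tensorflow as tf

# Keep yielding elements while the predicate holds, then stop.
dataset = tf.data.Dataset.range(10)
dataset = dataset.apply(tf.data.experimental.take_while(lambda x: x < 5))
print(list(dataset.as_numpy_iterator()))  # [0, 1, 2, 3, 4]
```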
2005,unique,tensorflow/tensorflow/python/data/experimental/ops/unique.py,27,function,"Creates a `Dataset` from another `Dataset`, discarding duplicates. Use this transformation to produce a dataset that contains one instance of each unique element in the input. For example: ```python dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1]) # Using `unique()` will drop the duplicate elements. dataset = dataset.apply(tf.data.experimental.unique()) # ==> { 1, 37, 2 } ``` Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`." 2006,_UniqueDataset,tensorflow/tensorflow/python/data/experimental/ops/unique.py,51,class,A `Dataset` containing the unique elements from its input. 2007,TFRecordWriter,tensorflow/tensorflow/python/data/experimental/ops/writers.py,30,class,"Writes a dataset to a TFRecord file. The elements of the dataset must be scalar strings. To serialize dataset elements as strings, you can use the `tf.io.serialize_tensor` function. ```python dataset = tf.data.Dataset.range(3) dataset = dataset.map(tf.io.serialize_tensor) writer = tf.data.experimental.TFRecordWriter(""/path/to/file.tfrecord"") writer.write(dataset) ``` To read back the elements, use `TFRecordDataset`. ```python dataset = tf.data.TFRecordDataset(""/path/to/file.tfrecord"") dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.int64)) ``` To shard a `dataset` across multiple TFRecord files: ```python dataset = ... # dataset to be written def reduce_func(key, dataset): filename = tf.strings.join([PATH_PREFIX, tf.strings.as_string(key)]) writer = tf.data.experimental.TFRecordWriter(filename) writer.write(dataset.map(lambda _, x: x)) return tf.data.Dataset.from_tensors(filename) dataset = dataset.enumerate() dataset = dataset.apply(tf.data.experimental.group_by_window( lambda i, _: i % NUM_SHARDS, reduce_func, tf.int64.max )) ```" 2008,DispatchServer,tensorflow/tensorflow/python/data/experimental/service/server_lib.py,29,class,"An in-process tf.data service dispatch server. A `tf.data.experimental.service.DispatchServer` coordinates a cluster of `tf.data.experimental.service.WorkerServer`s. When the workers start, they register themselves with the dispatcher. >>> dispatcher = tf.data.experimental.service.DispatchServer(port=0) >>> dispatcher_address = dispatcher.target.split(""://"")[1] >>> worker = tf.data.experimental.service.WorkerServer( ... port=0, dispatcher_address=dispatcher_address) >>> dataset = tf.data.Dataset.range(10) >>> dataset = dataset.apply(tf.data.experimental.service.distribute( ... processing_mode=""parallel_epochs"", service=dispatcher.target)) >>> print(list(dataset.as_numpy_iterator())) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] When starting a dedicated tf.data dispatch process, use join() to block indefinitely after starting up the server. ``` dispatcher = tf.data.experimental.service.DispatchServer(port=5050) dispatcher.join() ```" 2009,WorkerServer,tensorflow/tensorflow/python/data/experimental/service/server_lib.py,148,class,"An in-process tf.data service worker server. A `tf.data.experimental.service.WorkerServer` performs `tf.data.Dataset` processing for user-defined datasets, and provides the resulting elements over RPC. A worker is associated with a single `tf.data.experimental.service.DispatchServer`. >>> dispatcher = tf.data.experimental.service.DispatchServer(port=0) >>> dispatcher_address = dispatcher.target.split(""://"")[1] >>> worker = tf.data.experimental.service.WorkerServer( ...
port=0, dispatcher_address=dispatcher_address) >>> dataset = tf.data.Dataset.range(10) >>> dataset = dataset.apply(tf.data.experimental.service.distribute( ... processing_mode=""parallel_epochs"", service=dispatcher.target)) >>> print(list(dataset.as_numpy_iterator())) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] When starting a dedicated tf.data worker process, use join() to block indefinitely after starting up the server. ``` worker = tf.data.experimental.service.WorkerServer( port=5051, dispatcher_address=""grpc://localhost:5050"") worker.join() ```" 2010,ServerLibTest,tensorflow/tensorflow/python/data/experimental/service/server_lib_test.py,26,class, 2011,AsNumpyIteratorTest,tensorflow/tensorflow/python/data/kernel_tests/as_numpy_iterator_test.py,34,class, 2012,BatchTest,tensorflow/tensorflow/python/data/kernel_tests/batch_test.py,40,class, 2013,FileCacheTest,tensorflow/tensorflow/python/data/kernel_tests/cache_test.py,43,class, 2014,MemoryCacheTest,tensorflow/tensorflow/python/data/kernel_tests/cache_test.py,204,class, 2015,_test_combinations,tensorflow/tensorflow/python/data/kernel_tests/cardinality_test.py,31,function, 2016,CardinalityTest,tensorflow/tensorflow/python/data/kernel_tests/cardinality_test.py,182,class,Tests for `tf.data.Dataset.cardinality()`. 2017,CheckpointTest,tensorflow/tensorflow/python/data/kernel_tests/checkpoint_test.py,48,class, 2018,ConcatenateTest,tensorflow/tensorflow/python/data/kernel_tests/concatenate_test.py,32,class, 2019,_make_distributed_dataset,tensorflow/tensorflow/python/data/kernel_tests/data_service_ops_test.py,48,function,Creates a distributed dataset with a short task refresh interval. 2020,DataServiceOpsTest,tensorflow/tensorflow/python/data/kernel_tests/data_service_ops_test.py,58,class, 2021,DatasetSpecTest,tensorflow/tensorflow/python/data/kernel_tests/dataset_spec_test.py,34,class, 2022,DatasetTest,tensorflow/tensorflow/python/data/kernel_tests/dataset_test.py,49,class, 2023,EnumerateTest,tensorflow/tensorflow/python/data/kernel_tests/enumerate_test.py,31,class, 2024,_test_combinations,tensorflow/tensorflow/python/data/kernel_tests/filter_test.py,33,function, 2025,FilterTest,tensorflow/tensorflow/python/data/kernel_tests/filter_test.py,55,class, 2026,FixedLengthRecordDatasetTest,tensorflow/tensorflow/python/data/kernel_tests/fixed_length_record_dataset_test.py,34,class, 2027,FlatMapTest,tensorflow/tensorflow/python/data/kernel_tests/flat_map_test.py,41,class, 2028,FromGeneratorTest,tensorflow/tensorflow/python/data/kernel_tests/from_generator_test.py,35,class, 2029,FromSparseTensorSlicesTest,tensorflow/tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py,33,class, 2030,FromTensorSlicesTest,tensorflow/tensorflow/python/data/kernel_tests/from_tensor_slices_test.py,36,class, 2031,FromTensorsTest,tensorflow/tensorflow/python/data/kernel_tests/from_tensors_test.py,41,class, 2032,_interleave,tensorflow/tensorflow/python/data/kernel_tests/interleave_test.py,38,function,"Reference implementation of interleave used for testing. Args: lists: a list of lists to interleave cycle_length: the length of the interleave cycle block_length: the length of the interleave block num_parallel_calls: the number of parallel calls Yields: Elements of `lists` interleaved in the order determined by `cycle_length` and `block_length`." 2033,_repeat,tensorflow/tensorflow/python/data/kernel_tests/interleave_test.py,92,function,"Produces a list of lists suitable for testing interleave. 
Args: values: for each element `x` the result contains `[x] * x` count: determines how many times to repeat `[x] * x` in the result Returns: A list of lists of values suitable for testing interleave." 2034,InterleaveTest,tensorflow/tensorflow/python/data/kernel_tests/interleave_test.py,105,class, 2035,IteratorClusterTest,tensorflow/tensorflow/python/data/kernel_tests/iterator_cluster_test.py,43,class, 2036,IteratorTest,tensorflow/tensorflow/python/data/kernel_tests/iterator_test.py,56,class, 2037,LenTest,tensorflow/tensorflow/python/data/kernel_tests/len_test.py,28,class, 2038,ListFilesTest,tensorflow/tensorflow/python/data/kernel_tests/list_files_test.py,35,class, 2039,_test_combinations_with_mode_v1,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,61,function, 2040,_test_combinations_with_mode_v2,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,82,function, 2041,_test_combinations_with_mode,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,93,function, 2042,_test_combinations,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,98,function, 2043,_short_circuit_test_cases,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,103,function, 2044,_make_coordinated_sloppy_dataset,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,119,function,"Produces a dataset iterator and events to control the order of elements. Args: apply_map: method that applies the `map` transformation num_elements: the number of input elements num_parallel_calls: the degree of map parallelism Returns: A dataset iterator (represented as `get_next` op) and events that can be used to control the order of output elements." 2045,Foo,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,153,class,Dummy class used for invalid return value tests. 
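For illustration, a plain-Python sketch of what the `_repeat` docstring above describes; `repeat_lists` is a hypothetical stand-in for the internal test helper, assuming `count` tiles the whole pattern:

```python
def repeat_lists(values, count):
    # For each value x contribute [x] * x, and tile the pattern `count` times.
    return [[x] * x for x in values] * count

print(repeat_lists([1, 2, 3], 2))
# [[1], [2, 2], [3, 3, 3], [1], [2, 2], [3, 3, 3]]
```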
2046,MapTest,tensorflow/tensorflow/python/data/kernel_tests/map_test.py,160,class, 2047,MemoryCleanupTest,tensorflow/tensorflow/python/data/kernel_tests/memory_cleanup_test.py,47,class, 2048,skip_v2_test_combinations,tensorflow/tensorflow/python/data/kernel_tests/multi_device_iterator_test.py,42,function, 2049,MultiDeviceIteratorTest,tensorflow/tensorflow/python/data/kernel_tests/multi_device_iterator_test.py,47,class, 2050,OwnedMultiDeviceIteratorTest,tensorflow/tensorflow/python/data/kernel_tests/multi_device_iterator_test.py,350,class, 2051,_optional_spec_test_combinations,tensorflow/tensorflow/python/data/kernel_tests/optional_test.py,46,function, 2052,_get_next_as_optional_test_combinations,tensorflow/tensorflow/python/data/kernel_tests/optional_test.py,80,function, 2053,OptionalTest,tensorflow/tensorflow/python/data/kernel_tests/optional_test.py,122,class, 2054,OptionsTest,tensorflow/tensorflow/python/data/kernel_tests/options_test.py,32,class, 2055,PaddedBatchTest,tensorflow/tensorflow/python/data/kernel_tests/padded_batch_test.py,40,class, 2056,PrefetchTest,tensorflow/tensorflow/python/data/kernel_tests/prefetch_test.py,31,class, 2057,RangeTest,tensorflow/tensorflow/python/data/kernel_tests/range_test.py,31,class, 2058,ReduceTest,tensorflow/tensorflow/python/data/kernel_tests/reduce_test.py,42,class, 2059,RepeatTest,tensorflow/tensorflow/python/data/kernel_tests/repeat_test.py,29,class, 2060,ShardTest,tensorflow/tensorflow/python/data/kernel_tests/shard_test.py,29,class, 2061,ShuffleTest,tensorflow/tensorflow/python/data/kernel_tests/shuffle_test.py,42,class, 2062,SkipTest,tensorflow/tensorflow/python/data/kernel_tests/skip_test.py,29,class, 2063,TakeTest,tensorflow/tensorflow/python/data/kernel_tests/take_test.py,29,class, 2064,default_test_combinations,tensorflow/tensorflow/python/data/kernel_tests/test_base.py,39,function,Returns the default test combinations for tf.data tests. 2065,eager_only_combinations,tensorflow/tensorflow/python/data/kernel_tests/test_base.py,44,function,Returns the default test combinations for eager mode only tf.data tests. 2066,graph_only_combinations,tensorflow/tensorflow/python/data/kernel_tests/test_base.py,49,function,Returns the default test combinations for graph mode only tf.data tests. 2067,v2_only_combinations,tensorflow/tensorflow/python/data/kernel_tests/test_base.py,54,function,Returns the default test combinations for v2 only tf.data tests. 2068,DatasetTestBase,tensorflow/tensorflow/python/data/kernel_tests/test_base.py,59,class,Base class for dataset tests. 2069,TextLineDatasetTest,tensorflow/tensorflow/python/data/kernel_tests/text_line_dataset_test.py,41,class, 2070,TFRecordDatasetTest,tensorflow/tensorflow/python/data/kernel_tests/tf_record_dataset_test.py,36,class, 2071,UnbatchTest,tensorflow/tensorflow/python/data/kernel_tests/unbatch_test.py,38,class, 2072,WindowTest,tensorflow/tensorflow/python/data/kernel_tests/window_test.py,36,class, 2073,_dataset_factory,tensorflow/tensorflow/python/data/kernel_tests/zip_test.py,31,function, 2074,ZipTest,tensorflow/tensorflow/python/data/kernel_tests/zip_test.py,39,class, 2075,DatasetV2,tensorflow/tensorflow/python/data/ops/dataset_ops.py,106,class,"Represents a potentially large set of elements. The `tf.data.Dataset` API supports writing descriptive and efficient input pipelines. `Dataset` usage follows a common pattern: 1. Create a source dataset from your input data. 2. Apply dataset transformations to preprocess the data. 3. Iterate over the dataset and process the elements.
Iteration happens in a streaming fashion, so the full dataset does not need to fit into memory. Source Datasets: The simplest way to create a dataset is to create it from a python `list`: >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> for element in dataset: ... print(element) tf.Tensor(1, shape=(), dtype=int32) tf.Tensor(2, shape=(), dtype=int32) tf.Tensor(3, shape=(), dtype=int32) To process lines from files, use `tf.data.TextLineDataset`: >>> dataset = tf.data.TextLineDataset([""file1.txt"", ""file2.txt""]) To process records written in the `TFRecord` format, use `TFRecordDataset`: >>> dataset = tf.data.TFRecordDataset([""file1.tfrecords"", ""file2.tfrecords""]) To create a dataset of all files matching a pattern, use `tf.data.Dataset.list_files`: >>> dataset = tf.data.Dataset.list_files(""/path/*.txt"") # doctest: +SKIP See `tf.data.FixedLengthRecordDataset` and `tf.data.Dataset.from_generator` for more ways to create datasets. Transformations: Once you have a dataset, you can apply transformations to prepare the data for your model: >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> dataset = dataset.map(lambda x: x*2) >>> list(dataset.as_numpy_iterator()) [2, 4, 6] Common Terms: **Element**: A single output from calling `next()` on a dataset iterator. Elements may be nested structures containing multiple components. For example, the element `(1, (3, ""apple""))` has one tuple nested in another tuple. The components are `1`, `3`, and `""apple""`. **Component**: The leaf in the nested structure of an element. Supported types: Elements can be nested structures of tuples, named tuples, and dictionaries. Note that Python lists are *not* treated as nested structures of components. Instead, lists are converted to tensors and treated as components. For example, the element `(1, [1, 2, 3])` has only two components; the tensor `1` and the tensor `[1, 2, 3]`. Element components can be of any type representable by `tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`, `tf.sparse.SparseTensor`, `tf.RaggedTensor`, and `tf.TensorArray`. >>> a = 1 # Integer element >>> b = 2.0 # Float element >>> c = (1, 2) # Tuple element with 2 components >>> d = {""a"": (2, 2), ""b"": 3} # Dict element with 3 components >>> Point = collections.namedtuple(""Point"", [""x"", ""y""]) # doctest: +SKIP >>> e = Point(1, 2) # Named tuple # doctest: +SKIP >>> f = tf.data.Dataset.range(10) # Dataset element" 2076,DatasetV1,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2169,class,"Represents a potentially large set of elements. A `Dataset` can be used to represent an input pipeline as a collection of elements and a ""logical plan"" of transformations that act on those elements." 2077,DatasetV1Adapter,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2628,class,Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API. 2078,_ensure_same_dataset_graph,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2658,function,Walks the dataset graph to ensure all datasets come from the same graph. 2079,make_one_shot_iterator,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2684,function,"Creates an iterator for elements of `dataset`. Note: The returned iterator will be initialized automatically. A ""one-shot"" iterator does not support re-initialization. Args: dataset: A `tf.data.Dataset`. Returns: A `tf.data.Iterator` for elements of `dataset`." 
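Since `make_one_shot_iterator` is a graph-mode (TF1-style) API, a minimal usage sketch looks like the following; it assumes eager execution has been disabled via the compat.v1 shim:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# A one-shot iterator initializes automatically but cannot be re-initialized.
dataset = tf.data.Dataset.range(3)
iterator = tf.data.make_one_shot_iterator(dataset)
next_element = iterator.get_next()

with tf.Session() as sess:
    for _ in range(3):
        print(sess.run(next_element))  # 0, then 1, then 2
```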
2080,make_initializable_iterator,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2705,function,"Creates an iterator for elements of `dataset`. Note: The returned iterator will be in an uninitialized state, and you must run the `iterator.initializer` operation before using it: ```python dataset = ... iterator = tf.compat.v1.data.make_initializable_iterator(dataset) # ... sess.run(iterator.initializer) ``` Args: dataset: A `tf.data.Dataset`. shared_name: (Optional.) If non-empty, the returned iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). Returns: A `tf.data.Iterator` for elements of `dataset`. Raises: RuntimeError: If eager execution is enabled." 2081,get_structure,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2739,function,"Returns the type signature for elements of the input dataset / iterator. Args: dataset_or_iterator: A `tf.data.Dataset` or an `tf.data.Iterator`. Returns: A nested structure of `tf.TypeSpec` objects matching the structure of an element of `dataset_or_iterator` and specifying the type of individual components. Raises: TypeError: If input is not a `tf.data.Dataset` or an `tf.data.Iterator` object." 2082,get_legacy_output_classes,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2763,function,"Returns the output classes for elements of the input dataset / iterator. Args: dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`. Returns: A nested structure of Python `type` objects matching the structure of the dataset / iterator elements and specifying the class of the individual components." 2083,get_legacy_output_shapes,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2780,function,"Returns the output shapes for elements of the input dataset / iterator. Args: dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`. Returns: A nested structure of `tf.TensorShape` objects matching the structure of the dataset / iterator elements and specifying the shape of the individual components." 2084,get_legacy_output_types,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2797,function,"Returns the output types for elements of the input dataset / iterator. Args: dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`. Returns: A nested structure of `tf.DType` objects matching the structure of dataset / iterator elements and specifying the type of the individual components." 2085,Options,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2814,class,"Represents options for tf.data.Dataset. An `Options` object can be, for instance, used to control which graph optimizations to apply or whether to use performance modeling to dynamically tune the parallelism of operations such as `tf.data.Dataset.map` or `tf.data.Dataset.interleave`. After constructing an `Options` object, use `dataset.with_options(options)` to apply the options to a dataset. >>> dataset = tf.data.Dataset.range(3) >>> options = tf.data.Options() >>> # Set options here. >>> dataset = dataset.with_options(options)" 2086,DatasetSource,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2946,class,Abstract class representing a dataset with no inputs. 2087,UnaryDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2953,class,Abstract class representing a dataset with one input. 2088,UnaryUnchangedStructureDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2964,class,Represents a unary dataset with the same input and output structure.
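To make the `get_structure` entry above concrete, a small sketch; the exact `TensorSpec` repr may vary by TensorFlow version:

```python
import tensorflow as tf

# get_structure returns the type signature (a nest of tf.TypeSpec objects)
# describing one element of the dataset.
dataset = tf.data.Dataset.from_tensor_slices(([1, 2], [3.0, 4.0]))
print(tf.data.experimental.get_structure(dataset))
# (TensorSpec(shape=(), dtype=tf.int32, name=None),
#  TensorSpec(shape=(), dtype=tf.float32, name=None))
```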
2089,TensorDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2977,class,A `Dataset` with a single element. 2090,TensorSliceDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,2996,class,A `Dataset` of slices from a dataset element. 2091,SparseTensorSliceDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3023,class,A `Dataset` that splits a rank-N `tf.sparse.SparseTensor` into its rows. 2092,_VariantDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3052,class,A Dataset wrapper around a `tf.variant`-typed function argument. 2093,_NestedVariant,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3067,class, 2094,from_variant,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3080,function,"Constructs a dataset from the given variant and structure. Args: variant: A scalar `tf.variant` tensor representing a dataset. structure: A `tf.data.experimental.Structure` object representing the structure of each element in the dataset. Returns: A `tf.data.Dataset` instance." 2095,to_variant,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3095,function,"Returns a variant representing the given dataset. Args: dataset: A `tf.data.Dataset`. Returns: A scalar `tf.variant` tensor representing the given dataset." 2096,DatasetSpec,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3110,class,"Type specification for `tf.data.Dataset`. See `tf.TypeSpec` for more information about TensorFlow type specifications. >>> dataset = tf.data.Dataset.range(3) >>> tf.data.DatasetSpec.from_value(dataset) DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))" 2097,StructuredFunctionWrapper,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3183,class,A function wrapper that supports structured arguments and return values. 2098,_GeneratorDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3414,class,A `Dataset` that generates elements by invoking a function. 2099,ZipDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3469,class,A `Dataset` that zips its inputs together. 2100,ConcatenateDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3501,class,A `Dataset` that concatenates its input with given dataset. 2101,RepeatDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3549,class,A `Dataset` that repeats its input several times. 2102,RangeDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3567,class,A `Dataset` of a step separated range of values. 2103,CacheDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3610,class,A `Dataset` that caches elements of its input. 2104,ShuffleDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3632,class,A `Dataset` that randomly shuffles the elements of its input. 2105,TakeDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3688,class,A `Dataset` containing the first `count` elements from its input. 2106,SkipDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3702,class,A `Dataset` skipping the first `count` elements from its input. 2107,ShardDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3716,class,A `Dataset` for sharding its input. 2108,BatchDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3733,class,A `Dataset` that batches contiguous elements from its input. 2109,_NumpyIterator,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3770,class,Iterator over a dataset with elements converted to numpy. 
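The `from_variant` / `to_variant` pair above can be exercised as a round-trip; a minimal sketch, reusing `get_structure` to supply the required element structure:

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(3)
# Lower the dataset to a scalar tf.variant tensor...
variant = tf.data.experimental.to_variant(dataset)
# ...and rebuild it from the variant plus its element structure.
structure = tf.data.experimental.get_structure(dataset)
rebuilt = tf.data.experimental.from_variant(variant, structure=structure)
print(list(rebuilt.as_numpy_iterator()))  # [0, 1, 2]
```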
2110,_VariantTracker,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3788,class,"Allows export of functions capturing a Dataset in SavedModels. When saving a SavedModel, `tf.saved_model.save` traverses the object graph. Since Datasets reference _VariantTracker objects, that traversal will find a _VariantTracker for each Dataset and so know how to save and restore functions which reference the Dataset's variant Tensor." 2111,_is_padded_shape_compatible_with,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3813,function,"Returns `True` if `input_component_shape` can be padded to `padded_shape`. Args: padded_shape: A `tf.TensorShape`. input_component_shape: A `tf.TensorShape`. Returns: `True` if `input_component_shape` can be padded to `padded_shape`, otherwise `False`." 2112,_padded_shape_to_tensor,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3837,function,"Converts `padded_shape` to a `tf.Tensor` representing that shape. Args: padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python sequence, or a 1-D `tf.Tensor` of `tf.int64` elements. input_component_shape: A `tf.TensorShape`, with which `padded_shape` must be compatible. Returns: A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`. Raises: ValueError: If `padded_shape` is not a shape or not compatible with `input_component_shape`. TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor." 2113,_padding_value_to_tensor,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3889,function,"Converts the padding value to a tensor. Args: value: The padding value. output_type: Its expected dtype. Returns: A scalar `Tensor`. Raises: ValueError: if the padding value is not a scalar. TypeError: if the padding value's type does not match `output_type`." 2114,_padding_values_or_default,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3912,function,Returns padding values with None elements replaced with default values. 2115,PaddedBatchDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,3939,class,A `Dataset` that batches and pads contiguous elements from its input. 2116,_should_unpack_args,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4025,function,Returns `True` if `args` should be `*args` when passed to a callable. 2117,MapDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4030,class,A `Dataset` that maps a function over elements in its input. 2118,ParallelMapDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4068,class,A `Dataset` that maps a function over elements in its input in parallel. 2119,FlatMapDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4118,class,A `Dataset` that maps a function over its input and flattens the result. 2120,InterleaveDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4149,class,A `Dataset` that interleaves the result of transformed inputs. 2121,ParallelInterleaveDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4188,class,A `Dataset` that maps a function over its input and interleaves the result. 2122,FilterDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4256,class,A `Dataset` that filters its input according to a predicate function. 2123,PrefetchDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4288,class,A `Dataset` that asynchronously prefetches its input. 2124,WindowDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4321,class,A dataset that creates window datasets from the input elements. 
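The padded-shape helpers above back `Dataset.padded_batch`; here is a short sketch of the user-facing behavior, where `[None]` means "pad this dimension to the longest element in the batch":

```python
import tensorflow as tf

# Elements [0], [0, 1], [0, 1, 2] are padded with zeros to a common length.
dataset = tf.data.Dataset.range(1, 4).map(lambda x: tf.range(x))
dataset = dataset.padded_batch(3, padded_shapes=[None])
for batch in dataset:
    print(batch.numpy())
# [[0 0 0]
#  [0 1 0]
#  [0 1 2]]
```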
2125,_OptionsDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4357,class,An identity `Dataset` that stores options. 2126,_ModelDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4374,class,"A `Dataset` that acts as an identity, and models performance." 2127,_OptimizeDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4387,class,"A `Dataset` that acts as an identity, and applies optimizations." 2128,_SetStatsAggregatorDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4406,class,"A `Dataset` that acts as an identity, and sets a stats aggregator." 2129,_MaxIntraOpParallelismDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4424,class,"A `Dataset` that acts as an identity, overriding intra-op parallelism." 2130,_PrivateThreadPoolDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4441,class,"A `Dataset` that acts as an identity, setting a private threadpool." 2131,normalize_to_dense,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4456,function,"Normalizes non-tensor components in a dataset to dense representations. This is necessary for dataset transformations that slice along the batch dimension and are oblivious to non-tensors, e.g. `unbatch`, `rebatch`. Args: dataset: Dataset to normalize. Returns: A dataset whose sparse and ragged tensors have been normalized to their dense representations." 2132,_RestructuredDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4488,class,An internal helper for changing the structure and shape of a dataset. 2133,_UnbatchDataset,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4503,class,A dataset that splits the elements of its input into multiple elements. 2134,_collect_resource_inputs,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4532,function,Collects resource inputs for the given ops (and its variant inputs). 2135,_resource_resolver,tensorflow/tensorflow/python/data/ops/dataset_ops.py,4575,function,Updates resource inputs for tf.data ops with indirect dependencies. 2136,_device_stack_is_empty,tensorflow/tensorflow/python/data/ops/iterator_ops.py,71,function, 2137,Iterator,tensorflow/tensorflow/python/data/ops/iterator_ops.py,81,class,Represents the state of iterating through a `Dataset`. 2138,_generate_shared_name,tensorflow/tensorflow/python/data/ops/iterator_ops.py,510,function, 2139,IteratorResourceDeleter,tensorflow/tensorflow/python/data/ops/iterator_ops.py,518,class,"An object which cleans up an iterator resource handle. An alternative to defining a __del__ method on an object. Even if the parent object is part of a reference cycle, the cycle will be collectable." 2140,IteratorBase,tensorflow/tensorflow/python/data/ops/iterator_ops.py,548,class,"Represents an iterator of a `tf.data.Dataset`. `tf.data.Iterator` is the primary mechanism for enumerating elements of a `tf.data.Dataset`. It supports the Python Iterator protocol, which means it can be iterated over using a for-loop: >>> dataset = tf.data.Dataset.range(2) >>> for element in dataset: ... print(element) tf.Tensor(0, shape=(), dtype=int64) tf.Tensor(1, shape=(), dtype=int64) or by fetching individual elements explicitly via `get_next()`: >>> dataset = tf.data.Dataset.range(2) >>> iterator = iter(dataset) >>> print(iterator.get_next()) tf.Tensor(0, shape=(), dtype=int64) >>> print(iterator.get_next()) tf.Tensor(1, shape=(), dtype=int64) In addition, non-raising iteration is supported via `get_next_as_optional()`, which returns the next element (if available) wrapped in a `tf.experimental.Optional`. 
>>> dataset = tf.data.Dataset.from_tensors(42) >>> iterator = iter(dataset) >>> optional = iterator.get_next_as_optional() >>> print(optional.has_value()) tf.Tensor(True, shape=(), dtype=bool) >>> optional = iterator.get_next_as_optional() >>> print(optional.has_value()) tf.Tensor(False, shape=(), dtype=bool)" 2141,OwnedIterator,tensorflow/tensorflow/python/data/ops/iterator_ops.py,641,class,"An iterator producing tf.Tensor objects from a tf.data.Dataset. The iterator resource created through `OwnedIterator` is owned by the Python object and the life time of the underlying resource is tied to the life time of the `OwnedIterator` object. This makes `OwnedIterator` appropriate for use in eager mode and inside of tf.functions." 2142,IteratorSpec,tensorflow/tensorflow/python/data/ops/iterator_ops.py,857,class,"Type specification for `tf.data.Iterator`. For instance, `tf.data.IteratorSpec` can be used to define a tf.function that takes `tf.data.Iterator` as an input argument: >>> @tf.function(input_signature=[tf.data.IteratorSpec( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))]) ... def square(iterator): ... x = iterator.get_next() ... return x * x >>> dataset = tf.data.Dataset.from_tensors(5) >>> iterator = iter(dataset) >>> print(square(iterator)) tf.Tensor(25, shape=(), dtype=int32) Attributes: element_spec: A nested structure of `TypeSpec` objects that represents the type specification of the iterator elements." 2143,_IteratorSaveable,tensorflow/tensorflow/python/data/ops/iterator_ops.py,912,class,SaveableObject for saving/restoring iterator state. 2144,get_next_as_optional,tensorflow/tensorflow/python/data/ops/iterator_ops.py,939,function,"Returns a `tf.experimental.Optional` with the next element of the iterator. If the iterator has reached the end of the sequence, the returned `tf.experimental.Optional` will have no value. Args: iterator: A `tf.data.Iterator`. Returns: A `tf.experimental.Optional` object which either contains the next element of the iterator (if it exists) or no value." 2145,_PerDeviceGenerator,tensorflow/tensorflow/python/data/ops/multi_device_iterator_ops.py,38,class,A `dummy` generator dataset. 2146,_ReincarnatedPerDeviceGenerator,tensorflow/tensorflow/python/data/ops/multi_device_iterator_ops.py,151,class,"Creates a _PerDeviceGenerator-like dataset with a new incarnation_id. Re-uses the functions from the provided per_device_dataset and just switches out the function argument corresponding to the incarnation_id." 2147,_create_device_dataset,tensorflow/tensorflow/python/data/ops/multi_device_iterator_ops.py,193,function,Uses _prototype_device_datasets[i] to build a dataset for the device. 2148,MultiDeviceIterator,tensorflow/tensorflow/python/data/ops/multi_device_iterator_ops.py,211,class,An iterator over multiple devices. 2149,MultiDeviceIteratorResourceDeleter,tensorflow/tensorflow/python/data/ops/multi_device_iterator_ops.py,373,class,"An object which cleans up a Multi Device Iterator resource. An alternative to defining a __del__ method on an object. Even if the parent object is part of a reference cycle, the cycle will be collectible." 2150,MultiDeviceIteratorSpec,tensorflow/tensorflow/python/data/ops/multi_device_iterator_ops.py,411,class,Type specification for `OwnedMultiDeviceIterator`. 2151,OwnedMultiDeviceIterator,tensorflow/tensorflow/python/data/ops/multi_device_iterator_ops.py,461,class,"An iterator over multiple devices. 
The multi-device iterator resource created through `OwnedMultiDeviceIterator` is owned by the Python object and the life time of the underlying resource is tied to the life time of the `OwnedMultiDeviceIterator` object. This makes `OwnedMultiDeviceIterator` appropriate for use in eager mode and inside of tf.functions." 2152,Optional,tensorflow/tensorflow/python/data/ops/optional_ops.py,38,class,"Represents a value that may or may not be present. A `tf.experimental.Optional` can represent the result of an operation that may fail as a value, rather than raising an exception and halting execution. For example, `tf.data.Iterator.get_next_as_optional()` returns a `tf.experimental.Optional` that either contains the next element of an iterator if one exists, or an ""empty"" value that indicates the end of the sequence has been reached. `tf.experimental.Optional` can only be used with values that are convertible to `tf.Tensor` or `tf.CompositeTensor`. One can create a `tf.experimental.Optional` from a value using the `from_value()` method: >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.has_value()) tf.Tensor(True, shape=(), dtype=bool) >>> print(optional.get_value()) tf.Tensor(42, shape=(), dtype=int32) or without a value using the `empty()` method: >>> optional = tf.experimental.Optional.empty( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None)) >>> print(optional.has_value()) tf.Tensor(False, shape=(), dtype=bool)" 2153,_OptionalImpl,tensorflow/tensorflow/python/data/ops/optional_ops.py,166,class,"Concrete implementation of `tf.experimental.Optional`. NOTE(mrry): This implementation is kept private, to avoid defining `Optional.__init__()` in the public API." 2154,OptionalSpec,tensorflow/tensorflow/python/data/ops/optional_ops.py,206,class,"Type specification for `tf.experimental.Optional`. For instance, `tf.OptionalSpec` can be used to define a tf.function that takes `tf.experimental.Optional` as an input argument: >>> @tf.function(input_signature=[tf.OptionalSpec( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None))]) ... def maybe_square(optional): ... if optional.has_value(): ... x = optional.get_value() ... return x * x ... return -1 >>> optional = tf.experimental.Optional.from_value(5) >>> print(maybe_square(optional)) tf.Tensor(25, shape=(), dtype=int32) Attributes: element_spec: A nested structure of `TypeSpec` objects that represents the type specification of the optional element." 2155,_create_or_validate_filenames_dataset,tensorflow/tensorflow/python/data/ops/readers.py,35,function,"Creates (or validates) a dataset of filenames. Args: filenames: Either a list or dataset of filenames. If it is a list, it is converted to a dataset. If it is a dataset, its type and shape are validated. Returns: A dataset of filenames." 2156,_create_dataset_reader,tensorflow/tensorflow/python/data/ops/readers.py,66,function,"Creates a dataset that reads the given files using the given reader. Args: dataset_creator: A function that takes in a single file name and returns a dataset. filenames: A `tf.data.Dataset` containing one or more filenames. num_parallel_reads: The number of parallel reads we should do. Returns: A `Dataset` that reads data from `filenames`." 2157,_TextLineDataset,tensorflow/tensorflow/python/data/ops/readers.py,99,class,A `Dataset` comprising records from one or more text files. 2158,TextLineDatasetV2,tensorflow/tensorflow/python/data/ops/readers.py,134,class,A `Dataset` comprising lines from one or more text files.
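Tying `get_next_as_optional` to the `Optional` entries above, a sketch of non-raising iteration that drains an iterator without catching an end-of-sequence error:

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(2)
iterator = iter(dataset)
while True:
    optional = iterator.get_next_as_optional()
    if not optional.has_value():
        break  # end of sequence reached without raising
    print(optional.get_value().numpy())  # 0, then 1
```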
2159,TextLineDatasetV1,tensorflow/tensorflow/python/data/ops/readers.py,179,class,A `Dataset` comprising lines from one or more text files. 2160,_TFRecordDataset,tensorflow/tensorflow/python/data/ops/readers.py,202,class,A `Dataset` comprising records from one or more TFRecord files. 2161,ParallelInterleaveDataset,tensorflow/tensorflow/python/data/ops/readers.py,235,class,A `Dataset` that maps a function over its input and flattens the result. 2162,TFRecordDatasetV2,tensorflow/tensorflow/python/data/ops/readers.py,290,class,A `Dataset` comprising records from one or more TFRecord files. 2163,TFRecordDatasetV1,tensorflow/tensorflow/python/data/ops/readers.py,354,class,A `Dataset` comprising records from one or more TFRecord files. 2164,_FixedLengthRecordDataset,tensorflow/tensorflow/python/data/ops/readers.py,389,class,A `Dataset` of fixed-length records from one or more binary files. 2165,FixedLengthRecordDatasetV2,tensorflow/tensorflow/python/data/ops/readers.py,439,class,A `Dataset` of fixed-length records from one or more binary files. 2166,FixedLengthRecordDatasetV1,tensorflow/tensorflow/python/data/ops/readers.py,497,class,A `Dataset` of fixed-length records from one or more binary files. 2167,optional_param_to_tensor,tensorflow/tensorflow/python/data/util/convert.py,26,function, 2168,partial_shape_to_tensor,tensorflow/tensorflow/python/data/util/convert.py,38,function,"Returns a `tf.Tensor` that represents the given shape. Args: shape_like: A value that can be converted to a `tf.TensorShape` or a `tf.Tensor`. Returns: A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where `-1` is substituted for any unknown dimensions." 2169,ConvertTest,tensorflow/tensorflow/python/data/util/convert_test.py,30,class, 2170,_sorted,tensorflow/tensorflow/python/data/util/nest.py,45,function,"Returns a sorted list of the dict keys, with error if keys not sortable." 2171,_sequence_like,tensorflow/tensorflow/python/data/util/nest.py,53,function,"Converts the sequence `args` to the same type as `instance`. Args: instance: an instance of `tuple`, `list`, or a `namedtuple` class. args: elements to be converted to a sequence. Returns: `args` with the type of `instance`." 2172,_yield_value,tensorflow/tensorflow/python/data/util/nest.py,81,function, 2173,assert_same_structure,tensorflow/tensorflow/python/data/util/nest.py,104,function,"Asserts that two structures are nested in the same way. Args: nest1: an arbitrarily nested structure. nest2: an arbitrarily nested structure. check_types: if `True` (default) types of sequences should be same as well. For dictionary, ""type"" of dictionary is considered to include its keys. In other words, two dictionaries with different keys are considered to have a different ""type"". If set to `False`, two iterables are considered same as long as they yield the elements that have same structures. Raises: ValueError: If the two structures do not have the same number of elements or if the two structures are not nested in the same way. TypeError: If the two structures differ in the type of sequence in any of their substructures. Only possible if `check_types` is `True`." 2174,_packed_nest_with_indices,tensorflow/tensorflow/python/data/util/nest.py,126,function,"Helper function for pack_nest_as. Args: structure: Substructure (tuple of elements and/or tuples) to mimic flat: Flattened values to output substructure for. index: Index at which to start reading from flat. 
Returns: The tuple (new_index, packed), where: * new_index - the updated index into `flat` having processed `structure`. * packed - the subset of `flat` corresponding to `structure`, having started at `index`, and packed into the same nested format. Raises: ValueError: if `structure` contains more elements than `flat` (assuming indexing starts from `index`)." 2175,pack_sequence_as,tensorflow/tensorflow/python/data/util/nest.py,157,function,"Returns a given flattened sequence packed into a nest. If `structure` is a scalar, `flat_sequence` must be a single-element list; in this case the return value is `flat_sequence[0]`. Args: structure: tuple or list constructed of scalars and/or other tuples/lists, or a scalar. Note: numpy arrays are considered scalars. flat_sequence: flat sequence to pack. Returns: packed: `flat_sequence` converted to have the same recursive structure as `structure`. Raises: ValueError: If nest and structure have different element counts." 2176,map_structure,tensorflow/tensorflow/python/data/util/nest.py,195,function,"Applies `func` to each entry in `structure` and returns a new structure. Applies `func(x[0], x[1], ...)` where x[i] is an entry in `structure[i]`. All structures in `structure` must have the same arity, and the return value will contain the results in the same structure. Args: func: A callable that accepts as many arguments as there are structures. *structure: scalar, or tuple or list of constructed scalars and/or other tuples/lists, or scalars. Note: numpy arrays are considered scalars. **check_types_dict: only valid keyword argument is `check_types`. If set to `True` (default) the types of iterables within the structures have to be same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow this set this argument to `False`. Returns: A new structure with the same arity as `structure`, whose values correspond to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding location in `structure[i]`. If there are different sequence types and `check_types` is `False` the sequence types of the first structure will be used. Raises: TypeError: If `func` is not callable or if the structures do not match each other by depth tree. ValueError: If no structure is provided or if the structures do not match each other by type. ValueError: If wrong keyword arguments are provided." 2177,_yield_flat_up_to,tensorflow/tensorflow/python/data/util/nest.py,248,function,Yields elements of `input_tree` partially flattened up to `shallow_tree`. 2178,assert_shallow_structure,tensorflow/tensorflow/python/data/util/nest.py,259,function,"Asserts that `shallow_tree` is a shallow structure of `input_tree`. That is, this function tests if the `input_tree` structure can be created from the `shallow_tree` structure by replacing its leaf nodes with deeper tree structures. Examples: The following code will raise an exception: ```python shallow_tree = [""a"", ""b""] input_tree = [""c"", [""d"", ""e""], ""f""] assert_shallow_structure(shallow_tree, input_tree) ``` The following code will not raise an exception: ```python shallow_tree = [""a"", ""b""] input_tree = [""c"", [""d"", ""e""]] assert_shallow_structure(shallow_tree, input_tree) ``` Args: shallow_tree: an arbitrarily nested structure. input_tree: an arbitrarily nested structure. check_types: if `True` (default) the sequence types of `shallow_tree` and `input_tree` have to be the same. Raises: TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. Only raised if `check_types` is `True`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`." 2179,flatten_up_to,tensorflow/tensorflow/python/data/util/nest.py,327,function,"Flattens `input_tree` up to `shallow_tree`. Any further depth in structure in `input_tree` is retained as elements in the partially flattened output. If `shallow_tree` and `input_tree` are not sequences, this returns a single-element list: `[input_tree]`. Use Case: Sometimes we may wish to partially flatten a nested sequence, retaining some of the nested structure. We achieve this by specifying a shallow structure, `shallow_tree`, we wish to flatten up to. The input, `input_tree`, can be thought of as having the same structure as `shallow_tree`, but with leaf nodes that are themselves tree structures. Examples: ```python input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] shallow_tree = [[True, True], [False, True]] flattened_input_tree = flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree) # Output is: # [[2, 2], [3, 3], [4, 9], [5, 5]] # [True, True, False, True] ``` ```python input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) input_tree_flattened = flatten(input_tree) # Output is: # [('a', 1), ('b', 2), ('c', 3), ('d', 4)] # ['a', 1, 'b', 2, 'c', 3, 'd', 4] ``` Non-Sequence Edge Cases: ```python flatten_up_to(0, 0) # Output: [0] flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]] flatten_up_to([0, 1, 2], 0) # Output: TypeError flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2] ``` Args: shallow_tree: a possibly pruned structure of input_tree. input_tree: an arbitrarily nested structure or a scalar object. Note, numpy arrays are considered scalars. Returns: A Python list, the partially flattened version of `input_tree` according to the structure of `shallow_tree`. Raises: TypeError: If `shallow_tree` is a sequence but `input_tree` is not. TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`." 2180,map_structure_up_to,tensorflow/tensorflow/python/data/util/nest.py,400,function,"Applies a function or op to a number of partially flattened inputs. The `inputs` are flattened up to `shallow_tree` before being mapped. Use Case: Sometimes we wish to apply a function to a partially flattened sequence (for example when the function itself takes sequence inputs). We achieve this by specifying a shallow structure, `shallow_tree` we wish to flatten up to. The `inputs` can be thought of as having the same structure as `shallow_tree`, but with leaf nodes that are themselves tree structures. This function, therefore, will return something with the same base structure as `shallow_tree`.
Examples: ```python ab_tuple = collections.namedtuple(""ab_tuple"", ""a, b"") op_tuple = collections.namedtuple(""op_tuple"", ""add, mul"") inp_val = ab_tuple(a=2, b=3) inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops) # Output is: ab_tuple(a=6, b=15) ``` ```python data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] name_list = ['evens', ['odds', 'primes']] out = map_structure_up_to( name_list, lambda name, sec: ""first_{}_{}"".format(len(sec), name), name_list, data_list) # Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']] ``` Args: shallow_tree: a shallow tree, common to all the inputs. func: callable which will be applied to each input individually. *inputs: arbitrarily nested combination of objects that are compatible with shallow_tree. The function `func` is applied to corresponding partially flattened elements of each input, so the function must support arity of `len(inputs)`. Raises: TypeError: If `shallow_tree` is a sequence but `input_tree` is not. TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`. Returns: result of repeatedly applying `func`, with the same structure as `shallow_tree`." 2181,NestTest,tensorflow/tensorflow/python/data/util/nest_test.py,34,class, 2182,_internal_attr_name,tensorflow/tensorflow/python/data/util/options.py,22,function, 2183,OptionsBase,tensorflow/tensorflow/python/data/util/options.py,26,class,"Base class for representing a set of tf.data options. Attributes: _options: Stores the option values." 2184,create_option,tensorflow/tensorflow/python/data/util/options.py,59,function,"Creates a type-checked property. Args: name: The name to use. ty: The type to use. The type of the property will be validated when it is set. docstring: The docstring to use. default_factory: A callable that takes no arguments and returns a default value to use if not set. Returns: A type-checked property." 2185,merge_options,tensorflow/tensorflow/python/data/util/options.py,89,function,"Merges the given options, returning the result as a new options object. The input arguments are expected to have a matching type that derives from `OptionsBase` (and thus each represent a set of options). The method outputs an object of the same type created by merging the sets of options represented by the input arguments. The sets of options can be merged as long as no option is set to different non-default values across them. If an option is an instance of `OptionsBase` itself, then this method is applied recursively to the set of options represented by this option. Args: *options_list: options to merge Raises: TypeError: if the input arguments are incompatible or not derived from `OptionsBase` ValueError: if the given options cannot be merged Returns: A new options object which is the result of merging the given options." 2186,_TestOptions,tensorflow/tensorflow/python/data/util/options_test.py,25,class, 2187,_NestedTestOptions,tensorflow/tensorflow/python/data/util/options_test.py,35,class, 2188,OptionsTest,tensorflow/tensorflow/python/data/util/options_test.py,40,class, 2189,get_seed,tensorflow/tensorflow/python/data/util/random_seed.py,29,function,"Returns the local seeds an operation should use given an op-specific seed. See `random_seed.get_seed` for more details. This wrapper adds support for the case where `seed` may be a tensor.
Args: seed: An integer or a `tf.int64` scalar tensor. Returns: A tuple of two `tf.int64` scalar tensors that should be used for the local seed of the calling dataset." 2190,RandomSeedTest,tensorflow/tensorflow/python/data/util/random_seed_test.py,30,class, 2191,any_sparse,tensorflow/tensorflow/python/data/util/sparse.py,28,function,"Checks for sparse tensor. Args: classes: a structure of objects that identify the dataset item classes Returns: `True` if `classes` contains a sparse tensor type and `False` otherwise." 2192,as_dense_shapes,tensorflow/tensorflow/python/data/util/sparse.py,40,function,"Converts sparse tensor shapes to their physical shapes. Args: shapes: a structure of shapes to convert. classes: a structure of objects that identify the dataset item classes Returns: a structure matching the nested structure of `shapes`, containing `tensor_shape.unknown_shape()` at positions where `classes` contains `tf.sparse.SparseTensor` and matching contents of `shapes` otherwise" 2193,as_dense_types,tensorflow/tensorflow/python/data/util/sparse.py,59,function,"Converts sparse tensor types to `dtypes.variant`. Args: types: a structure of types to convert. classes: a structure of objects that identify the dataset item classes Returns: a structure matching the nested structure of `types`, containing `dtypes.variant` at positions where `classes` contains `tf.sparse.SparseTensor` and matching contents of `types` otherwise" 2194,deserialize_sparse_tensors,tensorflow/tensorflow/python/data/util/sparse.py,78,function,"Deserializes sparse tensors. Args: tensors: a structure of tensors to deserialize. types: a structure that holds information about types of `tensors` shapes: a structure that holds information about shapes of `tensors` classes: a structure of objects that identify the dataset item classes Returns: `tensors` with any serialized sparse tensors replaced by their deserialized version." 2195,get_classes,tensorflow/tensorflow/python/data/util/sparse.py,101,function,"Gets classes for a structure of tensors. Args: tensors: the tensor structure to get classes for. Returns: a structure matching the nested structure of `tensors`, containing `tf.sparse.SparseTensor` at positions where `tensors` contains a sparse tensor and `tf.Tensor` otherwise." 2196,serialize_many_sparse_tensors,tensorflow/tensorflow/python/data/util/sparse.py,119,function,"Serializes many sparse tensors into a batch. Args: tensors: a tensor structure to serialize. Returns: `tensors` with any sparse tensors replaced by the serialized batch." 2197,serialize_sparse_tensors,tensorflow/tensorflow/python/data/util/sparse.py,137,function,"Serializes sparse tensors. Args: tensors: a tensor structure to serialize. Returns: `tensors` with any sparse tensors replaced by their serialized version." 2198,SparseTest,tensorflow/tensorflow/python/data/util/sparse_test.py,32,class, 2199,_TensorStructure,tensorflow/tensorflow/python/data/util/structure.py,44,function, 2200,_SparseTensorStructure,tensorflow/tensorflow/python/data/util/structure.py,50,function, 2201,_TensorArrayStructure,tensorflow/tensorflow/python/data/util/structure.py,56,function, 2202,_RaggedTensorStructure,tensorflow/tensorflow/python/data/util/structure.py,63,function, 2203,normalize_element,tensorflow/tensorflow/python/data/util/structure.py,70,function,"Normalizes a nested structure of element components. * Components matching `SparseTensorSpec` are converted to `SparseTensor`. * Components matching `RaggedTensorSpec` are converted to `RaggedTensor`. 
* Components matching `DatasetSpec` or `TensorArraySpec` are passed through. * `CompositeTensor` components are passed through. * All other components are converted to `Tensor`. Args: element: A nested structure of individual components. Returns: A nested structure of `Tensor`, `Dataset`, `SparseTensor`, `RaggedTensor`, or `TensorArray` objects." 2204,convert_legacy_structure,tensorflow/tensorflow/python/data/util/structure.py,119,function,"Returns a `Structure` that represents the given legacy structure. This method provides a way to convert from the existing `Dataset` and `Iterator` structure-related properties to a `Structure` object. A ""legacy"" structure is represented by the `tf.data.Dataset.output_types`, `tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes` properties. TODO(b/110122868): Remove this function once `Structure` is used throughout `tf.data`. Args: output_types: A nested structure of `tf.DType` objects corresponding to each component of a structured value. output_shapes: A nested structure of `tf.TensorShape` objects corresponding to each component of a structured value. output_classes: A nested structure of Python `type` objects corresponding to each component of a structured value. Returns: A `Structure`. Raises: TypeError: If a structure cannot be built from the arguments, because one of the component classes in `output_classes` is not supported." 2205,_from_tensor_list_helper,tensorflow/tensorflow/python/data/util/structure.py,175,function,"Returns an element constructed from the given spec and tensor list. Args: decode_fn: Method that constructs an element component from the element spec component and a tensor list. element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. tensor_list: A list of tensors to use for constructing the value. Returns: An element constructed from the given spec and tensor list. Raises: ValueError: If the number of tensors needed to construct an element for the given spec does not match the given number of tensors." 2206,from_compatible_tensor_list,tensorflow/tensorflow/python/data/util/structure.py,210,function,"Returns an element constructed from the given spec and tensor list. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. tensor_list: A list of tensors to use for constructing the value. Returns: An element constructed from the given spec and tensor list. Raises: ValueError: If the number of tensors needed to construct an element for the given spec does not match the given number of tensors." 2207,from_tensor_list,tensorflow/tensorflow/python/data/util/structure.py,233,function,"Returns an element constructed from the given spec and tensor list. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. tensor_list: A list of tensors to use for constructing the value. Returns: An element constructed from the given spec and tensor list. Raises: ValueError: If the number of tensors needed to construct an element for the given spec does not match the given number of tensors or the given spec is not compatible with the tensor list." 2208,get_flat_tensor_specs,tensorflow/tensorflow/python/data/util/structure.py,257,function,"Returns a list of `tf.TypeSpec`s for the element tensor representation. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. Returns: A list of `tf.TypeSpec`s for the element tensor representation."
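The tensor-list helpers documented above compose into a simple round trip: build a spec from a value, flatten the value into a flat tensor list, then rebuild it. A minimal sketch, assuming the internal module path `tensorflow/python/data/util/structure.py` listed above is importable in a TF 2.x environment (these are internal, unstable APIs):

```python
# Sketch: round-tripping an element through the helpers documented above
# (type_spec_from_value, to_tensor_list, from_tensor_list). Internal API;
# signatures follow the docstrings in this listing.
import tensorflow as tf
from tensorflow.python.data.util import structure

element = {
    "dense": tf.constant([1, 2, 3]),
    "sparse": tf.sparse.SparseTensor(indices=[[0]], values=[1.0], dense_shape=[4]),
}

spec = structure.type_spec_from_value(element)         # nested tf.TypeSpec structure
tensor_list = structure.to_tensor_list(spec, element)  # flat list of tensors/variants
restored = structure.from_tensor_list(spec, tensor_list)
```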
2209,get_flat_tensor_shapes,tensorflow/tensorflow/python/data/util/structure.py,273,function,"Returns a list of `tf.TensorShape`s for the element tensor representation. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. Returns: A list of `tf.TensorShape`s for the element tensor representation." 2210,get_flat_tensor_types,tensorflow/tensorflow/python/data/util/structure.py,286,function,"Returns a list of `tf.DType`s for the element tensor representation. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. Returns: A list of `tf.DType`s for the element tensor representation." 2211,_to_tensor_list_helper,tensorflow/tensorflow/python/data/util/structure.py,299,function,"Returns a tensor list representation of the element. Args: encode_fn: Method that constructs a tensor list representation from the given element spec and element. element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. element: The element to convert to tensor list representation. Returns: A tensor list representation of `element`. Raises: ValueError: If `element_spec` and `element` do not have the same number of elements or if the two structures are not nested in the same way. TypeError: If `element_spec` and `element` differ in the type of sequence in any of their substructures." 2212,to_batched_tensor_list,tensorflow/tensorflow/python/data/util/structure.py,329,function,"Returns a tensor list representation of the element. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. element: The element to convert to tensor list representation. Returns: A tensor list representation of `element`. Raises: ValueError: If `element_spec` and `element` do not have the same number of elements or if the two structures are not nested in the same way or the rank of any of the tensors in the tensor list representation is 0. TypeError: If `element_spec` and `element` differ in the type of sequence in any of their substructures." 2213,to_tensor_list,tensorflow/tensorflow/python/data/util/structure.py,355,function,"Returns a tensor list representation of the element. Args: element_spec: A nested structure of `tf.TypeSpec` objects representing the element type specification. element: The element to convert to tensor list representation. Returns: A tensor list representation of `element`. Raises: ValueError: If `element_spec` and `element` do not have the same number of elements or if the two structures are not nested in the same way. TypeError: If `element_spec` and `element` differ in the type of sequence in any of their substructures." 2214,are_compatible,tensorflow/tensorflow/python/data/util/structure.py,380,function,"Indicates whether two type specifications are compatible. Two type specifications are compatible if they have the same nested structure and their individual components are pair-wise compatible. Args: spec1: A `tf.TypeSpec` object to compare. spec2: A `tf.TypeSpec` object to compare. Returns: `True` if the two type specifications are compatible and `False` otherwise." 2215,type_spec_from_value,tensorflow/tensorflow/python/data/util/structure.py,407,function,"Creates a type specification for the given value. Args: element: The element to create the type specification for. use_fallback: Whether to fall back to converting the element to a tensor in order to compute its `TypeSpec`.
Returns: A nested structure of `TypeSpec`s that represents the type specification of `element`. Raises: TypeError: If a `TypeSpec` cannot be built for `element`, because its type is not supported." 2216,NoneTensor,tensorflow/tensorflow/python/data/util/structure.py,471,class,Composite tensor representation for `None` value. 2217,NoneTensorSpec,tensorflow/tensorflow/python/data/util/structure.py,481,class,Type specification for `None` value. 2218,StructureTest,tensorflow/tensorflow/python/data/util/structure_test.py,53,class, 2219,CustomMap,tensorflow/tensorflow/python/data/util/structure_test.py,759,class,"Custom, immutable map." 2220,obtain_all_variant_tensor_ops,tensorflow/tensorflow/python/data/util/traverse.py,25,function,"Given an input dataset, finds all dataset ops used for construction. The dataset is the result of a series of transformations, each of which includes zero or more Dataset ops, each producing a dataset variant tensor. This method outputs all of them. Args: dataset: Dataset to find variant tensors for. Returns: A list of variant-tensor-producing dataset ops used to construct this dataset." 2221,_TestDataset,tensorflow/tensorflow/python/data/util/traverse_test.py,29,class, 2222,TraverseTest,tensorflow/tensorflow/python/data/util/traverse_test.py,42,class, 2223,_add_main_menu,tensorflow/tensorflow/python/debug/cli/analyzer_cli.py,61,function,"Generate main menu for the screen output from a command. Args: output: (debugger_cli_common.RichTextLines) the output object to modify. node_name: (str or None) name of the node involved (if any). If None, the menu items node_info, list_inputs and list_outputs will be automatically disabled, overriding the values of arguments enable_node_info, enable_list_inputs and enable_list_outputs. enable_list_tensors: (bool) whether the list_tensor menu item will be enabled. enable_node_info: (bool) whether the node_info item will be enabled. enable_print_tensor: (bool) whether the print_tensor item will be enabled. enable_list_inputs: (bool) whether the item list_inputs will be enabled. enable_list_outputs: (bool) whether the item list_outputs will be enabled." 2224,DebugAnalyzer,tensorflow/tensorflow/python/debug/cli/analyzer_cli.py,130,class,Analyzer for debug data from dump directories. 2225,create_analyzer_ui,tensorflow/tensorflow/python/debug/cli/analyzer_cli.py,1583,function,"Create an instance of CursesUI based on a DebugDumpDir object. Args: debug_dump: (debug_data.DebugDumpDir) The debug dump to use. tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor filter (Callable). ui_type: (str) requested UI type, e.g., ""curses"", ""readline"". on_ui_exit: (`Callable`) the callback to be called when the UI exits. config: A `cli_config.CLIConfig` object. Returns: (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer commands and tab-completions registered." 2226,_matmul_op_name,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,53,function, 2227,_cli_config_from_temp_file,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,57,function, 2228,no_rewrite_session_config,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,62,function, 2229,line_number_above,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,74,function, 2230,parse_op_and_node,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,78,function,"Parse a line containing an op node followed by a node name.
For example, if the line is "" [Variable] hidden/weights"", this function will return (""Variable"", ""hidden/weights"") Args: line: The line to be parsed, as a str. Returns: Name of the parsed op type. Name of the parsed node." 2231,assert_column_header_command_shortcut,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,102,function, 2232,assert_listed_tensors,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,116,function,"Check RichTextLines output for list_tensors commands. Args: tst: A test_util.TensorFlowTestCase instance. out: The RichTextLines object to be checked. expected_tensor_names: (list of str) Expected tensor names in the list. expected_op_types: (list of str) Expected op types of the tensors, in the same order as the expected_tensor_names. node_name_regex: Optional: node name regex filter. op_type_regex: Optional: op type regex filter. tensor_filter_name: Optional: name of the tensor filter. sort_by: (str) (timestamp | op_type | tensor_name) the field by which the tensors in the list are sorted. reverse: (bool) whether the sorting is in reverse (i.e., descending) order." 2233,assert_node_attribute_lines,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,261,function,"Check RichTextLines output for node_info commands. Args: tst: A test_util.TensorFlowTestCase instance. out: The RichTextLines object to be checked. node_name: Name of the node. op_type: Op type of the node, as a str. device: Name of the device on which the node resides. input_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the (non-control) inputs to the node. ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the control inputs to the node. recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the (non-control) output recipients to the node. ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the control output recipients to the node. attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a list of 2-tuples. num_dumped_tensors: Optional: number of tensor dumps from the node. show_stack_trace: (bool) whether the stack trace of the node's construction is asserted to be present. stack_trace_available: (bool) whether Python stack trace is available." 2234,check_syntax_error_output,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,425,function,Check RichTextLines output for valid command prefix but invalid syntax. 2235,check_error_output,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,434,function,"Check RichTextLines output from invalid/erroneous commands. Args: tst: A test_util.TensorFlowTestCase instance. out: The RichTextLines object to be checked. command_prefix: The command prefix of the command that caused the error. args: The arguments (excluding prefix) of the command that caused the error." 2236,check_main_menu,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,450,function,Check the main menu annotation of an output. 2237,check_menu_item,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,497,function, 2238,create_analyzer_cli,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,514,function,"Create an analyzer CLI. Args: dump: A `DebugDumpDir` object to base the analyzer CLI on. Returns: 1) A `DebugAnalyzer` object created based on `dump`. 2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object and has the common tfdbg commands, e.g., lt, ni, li, lo, registered." 
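Taken together, `DebugDumpDir` (from debug_data, indexed further below) and `create_analyzer_ui` above suggest the following wiring. A sketch only, assuming a pre-existing dump directory at the hypothetical path `/tmp/tfdbg_dumps/run_1` and that `run_ui()` behaves as in the other tfdbg UIs:

```python
# Sketch: pointing the analyzer UI documented above at a dump directory.
# The dump path is hypothetical; these are internal tfdbg modules.
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.lib import debug_data

dump = debug_data.DebugDumpDir("/tmp/tfdbg_dumps/run_1")  # hypothetical path

# tensor_filters maps a filter name (str) to a callable, per the docstring;
# an empty dict registers no filters.
ui = analyzer_cli.create_analyzer_ui(dump, tensor_filters={}, ui_type="readline")
ui.run_ui()  # enters the interactive loop (lt, ni, li, lo, ...)
```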
2239,AnalyzerCLISimpleMulAddTest,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,577,class, 2240,AnalyzerCLIPrintLargeTensorTest,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,1638,class, 2241,AnalyzerCLIControlDepTest,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,1699,class, 2242,AnalyzerCLIWhileLoopTest,tensorflow/tensorflow/python/debug/cli/analyzer_cli_test.py,2027,class, 2243,BaseUI,tensorflow/tensorflow/python/debug/cli/base_ui.py,27,class,Base class of tfdbg user interface. 2244,CLIConfig,tensorflow/tensorflow/python/debug/cli/cli_config.py,30,class,Client-facing configurations for TFDBG command-line interfaces. 2245,CLIConfigTest,tensorflow/tensorflow/python/debug/cli/cli_config_test.py,31,class, 2246,bytes_to_readable_str,tensorflow/tensorflow/python/debug/cli/cli_shared.py,55,function,"Generate a human-readable string representing number of bytes. The units B, kB, MB and GB are used. Args: num_bytes: (`int` or None) Number of bytes. include_b: (`bool`) Include the letter B at the end of the unit. Returns: (`str`) A string representing the number of bytes in a human-readable way, including a unit at the end." 2247,time_to_readable_str,tensorflow/tensorflow/python/debug/cli/cli_shared.py,85,function,"Convert time value to human-readable string. Args: value_us: time value in microseconds. force_time_unit: force the output to use the specified time unit. Must be in TIME_UNITS. Returns: Human-readable string representation of the time value. Raises: ValueError: if force_time_unit value is not in TIME_UNITS." 2248,parse_ranges_highlight,tensorflow/tensorflow/python/debug/cli/cli_shared.py,113,function,"Process ranges highlight string. Args: ranges_string: (str) A string representing a numerical range or a list of numerical ranges. See the help info of the -r flag of the print_tensor command for more details. Returns: An instance of tensor_format.HighlightOptions, if range_string is a valid representation of a range or a list of ranges." 2249,numpy_printoptions_from_screen_info,tensorflow/tensorflow/python/debug/cli/cli_shared.py,143,function, 2250,format_tensor,tensorflow/tensorflow/python/debug/cli/cli_shared.py,150,function,"Generate formatted str to represent a tensor or its slices. Args: tensor: (numpy ndarray) The tensor value. tensor_name: (str) Name of the tensor, e.g., the tensor's debug watch key. np_printoptions: (dict) Numpy tensor formatting options. print_all: (bool) Whether the tensor is to be displayed in its entirety, instead of printing ellipses, even if its number of elements exceeds the default numpy display threshold. (Note: Even if this is set to true, the screen output can still be cut off by the UI frontend if it consists of more lines than the frontend can handle.) tensor_slicing: (str or None) Slicing of the tensor, e.g., ""[:, 1]"". If None, no slicing will be performed on the tensor. highlight_options: (tensor_format.HighlightOptions) options to highlight elements of the tensor. See the doc of tensor_format.format_tensor() for more details. include_numeric_summary: Whether a text summary of the numeric values (if applicable) will be included. write_path: A path to save the tensor value (after any slicing) to (optional). `numpy.save()` is used to save the value. Returns: An instance of `debugger_cli_common.RichTextLines` representing the (potentially sliced) tensor." 2251,error,tensorflow/tensorflow/python/debug/cli/cli_shared.py,218,function,"Generate a RichTextLines output for error. Args: msg: (str) The error message.
Returns: (debugger_cli_common.RichTextLines) A representation of the error message for screen output." 2252,_recommend_command,tensorflow/tensorflow/python/debug/cli/cli_shared.py,233,function,"Generate a RichTextLines object that describes a recommended command. Args: command: (str) The command to recommend. description: (str) A description of what the command does. indent: (int) How many spaces to indent in the beginning. create_link: (bool) Whether a command link is to be applied to the command string. Returns: (RichTextLines) Formatted text (with font attributes) for recommending the command." 2253,get_tfdbg_logo,tensorflow/tensorflow/python/debug/cli/cli_shared.py,261,function,Make an ASCII representation of the tfdbg logo. 2254,get_run_start_intro,tensorflow/tensorflow/python/debug/cli/cli_shared.py,279,function,"Generate formatted intro for run-start UI. Args: run_call_count: (int) Run call counter. fetches: Fetches of the `Session.run()` call. See doc of `Session.run()` for more details. feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()` for more details. tensor_filters: (dict) A dict from tensor-filter name to tensor-filter callable. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run. Returns: (RichTextLines) Formatted intro message about the `Session.run()` call." 2255,get_run_short_description,tensorflow/tensorflow/python/debug/cli/cli_shared.py,386,function,"Get a short description of the run() call. Args: run_call_count: (int) Run call counter. fetches: Fetches of the `Session.run()` call. See doc of `Session.run()` for more details. feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()` for more details. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run. Returns: (str) A short description of the run() call, including information about the fetch(es) and feed(s)." 2256,get_error_intro,tensorflow/tensorflow/python/debug/cli/cli_shared.py,434,function,"Generate formatted intro for TensorFlow run-time error. Args: tf_error: (errors.OpError) TensorFlow run-time error object. Returns: (RichTextLines) Formatted intro message about the run-time OpError, with sample commands for debugging." 2257,BytesToReadableStrTest,tensorflow/tensorflow/python/debug/cli/cli_shared_test.py,33,class, 2258,TimeToReadableStrTest,tensorflow/tensorflow/python/debug/cli/cli_shared_test.py,73,class, 2259,GetRunStartIntroAndDescriptionTest,tensorflow/tensorflow/python/debug/cli/cli_shared_test.py,109,class, 2260,GetErrorIntroTest,tensorflow/tensorflow/python/debug/cli/cli_shared_test.py,323,class, 2261,assert_lines_equal_ignoring_whitespace,tensorflow/tensorflow/python/debug/cli/cli_test_utils.py,25,function,"Assert equality in lines, ignoring all whitespace. Args: test: An instance of unittest.TestCase or its subtypes (e.g., TensorFlowTestCase). expected_lines: Expected lines as an iterable of strings. actual_lines: Actual lines as an iterable of strings." 2262,assert_array_lines_close,tensorflow/tensorflow/python/debug/cli/cli_test_utils.py,48,function,"Assert that the array value represented by lines is close to expected. Note that the shape of the array represented by the `array_lines` is ignored. Args: test: An instance of TensorFlowTestCase. expected_array: Expected value of the array. array_lines: A list of strings representing the array.
E.g., ""array([[ 1.0, 2.0 ], [ 3.0, 4.0 ]])"" Assumes that values are separated by commas, parentheses, brackets, ""|"" characters and whitespace." 2263,Interval,tensorflow/tensorflow/python/debug/cli/command_parser.py,33,class,Represents an interval between a start and end value. 2264,parse_command,tensorflow/tensorflow/python/debug/cli/command_parser.py,56,function,"Parse command string into a list of arguments. - Disregards whitespace inside double quotes and brackets. - Strips paired leading and trailing double quotes in arguments. - Splits the command at whitespace. Nested double quotes and brackets are not handled. Args: command: (str) Input command. Returns: (list of str) List of arguments." 2265,extract_output_file_path,tensorflow/tensorflow/python/debug/cli/command_parser.py,104,function,"Extract output file path from command arguments. Args: args: (list of str) command arguments. Returns: (list of str) Command arguments with the output file path part stripped. (str or None) Output file path (if any). Raises: SyntaxError: If there is no file path after the last "">"" character." 2266,parse_tensor_name_with_slicing,tensorflow/tensorflow/python/debug/cli/command_parser.py,151,function,"Parse tensor name, potentially suffixed by slicing string. Args: in_str: (str) Input name of the tensor, potentially followed by a slicing string. E.g.: Without slicing string: ""hidden/weights/Variable:0"", with slicing string: ""hidden/weights/Variable:0[1, :]"" Returns: (str) name of the tensor (str) slicing string, if any. If no slicing string is present, return """"." 2267,validate_slicing_string,tensorflow/tensorflow/python/debug/cli/command_parser.py,174,function,"Validate a slicing string. Check if the input string contains only brackets, digits, commas and colons that are valid characters in numpy-style array slicing. Args: slicing_string: (str) Input slicing string to be validated. Returns: (bool) True if and only if the slicing string is valid." 2268,_parse_slices,tensorflow/tensorflow/python/debug/cli/command_parser.py,190,function,"Construct a tuple of slices from the slicing string. The string must be a valid slicing string. Args: slicing_string: (str) Input slicing string to be parsed. Returns: tuple(slice1, slice2, ...) Raises: ValueError: If tensor_slicing is not a valid numpy ndarray slicing str." 2269,parse_indices,tensorflow/tensorflow/python/debug/cli/command_parser.py,219,function,"Parse a string representing indices. For example, if the input is ""[1, 2, 3]"", the return value will be a list of indices: [1, 2, 3] Args: indices_string: (str) a string representing indices. Can optionally be surrounded by a pair of brackets. Returns: (list of int): Parsed indices." 2270,parse_ranges,tensorflow/tensorflow/python/debug/cli/command_parser.py,243,function,"Parse a string representing numerical range(s). Args: range_string: (str) A string representing a numerical range or a list of them. For example: ""[-1.0,1.0]"", ""[-inf, 0]"", ""[[-inf, -1.0], [1.0, inf]]"" Returns: (list of list of float) A list of numerical ranges parsed from the input string. Raises: ValueError: If the input doesn't represent a range or a list of ranges." 2271,parse_memory_interval,tensorflow/tensorflow/python/debug/cli/command_parser.py,284,function,"Convert a human-readable memory interval to a tuple of start and end value. Args: interval_str: (`str`) A human-readable str representing an interval (e.g., ""[10kB, 20kB]"", ""<100M"", "">100G""). Only the units ""kB"", ""MB"", ""GB"" are supported. 
The ""B character at the end of the input `str` may be omitted. Returns: `Interval` object where start and end are in bytes. Raises: ValueError: if the input is not valid." 2272,parse_time_interval,tensorflow/tensorflow/python/debug/cli/command_parser.py,314,function,"Convert a human-readable time interval to a tuple of start and end value. Args: interval_str: (`str`) A human-readable str representing an interval (e.g., ""[10us, 20us]"", ""<100s"", "">100ms""). Supported time suffixes are us, ms, s. Returns: `Interval` object where start and end are in microseconds. Raises: ValueError: if the input is not valid." 2273,_parse_interval,tensorflow/tensorflow/python/debug/cli/command_parser.py,343,function,"Convert a human-readable interval to a tuple of start and end value. Args: interval_str: (`str`) A human-readable str representing an interval (e.g., ""[1M, 2M]"", ""<100k"", "">100ms""). The items following the "">"", ""<"", "">="" and ""<="" signs have to start with a number (e.g., 3.0, -2, .98). The same requirement applies to the items in the parentheses or brackets. Returns: Interval object where start or end can be None if the range is specified as ""N"" respectively. Raises: ValueError: if the input is not valid." 2274,parse_readable_size_str,tensorflow/tensorflow/python/debug/cli/command_parser.py,409,function,"Convert a human-readable str representation to number of bytes. Only the units ""kB"", ""MB"", ""GB"" are supported. The ""B character at the end of the input `str` may be omitted. Args: size_str: (`str`) A human-readable str representing a number of bytes (e.g., ""0"", ""1023"", ""1.1kB"", ""24 MB"", ""23GB"", ""100 G"". Returns: (`int`) The parsed number of bytes. Raises: ValueError: on failure to parse the input `size_str`." 2275,parse_readable_time_str,tensorflow/tensorflow/python/debug/cli/command_parser.py,443,function,"Parses a time string in the format N, Nus, Nms, Ns. Args: time_str: (`str`) string consisting of an integer time value optionally followed by 'us', 'ms', or 's' suffix. If suffix is not specified, value is assumed to be in microseconds. (e.g. 100us, 8ms, 5s, 100). Returns: Microseconds value." 2276,evaluate_tensor_slice,tensorflow/tensorflow/python/debug/cli/command_parser.py,471,function,"Call eval on the slicing of a tensor, with validation. Args: tensor: (numpy ndarray) The tensor value. tensor_slicing: (str or None) Slicing of the tensor, e.g., ""[:, 1]"". If None, no slicing will be performed on the tensor. Returns: (numpy ndarray) The sliced tensor. Raises: ValueError: If tensor_slicing is not a valid numpy ndarray slicing str." 2277,get_print_tensor_argparser,tensorflow/tensorflow/python/debug/cli/command_parser.py,494,function,"Get an ArgumentParser for a command that prints tensor values. Examples of such commands include print_tensor and print_feed. Args: description: Description of the ArgumentParser. Returns: An instance of argparse.ArgumentParser." 
2278,ParseCommandTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,27,class, 2279,ExtractOutputFilePathTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,104,class, 2280,ParseTensorNameTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,208,class, 2281,ValidateSlicingStringTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,227,class, 2282,ParseIndicesTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,243,class, 2283,ParseRangesTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,272,class, 2284,ParseReadableSizeStrTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,316,class, 2285,ParseReadableTimeStrTest,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,362,class, 2286,ParseInterval,tensorflow/tensorflow/python/debug/cli/command_parser_test.py,391,class, 2287,_get_command_from_line_attr_segs,tensorflow/tensorflow/python/debug/cli/curses_ui.py,51,function,"Attempt to extract command from the attribute segments of a line. Args: mouse_x: (int) x coordinate of the mouse event. attr_segs: (list) The list of attribute segments of a line from a RichTextLines object. Returns: (str or None) If a command exists: the command as a str; otherwise, None." 2288,ScrollBar,tensorflow/tensorflow/python/debug/cli/curses_ui.py,71,class,"Vertical ScrollBar for Curses-based CLI. An object of this class has knowledge of the location of the scroll bar in the screen coordinates, the current scrolling position, and the total number of text lines in the screen text. By using this information, it can generate a text rendering of the scroll bar, which consists of an UP button on the top and a DOWN button on the bottom, in addition to a scroll block in between, whose exact location is determined by the scrolling position. The object can also calculate the scrolling command (e.g., _SCROLL_UP_A_LINE, _SCROLL_DOWN) from the coordinate of a mouse click event in the screen region it occupies." 2289,CursesUI,tensorflow/tensorflow/python/debug/cli/curses_ui.py,209,class,"Curses-based Command-line UI. In this class, the methods with the prefix ""_screen_"" are the methods that interact with the actual terminal using the curses library." 2290,string_to_codes,tensorflow/tensorflow/python/debug/cli/curses_ui_test.py,39,function, 2291,codes_to_string,tensorflow/tensorflow/python/debug/cli/curses_ui_test.py,43,function, 2292,MockCursesUI,tensorflow/tensorflow/python/debug/cli/curses_ui_test.py,48,class,Mock subclass of CursesUI that bypasses actual terminal manipulations. 2293,CursesTest,tensorflow/tensorflow/python/debug/cli/curses_ui_test.py,235,class, 2294,ScrollBarTest,tensorflow/tensorflow/python/debug/cli/curses_ui_test.py,1532,class, 2295,NavigationHistoryItem,tensorflow/tensorflow/python/debug/cli/curses_widgets.py,26,class,Individual item in navigation history. 2296,CursesNavigationHistory,tensorflow/tensorflow/python/debug/cli/curses_widgets.py,42,class,"Navigation history containing commands, outputs and scroll info." 2297,CNHTest,tensorflow/tensorflow/python/debug/cli/curses_widgets_test.py,29,class, 2298,CommandLineExit,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,41,class, 2299,RichLine,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,52,class,"Rich single-line text. Attributes: text: A plain string, the raw text represented by this object. Should not contain newlines.
font_attr_segs: A list of (start, end, font attribute) triples, representing richness information applied to substrings of text." 2300,rich_text_lines_from_rich_line_list,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,113,function,"Convert a list of RichLine objects or strings to a RichTextLines object. Args: rich_text_list: a list of RichLine objects or strings annotations: annotations for the resultant RichTextLines object. Returns: A corresponding RichTextLines object." 2301,get_tensorflow_version_lines,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,135,function,"Generate RichTextLines with TensorFlow version info. Args: include_dependency_versions: Include the version of TensorFlow's key dependencies, such as numpy. Returns: A formatted, multi-line `RichTextLines` object." 2302,RichTextLines,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,154,class,"Rich multi-line text. Line-by-line text output, with font attributes (e.g., color) and annotations (e.g., indices in a multi-dimensional tensor). Used as the text output of CLI commands. Can be rendered on terminal environments such as curses. This is not to be confused with Rich Text Format (RTF). This class is for text lines only." 2303,regex_find,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,381,function,"Perform regex match in rich text lines. Produces a new RichTextLines object with font_attr_segs containing highlighted regex matches. Example use cases include: 1) search for specific items in a large list of items, and 2) search for specific numerical values in a large tensor. Args: orig_screen_output: The original RichTextLines, in which the regex find is to be performed. regex: The regex used for matching. font_attr: Font attribute used for highlighting the found result. Returns: A modified copy of orig_screen_output. Raises: ValueError: If input str regex is not a valid regular expression." 2304,wrap_rich_text_lines,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,434,function,"Wrap RichTextLines according to maximum number of columns. Produces a new RichTextLines object with the text lines, font_attr_segs and annotations properly wrapped. This ought to be used sparingly, as in most cases, command handlers producing RichTextLines outputs should know the screen/panel width via the screen_info kwarg and should produce properly length-limited lines in the output accordingly. Args: inp: Input RichTextLines object. cols: Number of columns, as an int. Returns: 1) A new instance of RichTextLines, with line lengths limited to cols. 2) A list of new (wrapped) line index. For example, if the original input consists of three lines and only the second line is wrapped, and it's wrapped into two lines, this return value will be: [0, 1, 3]. Raises: ValueError: If inputs have invalid types." 2305,CommandHandlerRegistry,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,530,class,"Registry of command handlers for CLI. Handler methods (callables) for user commands can be registered with this class, which then is able to dispatch commands to the correct handlers and retrieve the RichTextLines output. 
For example, suppose you have the following handler defined: def echo(argv, screen_info=None): return RichTextLines([""arguments = %s"" % "" "".join(argv), ""screen_info = "" + repr(screen_info)]) you can register the handler with the command prefix ""echo"" and alias ""e"": registry = CommandHandlerRegistry() registry.register_command_handler(""echo"", echo, ""Echo arguments, along with screen info"", prefix_aliases=[""e""]) then to invoke this command handler with some arguments and screen_info, do: registry.dispatch_command(""echo"", [""foo"", ""bar""], screen_info={""cols"": 80}) or with the prefix alias: registry.dispatch_command(""e"", [""foo"", ""bar""], screen_info={""cols"": 80}) The call will return a RichTextLines object which can be rendered by a CLI." 2306,TabCompletionRegistry,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,846,class,Registry for tab completion responses. 2307,CommandHistory,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,1004,class,Keeps command history and supports lookup. 2308,MenuItem,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,1110,class,A class for an item in a text-based menu. 2309,Menu,tensorflow/tensorflow/python/debug/cli/debugger_cli_common.py,1151,class,A class for text-based menu. 2310,CommandLineExitTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,33,class, 2311,RichTextLinesTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,47,class, 2312,CommandHandlerRegistryTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,285,class, 2313,RegexFindTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,590,class, 2314,WrapScreenOutputTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,652,class, 2315,SliceRichTextLinesTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,774,class, 2316,TabCompletionRegistryTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,820,class, 2317,CommandHistoryTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,930,class, 2318,MenuNodeTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,1065,class, 2319,MenuTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,1091,class, 2320,GetTensorFlowVersionLinesTest,tensorflow/tensorflow/python/debug/cli/debugger_cli_common_test.py,1158,class, 2321,_parse_debug_tensor_name,tensorflow/tensorflow/python/debug/cli/evaluator.py,34,function,"Parse a debug tensor name in a to-be-evaluated expression. Args: debug_tensor_name: name of the debug tensor, with or without device name as a prefix, with or without debug op, with or without '[]' as a suffix. E.g., without device name prefix, without debug op suffix: ""hidden_0/MatMul:0"" E.g., with device name prefix: ""/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0"" E.g., with debug op suffix: ""hidden_0/MatMul:0:DebugNumericSummary"" E.g., with device name prefix and debug op suffix: ""/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:DebugNumericSummary"" E.g., with device name prefix, debug op and an exec index: ""/job:worker/replica:0/task:1/gpu:0:hidden_0/MatMul:0:DebugNumericSummary[1]"" Returns: device_name: If device name prefix exists, the device name; otherwise, `None`. node_name: Name of the node. output_slot: Output slot index as an `int`. debug_op: If the debug op suffix exists, the debug op name; otherwise, `None`. 
exec_index: Execution index (applicable to cases in which a debug tensor is computed multiple times in a `tf.Session.run` call, e.g., due to `tf.while_loop`). If the exec_index suffix does not exist, this value defaults to `0`. Raises: ValueError: If the input `debug_tensor_name` is malformed." 2322,ExpressionEvaluator,tensorflow/tensorflow/python/debug/cli/evaluator.py,106,class,Evaluates Python expressions using debug tensor values from a dump. 2323,ParseDebugTensorNameTest,tensorflow/tensorflow/python/debug/cli/evaluator_test.py,28,class, 2324,EvaluatorTest,tensorflow/tensorflow/python/debug/cli/evaluator_test.py,145,class, 2325,main,tensorflow/tensorflow/python/debug/cli/offline_analyzer.py,30,function, 2326,ProfileDataTableView,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli.py,48,class,Table View of profiling data. 2327,_list_profile_filter,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli.py,146,function,"Filter function for list_profile command. Args: profile_datum: A `ProfileDatum` object. node_name_regex: Regular expression pattern object to filter by name. file_path_regex: Regular expression pattern object to filter by file path. op_type_regex: Regular expression pattern object to filter by op type. op_time_interval: `Interval` for filtering op time. exec_time_interval: `Interval` for filtering exec time. min_lineno: Lower bound for 1-based line number, inclusive. If <= 0, has no effect. max_lineno: Upper bound for 1-based line number, exclusive. If <= 0, has no effect. # TODO(cais): Maybe filter by function name. Returns: True iff profile_datum should be included." 2328,_list_profile_sort_key,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli.py,198,function,"Get a profile_datum property to sort by in list_profile command. Args: profile_datum: A `ProfileDatum` object. sort_by: (string) indicates a value to sort by. Must be one of SORT_BY* constants. Returns: profile_datum property to sort by." 2329,ProfileAnalyzer,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli.py,223,class,Analyzer for profiling data. 2330,create_profiler_ui,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli.py,768,function,"Create an instance of CursesUI based on a `tf.Graph` and `RunMetadata`. Args: graph: Python `Graph` object. run_metadata: A `RunMetadata` protobuf object. ui_type: (str) requested UI type, e.g., ""curses"", ""readline"". on_ui_exit: (`Callable`) the callback to be called when the UI exits. config: An instance of `cli_config.CLIConfig`. Returns: (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer commands and tab-completions registered." 2331,no_rewrite_session_config,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli_test.py,39,function, 2332,_line_number_above,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli_test.py,47,function, 2333,_at_least_one_line_matches,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli_test.py,51,function, 2334,_assert_at_least_one_line_matches,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli_test.py,59,function, 2335,_assert_no_lines_match,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli_test.py,66,function, 2336,ProfileAnalyzerListProfileTest,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli_test.py,74,class, 2337,ProfileAnalyzerPrintSourceTest,tensorflow/tensorflow/python/debug/cli/profile_analyzer_cli_test.py,326,class, 2338,ReadlineUI,tensorflow/tensorflow/python/debug/cli/readline_ui.py,28,class,Readline-based Command-line UI. 
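`create_profiler_ui` above takes a graph plus the `RunMetadata` collected from a traced run. A sketch of that plumbing, assuming a TF1-style session (matching the tfdbg CLI modules indexed here); not a definitive recipe:

```python
# Sketch: collecting RunMetadata from a traced Session.run() and handing it
# to the profiler UI documented above (create_profiler_ui).
import tensorflow.compat.v1 as tf
from tensorflow.python.debug.cli import profile_analyzer_cli

tf.disable_eager_execution()
x = tf.random.normal([100, 100])
y = tf.matmul(x, x)

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with tf.Session() as sess:
    sess.run(y, options=run_options, run_metadata=run_metadata)
    ui = profile_analyzer_cli.create_profiler_ui(
        sess.graph, run_metadata, ui_type="readline")
    ui.run_ui()
```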
2339,MockReadlineUI,tensorflow/tensorflow/python/debug/cli/readline_ui_test.py,34,class,Test subclass of ReadlineUI that bypasses terminal manipulations. 2340,CursesTest,tensorflow/tensorflow/python/debug/cli/readline_ui_test.py,56,class, 2341,HighlightOptions,tensorflow/tensorflow/python/debug/cli/tensor_format.py,40,class,Options for highlighting elements of a tensor. 2342,format_tensor,tensorflow/tensorflow/python/debug/cli/tensor_format.py,72,function,"Generate a RichTextLines object showing a tensor in formatted style. Args: tensor: The tensor to be displayed, as a numpy ndarray or other appropriate format (e.g., None representing uninitialized tensors). tensor_label: A label for the tensor, as a string. If set to None, will suppress the tensor name line in the return value. include_metadata: Whether metadata such as dtype and shape are to be included in the formatted text. auxiliary_message: An auxiliary message to display under the tensor label, dtype and shape information lines. include_numeric_summary: Whether a text summary of the numeric values (if applicable) will be included. np_printoptions: A dictionary of keyword arguments that are passed to a call of np.set_printoptions() to set the text format for displaying numpy ndarrays. highlight_options: (HighlightOptions) options for highlighting elements of the tensor. Returns: A RichTextLines object. Its annotation field has line-by-line markups to indicate which indices in the array the first element of each line corresponds to." 2343,_annotate_ndarray_lines,tensorflow/tensorflow/python/debug/cli/tensor_format.py,202,function,"Generate annotations for line-by-line begin indices of tensor text. Parse the numpy-generated text representation of a numpy ndarray to determine the indices of the first element of each text line (if any element is present in the line). For example, given the following multi-line ndarray text representation: [""array([[ 0. , 0.0625, 0.125 , 0.1875],"", "" [ 0.25 , 0.3125, 0.375 , 0.4375],"", "" [ 0.5 , 0.5625, 0.625 , 0.6875],"", "" [ 0.75 , 0.8125, 0.875 , 0.9375]])""] the generated annotation will be: {0: {BEGIN_INDICES_KEY: [0, 0]}, 1: {BEGIN_INDICES_KEY: [1, 0]}, 2: {BEGIN_INDICES_KEY: [2, 0]}, 3: {BEGIN_INDICES_KEY: [3, 0]}} Args: array_lines: Text lines representing the tensor, as a list of str. tensor: The tensor being formatted as string. np_printoptions: A dictionary of keyword arguments that are passed to a call of np.set_printoptions(). offset: Line number offset applied to the line indices in the returned annotation. Returns: An annotation as a dict." 2344,locate_tensor_element,tensorflow/tensorflow/python/debug/cli/tensor_format.py,282,function,"Locate a tensor element in formatted text lines, given element indices. Given a RichTextLines object representing a tensor and indices of the sought element, return the row number at which the element is located (if it exists). Args: formatted: A RichTextLines object containing formatted text lines representing the tensor. indices: Indices of the sought element, as a list of int or a list of list of int. The former case is for a single set of indices to look up, whereas the latter case is for looking up a batch of indices sets at once. In the latter case, the indices must be in ascending order, or a ValueError will be raised. Returns: 1) A boolean indicating whether the element falls into an omitted line. 2) Row index. 3) Column start index, i.e., the first column in which the representation of the specified tensor starts, if it can be determined.
If it cannot be determined (e.g., due to ellipsis), None. 4) Column end index, i.e., the column right after the last column that represents the specified tensor. If it cannot be determined, None. The return values described above are based on a single set of indices to look up. In the case of batch mode (multiple sets of indices), the return values will be lists of the types described above. Raises: AttributeError: If: Input argument ""formatted"" does not have the required annotations. ValueError: If: 1) Indices do not match the dimensions of the tensor, or 2) Indices exceed sizes of the tensor, or 3) Indices contain negative value(s). 4) If in batch mode, and if not all sets of indices are in ascending order." 2345,_validate_indices_list,tensorflow/tensorflow/python/debug/cli/tensor_format.py,406,function, 2346,_locate_elements_in_line,tensorflow/tensorflow/python/debug/cli/tensor_format.py,429,function,"Determine the start and end indices of an element in a line. Args: line: (str) the line in which the element is to be sought. indices_list: (list of list of int) list of indices of the element to search for. Assumes that the indices in the batch are unique and sorted in ascending order. ref_indices: (list of int) reference indices, i.e., the indices of the first element represented in the line. Returns: start_columns: (list of int) start column indices, if found. If not found, None. end_columns: (list of int) end column indices, if found. If not found, None. If found, the element is represented in the left-closed-right-open interval [start_column, end_column)." 2347,_pad_string_to_length,tensorflow/tensorflow/python/debug/cli/tensor_format.py,484,function, 2348,numeric_summary,tensorflow/tensorflow/python/debug/cli/tensor_format.py,488,function,"Get a text summary of a numeric tensor. This summary is only available for numeric (int*, float*, complex*) and Boolean tensors. Args: tensor: (`numpy.ndarray`) the tensor value object to be summarized. Returns: The summary text as a `RichTextLines` object. If the type of `tensor` is not numeric or Boolean, a single-line `RichTextLines` object containing a warning message will reflect that." 2349,RichTextLinesTest,tensorflow/tensorflow/python/debug/cli/tensor_format_test.py,34,class, 2350,NumericSummaryTest,tensorflow/tensorflow/python/debug/cli/tensor_format_test.py,628,class, 2351,get_ui,tensorflow/tensorflow/python/debug/cli/ui_factory.py,26,function,"Create a `base_ui.BaseUI` subtype. This factory method attempts to fall back to other available ui_types on ImportError. For example, if `ui_type` is `curses`, but `curses` cannot be imported properly, e.g., on Windows, will fall back to `readline`. Args: ui_type: (`str`) requested UI type. Currently supported: (curses | readline) on_ui_exit: (`Callable`) the callback to be called when the UI exits. available_ui_types: (`None` or `list` of `str`) Manually-set available ui_types. config: An instance of `cli_config.CLIConfig()` carrying user-facing configurations. Returns: A `base_ui.BaseUI` subtype object. Raises: ValueError: on an invalid ui_type or on exhausting all fallback ui_types."
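A quick sketch of `format_tensor` and its annotations as documented above (internal tfdbg module; signature taken from the docstring):

```python
# Sketch: formatting a small ndarray with tensor_format.format_tensor and
# inspecting the RichTextLines result described above.
import numpy as np
from tensorflow.python.debug.cli import tensor_format

tensor = np.linspace(0.0, 1.0, 16).reshape([4, 4])
out = tensor_format.format_tensor(
    tensor, "my_tensor", include_metadata=True, include_numeric_summary=True)

# out.lines holds the text; per the docstring, the annotations map each
# text line to the indices of the first array element it shows.
print("\n".join(out.lines))
```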
2352,main,tensorflow/tensorflow/python/debug/examples/debug_mnist.py,31,function, 2353,main,tensorflow/tensorflow/python/debug/examples/v1/debug_errors.py,32,function, 2354,main,tensorflow/tensorflow/python/debug/examples/v1/debug_fibonacci.py,34,function, 2355,main,tensorflow/tensorflow/python/debug/examples/v1/debug_keras.py,33,function, 2356,parse_args,tensorflow/tensorflow/python/debug/examples/v1/debug_mnist_v1.py,45,function,"Parses commandline arguments. Returns: A tuple (parsed, unparsed) of the parsed object and a group of unparsed arguments that did not match the parser." 2357,main,tensorflow/tensorflow/python/debug/examples/v1/debug_mnist_v1.py,112,function, 2358,main,tensorflow/tensorflow/python/debug/examples/v1/debug_tflearn_iris.py,33,function, 2359,main,tensorflow/tensorflow/python/debug/examples/v2/debug_fibonacci_v2.py,33,function, 2360,parse_args,tensorflow/tensorflow/python/debug/examples/v2/debug_mnist_v2.py,45,function,"Parses commandline arguments. Returns: A tuple (parsed, unparsed) of the parsed object and a group of unparsed arguments that did not match the parser." 2361,main,tensorflow/tensorflow/python/debug/examples/v2/debug_mnist_v2.py,125,function, 2362,limit_string_length,tensorflow/tensorflow/python/debug/lib/check_numerics_callback.py,98,function,"Limit the length of the input string. Args: string: Input string. max_len: (int or None) If int, the length limit. If None, no limit. Returns: Possibly length-limited string." 2363,_maybe_lookup_original_input_tensor,tensorflow/tensorflow/python/debug/lib/check_numerics_callback.py,118,function, 2364,get_check_numerics_error_message,tensorflow/tensorflow/python/debug/lib/check_numerics_callback.py,127,function,"Create a meaningful and user-friendly error message about the offending tensor. The error message reveals the following info about the op that outputs NaN/Infinity: dtype, shape (to the extent known at graph-construction time), input tensors, stack trace for op creation (if in graph mode). Args: slot: (int) slot index of the tensor output. num_outputs: (int) total number of outputs of the op. op_type: (str) Type of the op that generates `tensor`. tensor: (Tensor) the offending tensor, i.e., the tensor that contains Infinities or NaNs. inputs: (array of Tensor) inputs to the op that generates `tensor`. graph: (tf.Graph) the graph object that `tensor` belongs to. Available only under graph mode. traceback: (list of trace frames) the stack trace of the op's creation. Available only under graph mode. stack_height_limit: (int or None) If int, limit to the height of the stack trace printed in the error message. If None, no limit to the height. path_length_limit: (int or None) Length limit for file paths included in the formatted stack trace. Returns: (str) A formatted error message." 2365,_debug_summary,tensorflow/tensorflow/python/debug/lib/check_numerics_callback.py,220,function, 2366,CheckNumericsCallback,tensorflow/tensorflow/python/debug/lib/check_numerics_callback.py,227,class,Wrapper for the numerics-checking callback for thread locality. 2367,enable_check_numerics,tensorflow/tensorflow/python/debug/lib/check_numerics_callback.py,339,function,"Enable tensor numerics checking in an eager/graph unified fashion. The numerics checking mechanism will cause any TensorFlow eager execution or graph execution to error out as soon as an op's output tensor contains infinity or NaN. This method is idempotent. Calling it multiple times has the same effect as calling it once.
This method takes effect only on the thread in which it is called. When an op's float-type output tensor contains any Infinity or NaN, a `tf.errors.InvalidArgumentError` will be thrown, with an error message that reveals the following information: - The type of the op that generated the tensor with bad numerics. - Data type (dtype) of the tensor. - Shape of the tensor (to the extent known at the time of eager execution or graph construction). - Name of the containing graph (if available). - (Graph mode only): The stack trace of the intra-graph op's creation, with a stack-height limit and a path-length limit for visual clarity. The stack frames that belong to the user's code (as opposed to tensorflow's internal code) are highlighted with a text arrow (""->""). - (Eager mode only): How many of the offending tensor's elements are `Infinity` and `NaN`, respectively. Once enabled, the check-numerics mechanism can be disabled by using `tf.debugging.disable_check_numerics()`. Example usage: 1. Catching infinity during the execution of a `tf.function` graph: ```py import tensorflow as tf tf.debugging.enable_check_numerics() @tf.function def square_log_x_plus_1(x): v = tf.math.log(x + 1) return tf.math.square(v) x = -1.0 # When the following line runs, a function graph will be compiled # from the Python function `square_log_x_plus_1()`. Due to the # `enable_check_numerics()` call above, the graph will contain # numerics checking ops that will run during the function graph's # execution. The function call generates an -infinity when the Log # (logarithm) op operates on the output tensor of the Add op. # The program errors out at this line, printing an error message. y = square_log_x_plus_1(x) z = -y ``` 2. Catching NaN during eager execution: ```py import numpy as np import tensorflow as tf tf.debugging.enable_check_numerics() x = np.array([[0.0, -1.0], [4.0, 3.0]]) # The following line executes the Sqrt op eagerly. Due to the negative # element in the input array, a NaN is generated. Due to the # `enable_check_numerics()` call above, the program errors immediately # at this line, printing an error message. y = tf.math.sqrt(x) z = tf.matmul(y, y) ``` NOTE: If your code is running on TPUs, be sure to call `tf.config.set_soft_device_placement(True)` before calling `tf.debugging.enable_check_numerics()` as this API uses automatic outside compilation on TPUs. For example: ```py tf.config.set_soft_device_placement(True) tf.debugging.enable_check_numerics() resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') strategy = tf.distribute.TPUStrategy(resolver) with strategy.scope(): # ... ``` Args: stack_height_limit: Limit to the height of the printed stack trace. Applicable only to ops in `tf.function`s (graphs). path_length_limit: Limit to the file path included in the printed stack trace. Applicable only to ops in `tf.function`s (graphs)." 2368,disable_check_numerics,tensorflow/tensorflow/python/debug/lib/check_numerics_callback.py,448,function,"Disable the eager/graph unified numerics checking mechanism. This method can be used after a call to `tf.debugging.enable_check_numerics()` to disable the numerics-checking mechanism that catches infinity and NaN values output by ops executed eagerly or in tf.function-compiled graphs. This method is idempotent. Calling it multiple times has the same effect as calling it once. This method takes effect only on the thread in which it is called."
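A minimal sketch pairing the two calls documented above; the computation in between is illustrative:

```py
import tensorflow as tf

tf.debugging.enable_check_numerics()
# Eager ops and tf.function graphs executed here are checked for Inf/NaN.
y = tf.math.log(tf.constant([1.0, 2.0]))  # healthy values: no error is raised
tf.debugging.disable_check_numerics()  # checking is off again from this point
```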
2369,LimitStringLengthTest,tensorflow/tensorflow/python/debug/lib/check_numerics_callback_test.py,45,class, 2370,CheckNumericsCallbackTest,tensorflow/tensorflow/python/debug/lib/check_numerics_callback_test.py,70,class, 2371,CheckNumericsCallbackUnhealthyTest,tensorflow/tensorflow/python/debug/lib/check_numerics_callback_test.py,131,class,Test for cases in which enable_check_numerics() catches infs or nans. 2372,get_graph_element_name,tensorflow/tensorflow/python/debug/lib/common.py,29,function,"Obtain the name or string representation of a graph element. If the graph element has the attribute ""name"", return name. Otherwise, return a __str__ representation of the graph element. Certain graph elements, such as `SparseTensor`s, do not have the attribute ""name"". Args: elem: The graph element in question. Returns: If the attribute 'name' is available, return the name. Otherwise, return str(elem)." 2373,get_flattened_names,tensorflow/tensorflow/python/debug/lib/common.py,47,function,"Get a flattened list of the names in run() call feeds or fetches. Args: feeds_or_fetches: Feeds or fetches of the `Session.run()` call. It may be a Tensor, an Operation or a Variable. It may also be nested lists, tuples or dicts. See doc of `Session.run()` for more details. Returns: (list of str) A flattened list of fetch names from `feeds_or_fetches`." 2374,get_run_key,tensorflow/tensorflow/python/debug/lib/common.py,74,function,"Summarize the names of feeds and fetches as a RunKey JSON string. Args: feed_dict: The feed_dict given to the `Session.run()` call. fetches: The fetches from the `Session.run()` call. Returns: A JSON Array consisting of two items. The first item is a flattened Array of the names of the feeds. The second item is a flattened Array of the names of the fetches." 2375,CommonTest,tensorflow/tensorflow/python/debug/lib/common_test.py,28,class, 2376,_glob,tensorflow/tensorflow/python/debug/lib/debug_data.py,52,function, 2377,InconvertibleTensorProto,tensorflow/tensorflow/python/debug/lib/debug_data.py,59,class,Represents a TensorProto that cannot be converted to np.ndarray. 2378,load_tensor_from_event_file,tensorflow/tensorflow/python/debug/lib/debug_data.py,83,function,"Load a tensor from an event file. Assumes that the event file contains an `Event` protobuf and the `Event` protobuf contains a `Tensor` value. Args: event_file_path: (`str`) path to the event file. Returns: The tensor value loaded from the event file, as a `numpy.ndarray`. For uninitialized Tensors, returns `None`. For Tensors of data types that cannot be converted to `numpy.ndarray` (e.g., `tf.resource`), return `None`." 2379,load_tensor_from_event,tensorflow/tensorflow/python/debug/lib/debug_data.py,105,function,"Load a tensor from an Event proto. Args: event: The Event proto, assumed to hold a tensor value in its summary.value[0] field. Returns: The tensor value loaded from the event file, as a `numpy.ndarray`, if representation of the tensor value by a `numpy.ndarray` is possible. For uninitialized Tensors, returns `None`. For Tensors of data types that cannot be represented as `numpy.ndarray` (e.g., `tf.resource`), return the `TensorProto` protobuf object without converting it to a `numpy.ndarray`."
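A sketch of `load_tensor_from_event_file` based on its contract above; the dump-file path is purely hypothetical:

```py
from tensorflow.python.debug.lib import debug_data

# Hypothetical path to a tfdbg event file produced by a debugged run.
event_file = "/tmp/tfdbg_dump_root/device_CPU_0/node_a_0_DebugIdentity_1234567890"
value = debug_data.load_tensor_from_event_file(event_file)
# `value` is a numpy.ndarray, or None for uninitialized tensors and for
# dtypes (e.g., tf.resource) that cannot be converted to numpy.ndarray.
```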
2380,_load_graph_def_from_event_file,tensorflow/tensorflow/python/debug/lib/debug_data.py,143,function, 2381,_load_log_message_from_event_file,tensorflow/tensorflow/python/debug/lib/debug_data.py,151,function, 2382,_is_graph_file,tensorflow/tensorflow/python/debug/lib/debug_data.py,159,function, 2383,_is_run_fetches_info_file,tensorflow/tensorflow/python/debug/lib/debug_data.py,163,function, 2384,_is_run_feed_keys_info_file,tensorflow/tensorflow/python/debug/lib/debug_data.py,167,function, 2385,_get_tensor_name,tensorflow/tensorflow/python/debug/lib/debug_data.py,171,function,"Get tensor name given node name and output slot index. Args: node_name: Name of the node that outputs the tensor, as a string. output_slot: Output slot index of the tensor, as an integer. Returns: Name of the tensor, as a string." 2386,_get_tensor_watch_key,tensorflow/tensorflow/python/debug/lib/debug_data.py,185,function,"Get the string representation of a debug watch on a tensor. Args: node_name: Name of the node by which the watched tensor is produced, as a string. output_slot: Output slot index of the tensor, as an integer. debug_op: Name of the debug op that is used to watch the tensor, as a string. Returns: A string representing the debug watch on the tensor (i.e., the ""watch key"")." 2387,has_inf_or_nan,tensorflow/tensorflow/python/debug/lib/debug_data.py,202,function,"A predicate for whether a tensor consists of any bad numerical values. This predicate is common enough to merit definition in this module. Bad numerical values include `nan`s and `inf`s. The signature of this function follows the requirement of the method `DebugDumpDir.find()`. Args: datum: (`DebugTensorDatum`) Datum metadata. tensor: (`numpy.ndarray` or None) Value of the tensor. None represents an uninitialized tensor. Returns: (`bool`) True if and only if tensor contains any nan or inf values." 2388,extract_core_metadata_from_event_proto,tensorflow/tensorflow/python/debug/lib/debug_data.py,240,function, 2389,device_name_to_device_path,tensorflow/tensorflow/python/debug/lib/debug_data.py,250,function,Convert device name to device path. 2390,device_path_to_device_name,tensorflow/tensorflow/python/debug/lib/debug_data.py,257,function,"Parse device name from device path. Args: device_dir: (str) a directory name for the device. Returns: (str) parsed device name." 2391,DebugTensorDatum,tensorflow/tensorflow/python/debug/lib/debug_data.py,273,class,"A single tensor dumped by TensorFlow Debugger (tfdbg). Contains metadata about the dumped tensor, including `timestamp`, `node_name`, `output_slot`, `debug_op`, and path to the dump file (`file_path`). This type does not hold the generally space-expensive tensor value (numpy array). Instead, it points to the file from which the tensor value can be loaded (with the `get_tensor` method) if needed." 2392,WatchKeyDoesNotExistInDebugDumpDirError,tensorflow/tensorflow/python/debug/lib/debug_data.py,458,class, 2393,DebugDumpDir,tensorflow/tensorflow/python/debug/lib/debug_data.py,462,class,"Data set from a debug-dump directory on filesystem. An instance of `DebugDumpDir` contains all `DebugTensorDatum` instances in a tfdbg dump root directory."
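The `has_inf_or_nan` predicate above is shaped for `DebugDumpDir.find()`; a sketch of that pairing, assuming a dump root already populated by a debugged run (path illustrative):

```py
from tensorflow.python.debug.lib import debug_data

dump = debug_data.DebugDumpDir("/tmp/tfdbg_dump_root")  # hypothetical dump root
# find() applies the (datum, tensor) predicate to each dumped tensor and
# returns the matching DebugTensorDatum objects.
for datum in dump.find(debug_data.has_inf_or_nan):
    print(datum.node_name, datum.output_slot, datum.debug_op)
```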
2394,DeviceNamePathConversionTest,tensorflow/tensorflow/python/debug/lib/debug_data_test.py,36,class, 2395,HasNanOrInfTest,tensorflow/tensorflow/python/debug/lib/debug_data_test.py,52,class, 2396,DebugTensorDatumTest,tensorflow/tensorflow/python/debug/lib/debug_data_test.py,111,class, 2397,DebugDumpDirTest,tensorflow/tensorflow/python/debug/lib/debug_data_test.py,151,class, 2398,BaseMonitor,tensorflow/tensorflow/python/debug/lib/debug_events_monitors.py,49,class,Base class for debug event data monitors. 2399,InfNanAlert,tensorflow/tensorflow/python/debug/lib/debug_events_monitors.py,84,class,Alert for Infinity and NaN values. 2400,InfNanMonitor,tensorflow/tensorflow/python/debug/lib/debug_events_monitors.py,144,class,Monitor for Infinity and NaN in tensor values. 2401,TestMonitor,tensorflow/tensorflow/python/debug/lib/debug_events_monitors_test.py,40,class, 2402,DebugEventsMonitorTest,tensorflow/tensorflow/python/debug/lib/debug_events_monitors_test.py,63,class, 2403,AlertDataObjectsTest,tensorflow/tensorflow/python/debug/lib/debug_events_monitors_test.py,208,class,Unit tests for alert-class objects. 2404,InfNanMonitorTest,tensorflow/tensorflow/python/debug/lib/debug_events_monitors_test.py,233,class, 2405,DebugEventsReader,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,39,class,Reader class for a tfdbg v2 DebugEvents directory. 2406,BaseDigest,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,322,class,"Base class for digest. Properties: wall_time: A timestamp for the digest as a `float` (unit: s). locator: A datum that allows tracing the digest to its original location. It can be either of the following: 1. Bytes offset from the beginning of the file as a single integer, for the case of all digests of the same kind coming from the same file. 2. A tuple of a file index and a byte offset. This applies to the case in which the same type of debugger data may come from multiple files, e.g., graph execution traces." 2407,ExecutionDigest,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,353,class,"Light-weight digest summarizing a top-level execution event. Use `DebugDataReader.read_execution(execution_digest)` to load the more detailed data object concerning the execution event (`Execution`). Properties: op_type: Type name of the executed op. In the case of the eager execution of an individual op, it is the name of the op (e.g., ""MatMul""). In the case of the execution of a tf.function (FuncGraph), this is the internally-generated name of the function (e.g., ""__inference_my_func_123""). output_tensor_device_ids: IDs of the devices on which the output tensors of the execution reside. For no-output execution, this is `None`." 2408,_tuple_or_none,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,395,function, 2409,Execution,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,399,class,"Detailed data relating to a top-level execution event. The execution is of an individual op or a tf.function, which may have any number of output tensors. Properties (beyond the base class `ExecutionDigest`): host_name: Name of the host on which the execution happened. stack_frame_ids: Reference IDs for stack frames, ordered from bottommost to topmost. Use `DebugDataReader.read_execution_stack_trace()` to load the detailed stack frames (filepath, lineno and function name). tensor_debug_mode: TensorDebugMode enum value, as an `int`. graph_id: ID of the executed FuncGraph (applicable only to the execution of a tf.function).
`None` for the eager execution of an individual op. input_tensor_ids: IDs of the input (eager) tensor(s) for this execution, if any. If the eager execution has no input tensor, this is `None`. Else, this is a `tuple` of `int`s. output_tensor_ids: IDs of the output (eager) tensor(s) from this execution, if any. If the eager execution produces no output tensor, this is `None`. Else, this is a `tuple` of `int`s. debug_tensor_values: Values of the debug tensor(s), applicable only to non-FULL_TENSOR tensor debug mode. A tuple of list of numbers. Each element of the tuple corresponds to an output tensor of the execution. See documentation of the various TensorDebugModes for the semantics of the numbers. If the eager execution produces no output tensor, this is `None`. Else, this is a `tuple` of `list`s." 2410,DebuggedGraph,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,495,class,"Data object representing debugging information about a tf.Graph. Includes `FuncGraph`s. Properties: name: Name of the graph (if any). May be `None` for non-function graphs. graph_id: Debugger-generated ID for the graph. inner_graph_ids: A list of the debugger-generated IDs for the graphs enclosed by this graph. outer_graph_id: If this graph is nested within an outer graph, ID of the outer graph. If this is an outermost graph, `None`." 2411,DebuggedDevice,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,609,class,"Debugger data regarding a device involved in the debugged program. Properties: device_name: Name of the device, as a str. device_id: An integer ID for the device, unique for each device within the scope of the debugged TensorFlow program." 2412,GraphOpCreationDigest,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,639,class,"Data object describing the creation of an op inside a graph. For size efficiency, this digest object does not contain any stack frames or any references to them. To obtain the stack frames, use `DataReader.read_graph_op_creation_stack_trace()`. Properties (beyond the base class): graph_id: Debugger-generated ID of the immediately-enclosing graph. op_type: Type name of the op (e.g., ""MatMul""). op_name: Name of the op (e.g., ""dense_1/MatMul""). output_tensor_ids: Debugger-generated IDs for the output(s) of the op. If the op produces no output tensor, this is `None`. Else, this is a `tuple` of `int`s. input_names: Names of the input tensors to the op. device_name: The name of the device that the op is placed on (if available). host_name: Name of the host on which the op is created. stack_frame_ids: IDs of the frames of the stack trace at which the op is created." 2413,GraphExecutionTraceDigest,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,732,class,"Light-weight summary of an intra-graph tensor execution event. Use `DebugDataReader.read_graph_execution_trace()` on this object to read more detailed data (`GraphExecutionTrace`). Properties (beyond the base class): op_type: Type name of the executed op (e.g., ""Conv2D""). op_name: Name of the op (e.g., ""conv_2d_3/Conv2D""). output_slot: Output slot index of the tensor. graph_id: The debugger-generated ID of the innermost (immediately-enclosing) graph." 2414,GraphExecutionTrace,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,781,class,"Detailed data object describing an intra-graph tensor execution.
Attributes (in addition to GraphExecutionTraceDigest): graph_ids: The debugger-generated IDs of the graphs that enclose the executed op (tensor), ordered from the outermost to the innermost. graph_id: The debugger-generated ID of the innermost (immediately-enclosing) graph. tensor_debug_mode: TensorDebugMode enum value. debug_tensor_value: Debug tensor values (only for non-FULL_TENSOR tensor_debug_mode). A list of numbers. See the documentation of the TensorDebugModes for the semantics of the numbers. device_name: Device on which the tensor resides (if available)." 2415,_parse_tensor_value,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,845,function,"Helper method for reading a tensor value from a tensor proto. The rationale for the distinction between `True` and `False` values of `return_list` is as follows: - `return_list=True` is used for TensorDebugMode values other than FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEALTH. Under those modes, the value is guaranteed (by contract) to be a 1D float64 tensor. - `return_list=False` is used for the FULL_TENSOR TensorDebugMode specifically. Instead, we use `numpy.ndarray` to maximally preserve the shape, dtype and value information regarding the underlying tensor value. Under that mode, we don't use a python list to represent the tensor value because that can lead to loss of information (e.g., both float16 and float32 dtypes get mapped to Python floats). Args: tensor_proto: The TensorProto instance from which the tensor value will be loaded. return_list: Whether the return value will be a nested Python list that comes out from `numpy.ndarray.tolist()`. Returns: If parsing is successful, the tensor value as a `numpy.ndarray` or the nested Python list converted from it. If parsing fails, `None`." 2416,_execution_digest_from_debug_event_proto,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,881,function,Convert a DebugEvent proto into an ExecutionDigest data object. 2417,_execution_from_debug_event_proto,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,891,function,Convert a DebugEvent proto into an Execution data object. 2418,DebugDataReader,tensorflow/tensorflow/python/debug/lib/debug_events_reader.py,917,class,"A reader that reads structured debugging data in the tfdbg v2 format. The set of data read by an object of this class concerns the execution history of a tfdbg2-instrumented TensorFlow program. Note: - An object of this class incrementally reads data from files that belong to the tfdbg v2 DebugEvent file set. Calling `update()` triggers the reading from the last-successful reading positions in the files. - This object can be used as a context manager. Its `__exit__()` call closes the file readers cleanly." 2419,DebugEventsWriter,tensorflow/tensorflow/python/debug/lib/debug_events_writer.py,30,class,A writer for TF debugging events. Used by tfdbg v2. 2420,DebugEventsWriterTest,tensorflow/tensorflow/python/debug/lib/debug_events_writer_test.py,40,class, 2421,MultiSetReaderTest,tensorflow/tensorflow/python/debug/lib/debug_events_writer_test.py,602,class,Test for DebugDataReader for multiple file sets under a dump root. 2422,DataObjectsTest,tensorflow/tensorflow/python/debug/lib/debug_events_writer_test.py,682,class, 2423,_tensor_to_grad_debug_op_name,tensorflow/tensorflow/python/debug/lib/debug_gradients.py,37,function, 2424,_parse_grad_debug_op_name,tensorflow/tensorflow/python/debug/lib/debug_gradients.py,42,function,"Parse the name of a debug gradient op.
Args: op_name: the name of the debug gradient op. Returns: 1) The UUID of the GradientsDebugger that created the debug gradient op. 2) Name of the original tensor whose gradient is debugged by the debug gradient op." 2425,GradientsDebugger,tensorflow/tensorflow/python/debug/lib/debug_gradients.py,68,class,"Gradients Debugger. Allows retrieval of gradient tensors created by TensorFlow's automatic differentiation algorithm, i.e., `tf.gradients` and optimizer classes that use it." 2426,clear_gradient_debuggers,tensorflow/tensorflow/python/debug/lib/debug_gradients.py,351,function,Clear all globally registered gradient debuggers. 2427,_identify_gradient_grad,tensorflow/tensorflow/python/debug/lib/debug_gradients.py,357,function,Gradient function for the DebugIdentity op. 2428,_identify_gradient_grad_ref,tensorflow/tensorflow/python/debug/lib/debug_gradients.py,367,function,Gradient function for the DebugIdentity op. 2429,gradient_values_from_dump,tensorflow/tensorflow/python/debug/lib/debug_gradients.py,372,function,"Find gradient values from a `DebugDumpDir` object. Args: grad_debugger: the `tf_debug.GradientsDebugger` instance to be used. x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor on the denominator of the differentiation. dump: A `tfdbg.DebugDumpDir` object. Returns: If this `GradientsDebugger` instance has the gradient tensor of `x_tensor` registered: a list of `numpy.ndarray` representing the value of the gradient tensor from `dump`. The list could be empty, if the gradient tensor is not executed in the `tf.Session.run()` call that generated the `dump`. The list could also contain multiple values of the gradient tensor, e.g., if gradient tensor is computed repeatedly in a `tf.while_loop` during the run that generated the `dump`. Raises: LookupError: If this `GradientsDebugger` instance does not have the gradient tensor of `x_tensor` registered. ValueError: If this `GradientsDebugger` has a `tf.Graph` object that does not match the `tf.Graph` object of the `dump`. TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`." 2430,IdentifyGradientTest,tensorflow/tensorflow/python/debug/lib/debug_gradients_test.py,40,class, 2431,ReconstructNonDebugGraphTest,tensorflow/tensorflow/python/debug/lib/debug_graph_reconstruction_test.py,40,class, 2432,parse_node_or_tensor_name,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,25,function,"Get the node name from a string that can be node or tensor name. Args: name: An input node name (e.g., ""node_a"") or tensor name (e.g., ""node_a:0""), as a str. Returns: 1) The node name, as a str. If the input name is a tensor name, i.e., contains a colon, the final colon and the following output slot will be stripped. 2) If the input name is a tensor name, the output slot, as an int. If the input name is not a tensor name, None." 2433,get_node_name,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,49,function, 2434,get_output_slot,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,54,function,"Get the output slot number from the name of a graph element. If element_name is a node name without output slot at the end, 0 will be assumed. Args: element_name: (`str`) name of the graph element in question. Returns: (`int`) output slot number." 2435,is_copy_node,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,70,function,"Determine whether a node name is that of a debug Copy node.
Such nodes are inserted by TensorFlow core upon request in RunOptions.debug_options.debug_tensor_watch_opts. Args: node_name: Name of the node. Returns: A bool indicating whether the input argument is the name of a debug Copy node." 2436,is_debug_node,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,86,function,"Determine whether a node name is that of a debug node. Such nodes are inserted by TensorFlow core upon request in RunOptions.debug_options.debug_tensor_watch_opts. Args: node_name: Name of the node. Returns: A bool indicating whether the input argument is the name of a debug node." 2437,parse_debug_node_name,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,101,function,"Parse the name of a debug node. Args: node_name: Name of the debug node. Returns: 1. Name of the watched node, as a str. 2. Output slot index of the watched tensor, as an int. 3. Index of the debug node, as an int. 4. Name of the debug op, as a str, e.g., ""DebugIdentity"". Raises: ValueError: If the input node name is not a valid debug node name." 2438,GraphTracingReachedDestination,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,142,class, 2439,DFSGraphTracer,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,146,class,Graph input tracer using depth-first search. 2440,_infer_device_name,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,223,function,Infer device name from a partition GraphDef. 2441,DebugGraph,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,237,class,Represents a debugger-decorated graph. 2442,reconstruct_non_debug_graph_def,tensorflow/tensorflow/python/debug/lib/debug_graphs.py,481,function,"Reconstruct original (non-debugger-decorated) partition GraphDef. This method strips the input `tf.compat.v1.GraphDef` of the Copy* and Debug*-type nodes inserted by the debugger. The reconstructed partition graph is identical to the original (i.e., non-debugger-decorated) partition graph except in the following respects: 1) The exact names of the runtime-inserted internal nodes may differ. These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops. 2) As a consequence of 1, the nodes that receive input directly from such send- and recv-type ops will have different input names. 3) The parallel_iteration attribute of while-loop Enter ops is set to 1. Args: debug_graph_def: The debugger-decorated `tf.compat.v1.GraphDef`, with the debugger-inserted Copy* and Debug* nodes. Returns: The reconstructed `tf.compat.v1.GraphDef` stripped of the debugger-inserted nodes." 2443,ParseNodeOrTensorNameTest,tensorflow/tensorflow/python/debug/lib/debug_graphs_test.py,25,class, 2444,GetNodeNameAndOutputSlotTest,tensorflow/tensorflow/python/debug/lib/debug_graphs_test.py,42,class, 2445,NodeNameChecksTest,tensorflow/tensorflow/python/debug/lib/debug_graphs_test.py,56,class, 2446,ParseDebugNodeNameTest,tensorflow/tensorflow/python/debug/lib/debug_graphs_test.py,79,class, 2447,_grappler_enabled_session_config,tensorflow/tensorflow/python/debug/lib/debug_grappler_test.py,37,function,"Constructs a Session config proto that explicitly enables Grappler. Returns: A config proto that obtains extra safety for the unit tests in this file by ensuring that the relevant Grappler rewrites are always enabled." 2448,SessionDebugGrapplerInteractionTest,tensorflow/tensorflow/python/debug/lib/debug_grappler_test.py,51,class, 2449,EventListenerStub,tensorflow/tensorflow/python/debug/lib/debug_service_pb2_grpc.py,30,class,"EventListener: Receives Event protos, e.g., from debugged TensorFlow runtime(s)."
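Restating the parsing contracts above as a tiny sketch (`parse_node_or_tensor_name` and `get_output_slot` are both indexed in `debug_graphs.py`):

```py
from tensorflow.python.debug.lib import debug_graphs

node, slot = debug_graphs.parse_node_or_tensor_name("node_a:0")
# node == "node_a", slot == 0: the final colon and output slot are stripped.
node, slot = debug_graphs.parse_node_or_tensor_name("node_a")
# node == "node_a", slot is None: the input was a plain node name.
print(debug_graphs.get_output_slot("node_a"))  # 0 is assumed when absent
```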
2450,EventListenerServicer,tensorflow/tensorflow/python/debug/lib/debug_service_pb2_grpc.py,58,class,"EventListener: Receives Event protos, e.g., from debugged TensorFlow runtime(s)." 2451,add_EventListenerServicer_to_server,tensorflow/tensorflow/python/debug/lib/debug_service_pb2_grpc.py,91,function, 2452,add_debug_tensor_watch,tensorflow/tensorflow/python/debug/lib/debug_utils.py,26,function,"Add watch on a `Tensor` to `RunOptions`. N.B.: 1. Under certain circumstances, the `Tensor` may not get actually watched (e.g., if the node of the `Tensor` is constant-folded during runtime). 2. For debugging purposes, the `parallel_iteration` attribute of all `tf.while_loop`s in the graph is set to 1 to prevent any node from being executed multiple times concurrently. This change does not affect subsequent non-debugged runs of the same `tf.while_loop`s. Args: run_options: An instance of `config_pb2.RunOptions` to be modified. node_name: (`str`) name of the node to watch. output_slot: (`int`) output slot index of the tensor from the watched node. debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s). Can be a `list` of `str` or a single `str`. The latter case is equivalent to a `list` of `str` with only one element. For debug op types with customizable attributes, each debug op string can optionally contain a list of attribute names, in the syntax of: debug_op_name(attr_name_1=attr_value_1;attr_name_2=attr_value_2;...) debug_urls: (`str` or `list` of `str`) URL(s) to send debug values to, e.g., `file:///tmp/tfdbg_dump_1`, `grpc://localhost:12345`. tolerate_debug_op_creation_failures: (`bool`) Whether to tolerate debug op creation failures by not throwing exceptions. global_step: (`int`) Optional global_step count for this debug tensor watch." 2453,watch_graph,tensorflow/tensorflow/python/debug/lib/debug_utils.py,82,function,"Add debug watches to `RunOptions` for a TensorFlow graph. To watch all `Tensor`s on the graph, let both `node_name_regex_allowlist` and `op_type_regex_allowlist` be the default (`None`). N.B.: 1. Under certain circumstances, the `Tensor` may not get actually watched (e.g., if the node of the `Tensor` is constant-folded during runtime). 2. For debugging purposes, the `parallel_iteration` attribute of all `tf.while_loop`s in the graph is set to 1 to prevent any node from being executed multiple times concurrently. This change does not affect subsequent non-debugged runs of the same `tf.while_loop`s. Args: run_options: An instance of `config_pb2.RunOptions` to be modified. graph: An instance of `ops.Graph`. debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s) to use. debug_urls: URLs to send debug values to. Can be a list of strings, a single string, or None. The case of a single string is equivalent to a list consisting of a single string, e.g., `file:///tmp/tfdbg_dump_1`, `grpc://localhost:12345`. For debug op types with customizable attributes, each debug op name string can optionally contain a list of attribute names, in the syntax of: debug_op_name(attr_name_1=attr_value_1;attr_name_2=attr_value_2;...) node_name_regex_allowlist: Regular-expression allowlist for node_name, e.g., `""(weight_[0-9]+|bias_.*)""` op_type_regex_allowlist: Regular-expression allowlist for the op type of nodes, e.g., `""(Variable|Add)""`. If both `node_name_regex_allowlist` and `op_type_regex_allowlist` are set, the two filtering operations will occur in a logical `AND` relation. In other words, a node will be included if and only if it hits both allowlists.
tensor_dtype_regex_allowlist: Regular-expression allowlist for Tensor data type, e.g., `""^int.*""`. This allowlist operates in a logical `AND` relation with the two allowlists above. tolerate_debug_op_creation_failures: (`bool`) whether debug op creation failures (e.g., due to dtype incompatibility) are to be tolerated by not throwing exceptions. global_step: (`int`) Optional global_step count for this debug tensor watch. reset_disk_byte_usage: (`bool`) whether to reset the tracked disk byte usage to zero (default: `False`)." 2454,watch_graph_with_denylists,tensorflow/tensorflow/python/debug/lib/debug_utils.py,202,function,"Add debug tensor watches, denylisting nodes and op types. This is similar to `watch_graph()`, but the node names and op types are denylisted, instead of allowlisted. N.B.: 1. Under certain circumstances, the `Tensor` may not get actually watched (e.g., if the node of the `Tensor` is constant-folded during runtime). 2. For debugging purposes, the `parallel_iteration` attribute of all `tf.while_loop`s in the graph is set to 1 to prevent any node from being executed multiple times concurrently. This change does not affect subsequent non-debugged runs of the same `tf.while_loop`s. Args: run_options: An instance of `config_pb2.RunOptions` to be modified. graph: An instance of `ops.Graph`. debug_ops: (`str` or `list` of `str`) name(s) of the debug op(s) to use. See the documentation of `watch_graph` for more details. debug_urls: URL(s) to send debug values to, e.g., `file:///tmp/tfdbg_dump_1`, `grpc://localhost:12345`. node_name_regex_denylist: Regular-expression denylist for node_name. This should be a string, e.g., `""(weight_[0-9]+|bias_.*)""`. op_type_regex_denylist: Regular-expression denylist for the op type of nodes, e.g., `""(Variable|Add)""`. If both node_name_regex_denylist and op_type_regex_denylist are set, the two filtering operations will occur in a logical `OR` relation. In other words, a node will be excluded if it hits either of the two denylists; a node will be included if and only if it hits neither of the denylists. tensor_dtype_regex_denylist: Regular-expression denylist for Tensor data type, e.g., `""^int.*""`. This denylist operates in a logical `OR` relation with the two denylists above. tolerate_debug_op_creation_failures: (`bool`) whether debug op creation failures (e.g., due to dtype incompatibility) are to be tolerated by not throwing exceptions. global_step: (`int`) Optional global_step count for this debug tensor watch. reset_disk_byte_usage: (`bool`) whether to reset the tracked disk byte usage to zero (default: `False`)." 2455,DebugUtilsTest,tensorflow/tensorflow/python/debug/lib/debug_utils_test.py,35,class, 2456,DebugIdentityV2OpTest,tensorflow/tensorflow/python/debug/lib/debug_v2_ops_test.py,43,class,"Tests for DebugIdentityV2Op: when DebugEventsWriter is initialized. DebugEventsWriter being initialized prior to DebugIdentityV2 ops being invoked for the first time is the typical case (e.g., tfdbg2 running on a local machine with only local devices.)" 2457,DebugIdentityV2OpUninitializedWriterTest,tensorflow/tensorflow/python/debug/lib/debug_v2_ops_test.py,231,class,"Tests for DebugIdentityV2Op: when DebugEventsWriter is not initialized. This case can occur when DebugIdentityV2Ops are running on a remote TensorFlow server (e.g., a TPU worker)."
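A sketch of the `watch_graph` flow implied above: populate a `RunOptions` proto before a `Session.run()` call so the watched tensors are dumped. The session, fetches, and regex are assumed/illustrative:

```py
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_utils

run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
    run_options,
    sess.graph,  # `sess` is an assumed pre-existing tf.compat.v1.Session
    debug_urls="file:///tmp/tfdbg_dump_1",
    node_name_regex_allowlist="(weight_[0-9]+|bias_.*)")
# sess.run(fetches, options=run_options) then dumps the watched tensors.
```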
2458,DebugNumericSummaryV2Test,tensorflow/tensorflow/python/debug/lib/debug_v2_ops_test.py,287,class, 2459,DistributedSessionDebugTest,tensorflow/tensorflow/python/debug/lib/dist_session_debug_grpc_test.py,48,class,Test the debugging of distributed sessions. 2460,is_op_type_function,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,61,function, 2461,_debug_identity_v2_grad,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,66,function,Gradient function for the DebugIdentityV2 op. 2462,_get_tfdbg_run_id,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,72,function, 2463,_get_id,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,76,function,Get a short unique ID. 2464,_concrete_tensor_to_proto,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,81,function, 2465,_DumpingCallback,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,85,class,An object holding the states surrounding the dumping callback. 2466,enable_dump_debug_info,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,687,function,"Enable dumping debugging information from a TensorFlow program. The debugging information is dumped to a directory on the file system specified as `dump_root`. The dumped debugging information can be ingested by debugger UIs. The files in the dump directory contain the following information: - TensorFlow Function construction (e.g., compilation of Python functions decorated with @tf.function), the op types, names (if available), context, the input and output tensors, and the associated stack traces. - Execution of TensorFlow operations (ops) and Functions and their stack traces, op types, names (if available) and contexts. In addition, depending on the value of the `tensor_debug_mode` argument (see Args section below), the value(s) of the output tensors or more concise summaries of the tensor values will be dumped. - A snapshot of Python source files involved in the execution of the TensorFlow program. Once enabled, the dumping can be disabled with the corresponding `disable_dump_debug_info()` method under the same Python namespace. Calling this method more than once with the same `dump_root` is idempotent. Calling this method more than once with different `tensor_debug_mode`s leads to a `ValueError`. Calling this method more than once with different `circular_buffer_size`s leads to a `ValueError`. Calling this method with a different `dump_root` abolishes the previously-enabled `dump_root`. Usage example: ```py tf.debugging.experimental.enable_dump_debug_info('/tmp/my-tfdbg-dumps') # Code to build, train and run your TensorFlow model... ``` NOTE: If your code is running on TPUs, be sure to call `tf.config.set_soft_device_placement(True)` before calling `tf.debugging.experimental.enable_dump_debug_info()` as this API uses automatic outside compilation on TPUs. For example: ```py tf.config.set_soft_device_placement(True) tf.debugging.experimental.enable_dump_debug_info( logdir, tensor_debug_mode=""FULL_HEALTH"") resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') strategy = tf.distribute.TPUStrategy(resolver) with strategy.scope(): # ... ``` Args: dump_root: The directory path where the dumping information will be written. tensor_debug_mode: Debug mode for tensor values, as a string. 
The currently supported options are: - ""NO_TENSOR"": (Default) Only traces the output tensors of all executed ops (including those executed eagerly at the Python level or as a part of a TensorFlow graph) and functions, while not extracting any information from the values of the tensors. - ""CURT_HEALTH"": For each floating-dtype tensor (e.g., tensors of dtypes such as `float32`, `float64` and `bfloat16`), extracts a binary bit indicating whether it contains any -infinity, +infinity or NaN. - ""CONCISE_HEALTH"": For each floating-dtype tensor, extract total element count, and counts of -infinity, +infinity and NaN elements. - ""FULL_HEALTH"": For each floating-dtype tensor, extracts the dtype, rank (number of dimensions), total element count, and counts of -infinity, +infinity and NaN elements. - ""SHAPE"": For each tensor (regardless of dtype), extracts its dtype, rank, total element count and shape. circular_buffer_size: Size of the circular buffers for execution events. These circular buffers are designed to reduce the overhead of debugging dumping. They hold the most recent debug events concerning eager execution of ops and `tf.function`s and traces of tensor values computed inside `tf.function`s. They are written to the file system only when the proper flushing method is called (see description of return values below). Expected to be an integer. If <= 0, the circular-buffer behavior will be disabled, i.e., the execution debug events will be written to the file writers in the same way as non-execution events such as op creations and source-file snapshots. op_regex: Dump data from only the tensors from op types that match the regular expression (through Python's `re.match()`). ""Op type"" refers to the names of the TensorFlow operations (e.g., ""MatMul"", ""LogSoftmax""), which may repeat in a TensorFlow function. It does *not* refer to the names of nodes (e.g., ""dense/MatMul"", ""dense_1/MatMul_1"") which are unique within a function. - Example 1: Dump tensor data from only MatMul and Relu ops `op_regex=""^(MatMul|Relu)$""`. - Example 2: Dump tensors from all ops *except* Relu: `op_regex=""(?!^Relu$)""`. This filter operates in a logical AND relation with `tensor_dtypes`. tensor_dtypes: Dump data from only the tensors of the specified dtypes. This optional argument can be in any of the following formats: - a list or tuple of `DType` objects or strings that can be converted to `DType` objects via `tf.as_dtype()`. Examples: - `tensor_dtype=[tf.float32, tf.float64]`, - `tensor_dtype=[""float32"", ""float64""]`, - `tensor_dtypes=(tf.int32, tf.bool)`, - `tensor_dtypes=(""int32"", ""bool"")` - a callable that takes a single `DType` argument and returns a Python `boolean` indicating whether the dtype is to be included in the data dumping. Examples: - `tensor_dtype=lambda dtype: dtype.is_integer`. This filter operates in a logical AND relation with `op_regex`. Returns: A DebugEventsWriter instance used by the dumping callback. The caller may use its flushing methods, including `FlushNonExecutionFiles()` and `FlushExecutionFiles()`." 2467,disable_dump_debug_info,tensorflow/tensorflow/python/debug/lib/dumping_callback.py,878,function,"Disable the currently-enabled debugging dumping. If the `enable_dump_debug_info()` method under the same Python namespace has been invoked before, calling this method disables it. If no call to `enable_dump_debug_info()` has been made, calling this method is a no-op. Calling this method more than once is idempotent."
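A sketch combining the options and return value documented above; the dump path is illustrative, and `FlushExecutionFiles()` is one of the flushing methods named in the Returns section:

```py
import tensorflow as tf

writer = tf.debugging.experimental.enable_dump_debug_info(
    "/tmp/tfdbg2-dumps",                 # dump_root (illustrative path)
    tensor_debug_mode="CONCISE_HEALTH",  # element and -Inf/+Inf/NaN counts
    circular_buffer_size=1000)
# ... build and run the model ...
writer.FlushExecutionFiles()  # force buffered execution events to disk
tf.debugging.experimental.disable_dump_debug_info()
```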
2468,DumpingCallbackTest,tensorflow/tensorflow/python/debug/lib/dumping_callback_test.py,53,class, 2469,DumpingCallbackTestBase,tensorflow/tensorflow/python/debug/lib/dumping_callback_test_lib.py,33,class,Base test-case class for tfdbg v2 callbacks. 2470,_state_change,tensorflow/tensorflow/python/debug/lib/grpc_debug_server.py,41,function, 2471,EventListenerBaseStreamHandler,tensorflow/tensorflow/python/debug/lib/grpc_debug_server.py,50,class,Per-stream handler of EventListener gRPC streams. 2472,EventListenerBaseServicer,tensorflow/tensorflow/python/debug/lib/grpc_debug_server.py,103,class,Base Python class for gRPC debug server. 2473,_get_dump_file_path,tensorflow/tensorflow/python/debug/lib/grpc_debug_test_server.py,47,function,"Get the file path of the dump file for a debug node. Args: dump_root: (str) Root dump directory. device_name: (str) Name of the device that the debug node resides on. debug_node_name: (str) Name of the debug node, e.g., cross_entropy/Log:0:DebugIdentity. Returns: (str) Full path of the dump file." 2474,EventListenerTestStreamHandler,tensorflow/tensorflow/python/debug/lib/grpc_debug_test_server.py,75,class,Implementation of EventListenerBaseStreamHandler that dumps to file. 2475,EventListenerTestServicer,tensorflow/tensorflow/python/debug/lib/grpc_debug_test_server.py,217,class,An implementation of EventListenerBaseServicer for testing. 2476,start_server_on_separate_thread,tensorflow/tensorflow/python/debug/lib/grpc_debug_test_server.py,368,function,"Create a test gRPC debug server and run on a separate thread. Args: dump_to_filesystem: (bool) whether the debug server will dump debug data to the filesystem. server_start_delay_sec: (float) amount of time (in sec) to delay the server start up for. poll_server: (bool) whether the server will be polled till success on startup. blocking: (bool) whether the server should be started in a blocking mode. toggle_watch_on_core_metadata: A list of (node_name, output_slot, debug_op) tuples to toggle the watchpoint status during the on_core_metadata calls (optional). Returns: server_port: (int) Port on which the server runs. debug_server_url: (str) grpc:// URL to the server. server_dump_dir: (str) The debug server's dump directory. server_thread: The server Thread object. server: The `EventListenerTestServicer` object. Raises: ValueError: If polling the server process for ready state is not successful within maximum polling count." 2477,_poll_server_till_success,tensorflow/tensorflow/python/debug/lib/grpc_debug_test_server.py,428,function,"Poll server until success or exceeding max polling count. Args: max_attempts: (int) How many times to poll at maximum sleep_per_poll_sec: (float) How many seconds to sleep for after each unsuccessful poll. debug_server_url: (str) gRPC URL to the debug server. dump_dir: (str) Dump directory to look for files in. If None, will directly check data from the server object. server: The server object. gpu_memory_fraction: (float) Fraction of GPU memory to be allocated for the Session used in server polling. Returns: (bool) Whether the polling succeeded within `max_attempts` attempts." 2478,LargeGraphAndLargeTensorsDebugTest,tensorflow/tensorflow/python/debug/lib/grpc_large_data_test.py,41,class, 2479,parse_cluster_spec,tensorflow/tensorflow/python/debug/lib/grpc_tensorflow_server.py,45,function,"Parse content of cluster_spec string and inject info into cluster protobuf. Args: cluster_spec: cluster specification string, e.g., ""local|localhost:2222;localhost:2223"" cluster: cluster protobuf.
verbose: If verbose logging is requested. Raises: ValueError: if the cluster_spec string is invalid." 2480,main,tensorflow/tensorflow/python/debug/lib/grpc_tensorflow_server.py,91,function, 2481,ProfileDatum,tensorflow/tensorflow/python/debug/lib/profiling.py,24,class,Profile data point. 2482,AggregateProfile,tensorflow/tensorflow/python/debug/lib/profiling.py,65,class,Profile summary data for aggregating a number of ProfileDatum. 2483,AggregateProfile,tensorflow/tensorflow/python/debug/lib/profiling_test.py,27,class, 2484,SessionDebugFileTest,tensorflow/tensorflow/python/debug/lib/session_debug_file_test.py,38,class, 2485,SessionDebugConcurrentTest,tensorflow/tensorflow/python/debug/lib/session_debug_file_test.py,118,class, 2486,GrpcDebugServerTest,tensorflow/tensorflow/python/debug/lib/session_debug_grpc_test.py,48,class, 2487,SessionDebugGrpcTest,tensorflow/tensorflow/python/debug/lib/session_debug_grpc_test.py,96,class, 2488,SessionDebugConcurrentTest,tensorflow/tensorflow/python/debug/lib/session_debug_grpc_test.py,321,class, 2489,SessionDebugGrpcGatingTest,tensorflow/tensorflow/python/debug/lib/session_debug_grpc_test.py,358,class,Test server gating of debug ops. 2490,DelayedDebugServerTest,tensorflow/tensorflow/python/debug/lib/session_debug_grpc_test.py,736,class, 2491,SessionDebugMultiGPUTest,tensorflow/tensorflow/python/debug/lib/session_debug_multi_gpu_test.py,37,class, 2492,no_rewrite_session_config,tensorflow/tensorflow/python/debug/lib/session_debug_testlib.py,58,function, 2493,_RNNCellForTest,tensorflow/tensorflow/python/debug/lib/session_debug_testlib.py,67,class,RNN cell for testing. 2494,SessionDebugTestBase,tensorflow/tensorflow/python/debug/lib/session_debug_testlib.py,88,class,Base class for unit tests of tfdbg running with tf.Session. 2495,DebugConcurrentRunCallsTest,tensorflow/tensorflow/python/debug/lib/session_debug_testlib.py,1476,class,Test for debugging concurrent Session.run() calls. 2496,_load_debugged_source_file,tensorflow/tensorflow/python/debug/lib/source_remote.py,34,function, 2497,_string_to_id,tensorflow/tensorflow/python/debug/lib/source_remote.py,47,function, 2498,_format_origin_stack,tensorflow/tensorflow/python/debug/lib/source_remote.py,53,function,"Format a traceback stack for a `CallTraceback` proto. Args: origin_stack: The stack list as returned by `traceback.extract_stack()`. call_traceback_proto: A `CallTraceback` proto whose fields are to be populated." 2499,_source_file_paths_outside_tensorflow_py_library,tensorflow/tensorflow/python/debug/lib/source_remote.py,76,function,"Extract source file paths outside TensorFlow Python library. Args: code_defs: An iterable of `CodeDef` protos, i.e., an iterable of stack traces. id_to_string: A proto map from integer ids to strings. Returns: An iterable of source file paths outside the TensorFlow Python library." 2500,_send_call_tracebacks,tensorflow/tensorflow/python/debug/lib/source_remote.py,98,function,"Send the tracebacks of a TensorFlow execution call to gRPC debug server(s). This applies to graph execution (`tf.Session.run()`) calls and eager execution calls. If `send_source`, also sends the underlying source files outside the TensorFlow library. Args: destinations: gRPC destination addresses, a `str` or a `list` of `str`s, e.g., ""localhost:4242"". If a `list`, gRPC requests containing the same `CallTraceback` proto payload will be sent to all the destinations. origin_stack: The traceback stack for the origin of the execution call.
For graph execution, this is the traceback of the `tf.Session.run()` invocation. For eager execution, this is the traceback of the Python line that executes the eager operation. is_eager_execution: (`bool`) whether an eager execution call (i.e., not a `tf.Session.run` or derived methods) is being sent. call_key: The key of the execution call, as a string. For graph execution, this is a string describing the feeds, fetches (and targets) names of the `tf.Session.run` call. For eager execution, this is ignored. graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`), which contains op tracebacks, if applicable. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent." 2501,send_graph_tracebacks,tensorflow/tensorflow/python/debug/lib/source_remote.py,176,function,"Send the tracebacks of a graph execution call to debug server(s). Args: destinations: gRPC destination addresses, a `str` or a `list` of `str`s, e.g., ""localhost:4242"". If a `list`, gRPC requests containing the same `CallTraceback` proto payload will be sent to all the destinations. run_key: A string describing the feeds, fetches (and targets) names of the `tf.Session.run` call. origin_stack: The traceback of the `tf.Session.run()` invocation. graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`), which contains op tracebacks. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent." 2502,send_eager_tracebacks,tensorflow/tensorflow/python/debug/lib/source_remote.py,200,function,"Send the tracebacks of an eager execution call to debug server(s). Args: destinations: gRPC destination addresses, a `str` or a `list` of `str`s, e.g., ""localhost:4242"". If a `list`, gRPC requests containing the same `CallTraceback` proto payload will be sent to all the destinations. origin_stack: The traceback of the eager operation invocation. send_source: Whether the source files involved in the op tracebacks but outside the TensorFlow library are to be sent." 2503,line_number_above,tensorflow/tensorflow/python/debug/lib/source_remote_test.py,42,function, 2504,SendTracebacksTest,tensorflow/tensorflow/python/debug/lib/source_remote_test.py,46,class, 2505,_norm_abs_path,tensorflow/tensorflow/python/debug/lib/source_utils.py,43,function, 2506,is_extension_uncompiled_python_source,tensorflow/tensorflow/python/debug/lib/source_utils.py,47,function, 2507,is_extension_compiled_python_source,tensorflow/tensorflow/python/debug/lib/source_utils.py,52,function, 2508,_convert_watch_key_to_tensor_name,tensorflow/tensorflow/python/debug/lib/source_utils.py,57,function, 2509,guess_is_tensorflow_py_library,tensorflow/tensorflow/python/debug/lib/source_utils.py,61,function,"Guess whether a Python source file is a part of the tensorflow library. Special cases: 1) Returns False for unit-test files in the library (*_test.py), 2) Returns False for files under python/debug/examples. Args: py_file_path: full path of the Python source file in question. Returns: (`bool`) Whether the file is inferred to be a part of the tensorflow library." 2510,load_source,tensorflow/tensorflow/python/debug/lib/source_utils.py,86,function,"Load the content of a Python source code file. This function covers the following case: 1. source_file_path points to an existing Python (.py) file on the file system. 2. source_file_path is a path within a .par file (i.e., a zip-compressed, self-contained Python executable). Args: source_file_path: Path to the Python source file to read.
Returns: A length-2 tuple: - Lines of the source file, as a `list` of `str`s. - The width of the string needed to show the line number in the file. This is calculated based on the number of lines in the source file. Raises: IOError: if loading is unsuccessful." 2511,_try_load_par_source,tensorflow/tensorflow/python/debug/lib/source_utils.py,123,function,"Try loading the source code inside a .par file. A .par file is a zip-compressed, self-contained Python executable. It contains the content of individual Python source files that can be read only through extracting from the zip file. Args: source_file_path: The full path to the file inside the .par file. This path should include the path to the .par file itself, followed by the intra-par path, e.g., ""/tmp/my_executable.par/org-tensorflow/tensorflow/python/foo/bar.py"". Returns: If successful, lines of the source file as a `list` of `str`s. Else, `None`." 2512,annotate_source,tensorflow/tensorflow/python/debug/lib/source_utils.py,156,function,"Annotate a Python source file with a list of ops created at each line. (The annotation doesn't change the source file itself.) Args: dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph has been loaded. source_file_path: (`str`) Path to the source file being annotated. do_dumped_tensors: (`bool`) Whether dumped Tensors, instead of ops, are to be used to annotate the source file. file_stack_top: (`bool`) Whether only the top stack trace in the specified source file is to be annotated. min_line: (`None` or `int`) The 1-based line to start annotating the source file from (inclusive). max_line: (`None` or `int`) The 1-based line number to end the annotation at (exclusive). Returns: A `dict` mapping 1-based line number to a list of op name(s) created at that line, or tensor names if `do_dumped_tensors` is True. Raises: ValueError: If the dump object does not have a Python graph set." 2513,list_source_files_against_dump,tensorflow/tensorflow/python/debug/lib/source_utils.py,223,function,"Generate a list of source files with information regarding ops and tensors. Args: dump: (`DebugDumpDir`) A `DebugDumpDir` object of which the Python graph has been loaded. path_regex_allowlist: A regular-expression filter for source file path. node_name_regex_allowlist: A regular-expression filter for node names. Returns: A list of tuples regarding the Python source files involved in constructing the ops and tensors contained in `dump`. Each tuple is: (source_file_path, is_tf_library, num_nodes, num_tensors, num_dumps, first_line) is_tf_library: (`bool`) A guess of whether the file belongs to the TensorFlow Python library. num_nodes: How many nodes were created by lines of this source file. These include nodes with dumps and those without. num_tensors: How many Tensors were created by lines of this source file. These include Tensors with dumps and those without. num_dumps: How many debug Tensor dumps were from nodes (and Tensors) that were created by this source file. first_line: The first line number (1-based) that created any nodes or Tensors in this source file. The list is sorted by ascending order of source_file_path. Raises: ValueError: If the dump object does not have a Python graph set." 2514,annotate_source_against_profile,tensorflow/tensorflow/python/debug/lib/source_utils.py,324,function,"Annotate a Python source file with profiling information at each line. (The annotation doesn't change the source file itself.) Args: profile_data: (`list` of `ProfileDatum`) A list of `ProfileDatum`.
source_file_path: (`str`) Path to the source file being annotated. node_name_filter: Regular expression to filter by node name. op_type_filter: Regular expression to filter by op type. min_line: (`None` or `int`) The 1-based line to start annotating the source file from (inclusive). max_line: (`None` or `int`) The 1-based line number to end the annotation at (exclusive). Returns: A `dict` mapping 1-based line number to the namedtuple `profiling.LineOrFuncProfileSummary`." 2515,line_number_above,tensorflow/tensorflow/python/debug/lib/source_utils_test.py,47,function,"Get lineno of the AST node immediately above this function's call site. It is assumed that there is no empty line(s) between the call site and the preceding AST node. Returns: The lineno of the preceding AST node, at the same level of the AST. If the preceding AST spans multiple lines: - In Python 3.8+, the lineno of the first line is returned. - In older Python versions, the lineno of the last line is returned." 2516,_find_preceding_ast_node,tensorflow/tensorflow/python/debug/lib/source_utils_test.py,74,function,Find the ast node immediately before and not including lineno. 2517,GuessIsTensorFlowLibraryTest,tensorflow/tensorflow/python/debug/lib/source_utils_test.py,85,class, 2518,SourceHelperTest,tensorflow/tensorflow/python/debug/lib/source_utils_test.py,141,class, 2519,ListSourceAgainstDumpTest,tensorflow/tensorflow/python/debug/lib/source_utils_test.py,324,class, 2520,DumpingDebugWrapperDiskUsageLimitTest,tensorflow/tensorflow/python/debug/wrappers/disk_usage_test.py,36,class, 2521,DumpingDebugWrapperSession,tensorflow/tensorflow/python/debug/wrappers/dumping_wrapper.py,31,class,Debug Session wrapper that dumps debug data to filesystem. 2522,DumpingDebugWrapperSessionTest,tensorflow/tensorflow/python/debug/wrappers/dumping_wrapper_test.py,44,class, 2523,_check_type,tensorflow/tensorflow/python/debug/wrappers/framework.py,119,function,"Check if an object is of the expected type. Args: obj: The object being checked. expected_types: (`type` or an iterable of `type`s) The expected `type`(s) of obj. Raises: TypeError: If obj is not an instance of expected_type." 2524,OnSessionInitRequest,tensorflow/tensorflow/python/debug/wrappers/framework.py,135,class,"Request to an on-session-init callback. This callback is invoked during the __init__ call to a debug-wrapper session." 2525,OnSessionInitAction,tensorflow/tensorflow/python/debug/wrappers/framework.py,152,class,Enum-like values for possible action to take on session init. 2526,OnSessionInitResponse,tensorflow/tensorflow/python/debug/wrappers/framework.py,168,class,Response from an on-session-init callback. 2527,OnRunStartRequest,tensorflow/tensorflow/python/debug/wrappers/framework.py,181,class,"Request to an on-run-start callback. This callback is invoked during a run() call of the debug-wrapper session, immediately after the run() call counter is incremented." 2528,OnRunStartAction,tensorflow/tensorflow/python/debug/wrappers/framework.py,212,class,Enum-like values for possible action to take on start of a run() call. 2529,OnRunStartResponse,tensorflow/tensorflow/python/debug/wrappers/framework.py,226,class,"Response from an on-run-start callback. The caller of the callback can use this response object to specify what action the debug-wrapper session actually takes on the run() call." 2530,OnRunEndRequest,tensorflow/tensorflow/python/debug/wrappers/framework.py,274,class,"Request to an on-run-end callback. The callback is invoked immediately before the wrapped run() call ends."
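A sketch of the `annotate_source` contract described above; `dump` is an assumed `DebugDumpDir` with its Python graph loaded, and the source path is illustrative:

```py
from tensorflow.python.debug.lib import source_utils

line_to_op_names = source_utils.annotate_source(dump, "/path/to/model.py")
for lineno in sorted(line_to_op_names):
    # Keys are 1-based line numbers; values list the ops created at that line.
    print(lineno, line_to_op_names[lineno])
```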
2531,OnRunEndResponse,tensorflow/tensorflow/python/debug/wrappers/framework.py,308,class,Response from an on-run-end callback. 2532,BaseDebugWrapperSession,tensorflow/tensorflow/python/debug/wrappers/framework.py,318,class,"Base class of debug-wrapper session classes. Concrete classes that inherit from this class need to implement the abstract methods such as on_session_init, on_run_start and on_run_end." 2533,WatchOptions,tensorflow/tensorflow/python/debug/wrappers/framework.py,835,class,Type for return values of watch_fn. 2534,NonInteractiveDebugWrapperSession,tensorflow/tensorflow/python/debug/wrappers/framework.py,885,class,"Base class for non-interactive (i.e., non-CLI) debug wrapper sessions." 2535,TestDebugWrapperSession,tensorflow/tensorflow/python/debug/wrappers/framework_test.py,47,class,A concrete implementation of BaseDebugWrapperSession for test. 2536,TestDebugWrapperSessionBadAction,tensorflow/tensorflow/python/debug/wrappers/framework_test.py,91,class,"A concrete implementation of BaseDebugWrapperSession for test. This class intentionally puts a bad action value in OnSessionInitResponse and/or in OnRunStartAction to test the handling of such invalid cases." 2537,DebugWrapperSessionTest,tensorflow/tensorflow/python/debug/wrappers/framework_test.py,145,class, 2538,_is_public_method_name,tensorflow/tensorflow/python/debug/wrappers/framework_test.py,411,function, 2539,SessionWrapperPublicMethodParityTest,tensorflow/tensorflow/python/debug/wrappers/framework_test.py,416,class, 2540,publish_traceback,tensorflow/tensorflow/python/debug/wrappers/grpc_wrapper.py,31,function,"Publish traceback and source code if graph version is new. `graph.version` is compared with `old_graph_version`. If the former is higher (i.e., newer), the graph traceback and the associated source code are sent to the debug server at the specified gRPC URLs. Args: debug_server_urls: A single gRPC debug server URL as a `str` or a `list` of debug server URLs. graph: A Python `tf.Graph` object. feed_dict: Feed dictionary given to the `Session.run()` call. fetches: Fetches from the `Session.run()` call. old_graph_version: Old graph version to compare to. Returns: If `graph.version > old_graph_version`, the new graph version as an `int`. Else, the `old_graph_version` is returned." 2541,GrpcDebugWrapperSession,tensorflow/tensorflow/python/debug/wrappers/grpc_wrapper.py,69,class,Debug Session wrapper that sends debug data to gRPC stream(s). 2542,_signal_handler,tensorflow/tensorflow/python/debug/wrappers/grpc_wrapper.py,144,function, 2543,register_signal_handler,tensorflow/tensorflow/python/debug/wrappers/grpc_wrapper.py,154,function, 2544,TensorBoardDebugWrapperSession,tensorflow/tensorflow/python/debug/wrappers/grpc_wrapper.py,162,class,"A tfdbg Session wrapper that can be used with TensorBoard Debugger Plugin. This wrapper is the same as `GrpcDebugWrapperSession`, except that it uses a predefined `watch_fn` that 1) uses `DebugIdentity` debug ops with the `gated_grpc` attribute set to `True` to allow the interactive enabling and disabling of tensor breakpoints. 2) watches all tensors in the graph. This saves the need for the user to define a `watch_fn`." 2545,LocalCLIDebugHook,tensorflow/tensorflow/python/debug/wrappers/hooks.py,30,class,"Command-line-interface debugger hook. Can be used as a hook for `tf.compat.v1.train.MonitoredSession`s and `tf.estimator.Estimator`s. Provides a substitute for `tfdbg.LocalCLIDebugWrapperSession` in cases where the session is not directly available."
2546,DumpingDebugHook,tensorflow/tensorflow/python/debug/wrappers/hooks.py,151,class,"A debugger hook that dumps debug data to the filesystem. Can be used as a hook for `tf.compat.v1.train.MonitoredSession`s and `tf.estimator.Estimator`s." 2547,GrpcDebugHook,tensorflow/tensorflow/python/debug/wrappers/hooks.py,223,class,"A hook that streams debugger-related events to any grpc_debug_server. For example, the debugger data server is a grpc_debug_server. The debugger data server writes debugger-related events it receives via GRPC to logdir. This enables debugging features in Tensorboard such as health pills. When the arguments of debug_utils.watch_graph change, strongly consider changing arguments here too so that features are available to tflearn users. Can be used as a hook for `tf.compat.v1.train.MonitoredSession`s and `tf.estimator.Estimator`s." 2548,TensorBoardDebugHook,tensorflow/tensorflow/python/debug/wrappers/hooks.py,305,class,"A tfdbg hook that can be used with TensorBoard Debugger Plugin. This hook is the same as `GrpcDebugHook`, except that it uses a predefined `watch_fn` that 1) uses `DebugIdentity` debug ops with the `gated_grpc` attribute set to `True`, to allow the interactive enabling and disabling of tensor breakpoints. 2) watches all tensors in the graph. This saves the need for the user to define a `watch_fn`." 2549,LocalCLIDebugWrapperSession,tensorflow/tensorflow/python/debug/wrappers/local_cli_wrapper.py,43,class,"Concrete subclass of BaseDebugWrapperSession implementing a local CLI. This class has all the methods that a `session.Session` object has, in order to support debugging with minimal code changes. Invoking its `run()` method will launch the command-line interface (CLI) of tfdbg." 2550,LocalCLIDebuggerWrapperSessionForTest,tensorflow/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py,52,class,"Subclasses the wrapper class for testing. Overrides its CLI-related methods for headless testing environments. Inserts observer variables for assertions." 2551,LocalCLIDebugWrapperSessionTest,tensorflow/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py,136,class, 2552,_flatten_tensors,tensorflow/tensorflow/python/distribute/all_reduce.py,31,function,"Check tensors for isomorphism and flatten. Args: tensors: list of `tf.Tensor` which must all have the same shape. Returns: tensors: a list of `tf.Tensor` which are flattened (1D) views of tensors shape: the original shape of each element of input tensors Raises: ValueError: tensors are empty or non-isomorphic or have unknown shape." 2553,_reshape_tensors,tensorflow/tensorflow/python/distribute/all_reduce.py,60,function,"Reshape tensors flattened by _flatten_tensors. Args: tensors: list of `tf.Tensor` of identical length 1D tensors. shape: list of integers describing the desired shape. Product of the elements must equal the length of each tensor. Returns: list of `tf.Tensor` which are the reshaped inputs." 2554,_padded_split,tensorflow/tensorflow/python/distribute/all_reduce.py,78,function,"Like split for 1D tensors but pads out the case where len % pieces != 0. Args: tensor: `tf.Tensor` that must be 1D. pieces: a positive integer specifying the number of pieces into which tensor should be split. Returns: list of `tf.Tensor` of length pieces, which hold the values of the input tensor, in order. The final tensor may be zero-padded on the end to make its size equal to those of all of the other tensors. Raises: ValueError: The input tensor is not 1D."
2555,_strip_padding,tensorflow/tensorflow/python/distribute/all_reduce.py,131,function,"Strip the suffix padding added by _padded_split. Args: tensors: list of `tf.Tensor` of identical length 1D tensors. pad_len: number of elements to be stripped from the end of each tensor. Returns: list of `tf.Tensor` which are the stripped inputs. Raises: ValueError: tensors must be a non-empty list of 1D tensors, and each must be longer than pad_len." 2556,_ragged_split,tensorflow/tensorflow/python/distribute/all_reduce.py,160,function,"Like split for 1D tensors but allows case where len % pieces != 0. Args: tensor: `tf.Tensor` that must be 1D. pieces: a positive integer specifying the number of pieces into which tensor should be split. Returns: list of `tf.Tensor` of length pieces, which hold the values of the input tensor, in order. The final tensor may be shorter than the others, which will all be of equal length. Raises: ValueError: input tensor must be 1D." 2557,_ring_permutations,tensorflow/tensorflow/python/distribute/all_reduce.py,193,function,"Generate an array of device index arrays, one for each subchunk. In the basic ring reduction algorithm there are size(T)/num_devices data chunks and each device processes one chunk per tick, i.e. sending one chunk and receiving one chunk. The idea of subchunking is that each device processes num_subchunks smaller data regions per tick, and the ring rank permutation is different for each subchunk index so that a device is potentially sending to and receiving from num_subchunks different other devices at each tick. Where multiple independent data channels exist between devices, this strategy supplies a method of using them in parallel. Args: num_workers: number of worker tasks num_subchunks: number of subchunks into which to divide each per-GPU chunk. gpu_perm: an array of integers in [0, num_gpus-1] giving the default ring order of GPUs at each worker. Other permutations will be generated by rotating this array and splicing together per-worker instances. Raises: ValueError: the number of subchunks may not exceed the number of GPUs. Returns: pred_by_s_d: list of lists that maps (by index) from (subchunk, dev) to preceding device in the permutation for that subchunk. The device index of GPU i at worker j is i + (j * num_gpus). rank_by_s_d: list of lists that maps (by index) from (subchunk, dev) to local rank of device d in the permutation for that subchunk." 2558,build_ring_all_reduce,tensorflow/tensorflow/python/distribute/all_reduce.py,254,function,"Construct a subgraph performing a ring-style all-reduce of input_tensors. Args: input_tensors: a list of `tf.Tensor` objects, which must all have the same shape and type. num_workers: number of worker tasks spanned by input_tensors. num_subchunks: number of subchunks each device should process in one tick. gpu_perm: a list of ints giving a ring-wise rank ordering of GPUs at each worker. All workers must have the same number of GPUs with the same rank ordering. If NVLINK is available, this should be a ring order supported by NVLINK edges. red_op: a binary operator for elementwise reduction. un_op: an optional unary operator to apply to fully reduced values. Raises: ValueError: empty input_tensors or they don't all have same size. Returns: a list of `tf.Tensor` identical sum-reductions of input_tensors." 2559,_build_ring_gather,tensorflow/tensorflow/python/distribute/all_reduce.py,297,function,"Construct a subgraph for the first (reduction) pass of ring all-reduce.
Args: input_tensors: a list of `tf.Tensor` 1D input tensors of same shape and type. devices: array of device name strings num_subchunks: number of subchunks each device should process in one tick. pred_by_s_d: as produced by _ring_permutations rank_by_s_d: as produced by _ring_permutations red_op: a binary operator for elementwise reduction Raises: ValueError: tensors must all be one dimensional. Returns: list of list of `tf.Tensor` of (partially) reduced values where exactly num_subchunks chunks at each device are fully reduced." 2560,_apply_unary_to_chunks,tensorflow/tensorflow/python/distribute/all_reduce.py,359,function,"Apply a unary op to each tensor in chunks_by_dev, on same device. Args: f: a unary function over `tf.Tensor`. chunks_by_dev: list of lists of `tf.Tensor`. Returns: new list of lists of `tf.Tensor` with the same structure as chunks_by_dev containing the derived tensors." 2561,_build_ring_scatter,tensorflow/tensorflow/python/distribute/all_reduce.py,377,function,"Construct subgraph for second (scatter) pass of ring all-reduce. Args: pred_by_s_d: as produced by _ring_permutations rank_by_s_d: as produced by _ring_permutations chunks_by_dev: list of list of `tf.Tensor` indexed by ints (device, chunk) Raises: ValueError: chunks_by_dev is not well-formed Returns: list of `tf.Tensor` which are the fully reduced tensors, one at each device corresponding to the outer dimension of chunks_by_dev." 2562,build_recursive_hd_all_reduce,tensorflow/tensorflow/python/distribute/all_reduce.py,426,function,"Construct a subgraph for recursive halving-doubling all-reduce. The recursive halving-doubling algorithm is described in (Thakur et al., 2005). The concept is to arrange the participating n devices in a linear sequence where devices exchange data pairwise with one other device in each round. During the gather phase there are lg(n) rounds where devices exchange increasingly smaller sub-tensors with another device at increasingly greater distances, until at the top each device has 1/n of the fully reduced values. During the scatter phase each device exchanges its fully reduced sub-tensor (which doubles in length at each round) with one other device at increasingly smaller distances until each device has all of the fully reduced values. Note: this preliminary version requires that len(input_tensors) be a power of 2. TODO(tucker): relax this restriction. Also, the number of elements in each tensor must be divisible by 2^h where h is the number of hops in each phase. This will also be relaxed in the future with edge-case specific logic. Args: input_tensors: list of `tf.Tensor` to be elementwise reduced. red_op: a binary elementwise reduction Op. un_op: an optional unary elementwise Op to apply to reduced values. Returns: list of `tf.Tensor` which are the fully reduced tensors, one at each device of input_tensors. Raises: ValueError: num_devices not a power of 2, or tensor len not divisible by 2 the proper number of times. References: Optimization of Collective Communication Operations in MPICH: [Thakur et al., 2005] (https://journals.sagepub.com/doi/abs/10.1177/1094342005051521) ([pdf](http://wwwi10.lrr.in.tum.de/~gerndt/home/Teaching/HPCSeminar/mpich_multi_coll.pdf))" 2563,_build_recursive_hd_gather,tensorflow/tensorflow/python/distribute/all_reduce.py,480,function,"Construct the gather phase of recursive halving-doubling all-reduce. Args: input_tensors: list of `tf.Tensor` to be elementwise reduced.
devices: a list of strings naming the devices hosting input_tensors, which will also be used to host the (partial) reduction values. red_op: a binary elementwise reduction Op. Returns: list of `tf.Tensor` which are the fully reduced tensor shards. Raises: ValueError: num_devices not a power of 2, or tensor len not divisible by 2 the proper number of times." 2564,_build_recursive_hd_scatter,tensorflow/tensorflow/python/distribute/all_reduce.py,521,function,"Construct the scatter phase of recursive halving-doubling all-reduce. Args: input_tensors: list of `tf.Tensor` that are fully-reduced shards. devices: a list of strings naming the devices on which the reconstituted full tensors should be placed. Returns: list of `tf.Tensor` which are the fully reduced tensors." 2565,build_shuffle_all_reduce,tensorflow/tensorflow/python/distribute/all_reduce.py,558,function,"Construct a subgraph for shuffle all-reduce. Shuffle reduce is essentially the algorithm implemented when using parameter servers. Suppose tensor length is n, there are d devices and g gather shards. Each device sends a n/g length sub-tensor to each gather shard. The gather shards perform a reduction across d fragments, then broadcast the result back to each device. The devices then join the g fully reduced fragments they receive from the shards. The gather shards could perform d-1 pairwise reductions, or one d-way reduction. The first is better where reduction Op time is low compared to transmission time, the second better in the other case. Args: input_tensors: list of `tf.Tensor` values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: an n-array elementwise reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of `tf.Tensor` which are the fully reduced tensors." 2566,_build_shuffle_gather,tensorflow/tensorflow/python/distribute/all_reduce.py,592,function,"Construct the gather (concentrate and reduce) phase of shuffle all-reduce. Args: input_tensors: list of `tf.Tensor` values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: the binary reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of `tf.Tensor` which are the fully reduced shards. Raises: ValueError: inputs not well-formed." 2567,_build_shuffle_scatter,tensorflow/tensorflow/python/distribute/all_reduce.py,629,function,"Build the scatter phase of shuffle all-reduce. Args: reduced_shards: list of `tf.Tensor` fully reduced shards dst_devices: list of names of devices at which the fully-reduced value should be reconstituted. Returns: list of `tf.Tensor` scattered tensors." 2568,_split_by_task,tensorflow/tensorflow/python/distribute/all_reduce.py,648,function,"Partition devices and values by common task. Args: devices: list of device name strings values: list of `tf.Tensor` of same length as devices. Returns: (per_task_devices, per_task_values) where both values are lists of lists with isomorphic structure: the outer list is indexed by task, and the inner list has length of the number of values belonging to that task. per_task_devices contains the specific devices to which the values are local, and per_task_values contains the corresponding values. Raises: ValueError: devices must be same length as values." 2569,build_nccl_all_reduce,tensorflow/tensorflow/python/distribute/all_reduce.py,685,function,"Build a subgraph that does one full all-reduce, using NCCL. 
Args: input_tensors: list of `tf.Tensor` of same-shape and type values to be reduced. red_op: binary elementwise reduction operator. Must be one of {tf.add} un_op: optional unary elementwise Op to apply to fully-reduced values. Returns: list of `tf.Tensor` of reduced values. Raises: ValueError: red_op not supported." 2570,_build_nccl_hybrid,tensorflow/tensorflow/python/distribute/all_reduce.py,714,function,"Construct a subgraph for NCCL hybrid all-reduce. Args: input_tensors: list of `tf.Tensor` of same-shape and type values to be reduced. red_op: binary elementwise reduction operator. upper_level_f: function for reducing one value per worker, across workers. Returns: list of `tf.Tensor` of reduced values. Raises: ValueError: inputs not well-formed." 2571,_reduce_non_singleton,tensorflow/tensorflow/python/distribute/all_reduce.py,765,function,"If len(input_tensors) > 1, apply red_f, else apply un_op." 2572,build_nccl_then_ring,tensorflow/tensorflow/python/distribute/all_reduce.py,779,function,"Construct hybrid of NCCL within workers, Ring across workers." 2573,build_nccl_then_recursive_hd,tensorflow/tensorflow/python/distribute/all_reduce.py,788,function,"Construct hybrid of NCCL within workers, Recursive-HD across workers." 2574,build_nccl_then_shuffle,tensorflow/tensorflow/python/distribute/all_reduce.py,794,function,"Construct hybrid of NCCL within workers, Shuffle across workers." 2575,_build_shuffle_hybrid,tensorflow/tensorflow/python/distribute/all_reduce.py,803,function,"Construct a subgraph for Shuffle hybrid all-reduce. Args: input_tensors: list of `tf.Tensor` of same-shape and type values to be reduced. gather_devices: list of device names on which to host gather shards. red_op: binary elementwise reduction operator. upper_level_f: function for reducing one value per worker, across workers. Returns: list of `tf.Tensor` of reduced values. Raises: ValueError: inputs not well-formed." 2576,build_shuffle_then_ring,tensorflow/tensorflow/python/distribute/all_reduce.py,845,function,"Construct hybrid of Shuffle within workers, Ring across workers." 2577,build_shuffle_then_shuffle,tensorflow/tensorflow/python/distribute/all_reduce.py,857,function,"Construct hybrid of Shuffle within workers, Shuffle across workers." 2578,AllReduceTest,tensorflow/tensorflow/python/distribute/all_reduce_test.py,38,class, 2579,CentralStorageStrategy,tensorflow/tensorflow/python/distribute/central_storage_strategy.py,28,class,"A one-machine strategy that puts all variables on a single device. Variables are assigned to local CPU or the only GPU. If there is more than one GPU, compute operations (other than variable update operations) will be replicated across all GPUs.
For example: ``` strategy = tf.distribute.experimental.CentralStorageStrategy() # Create a dataset ds = tf.data.Dataset.range(5).batch(2) # Distribute that dataset dist_dataset = strategy.experimental_distribute_dataset(ds) with strategy.scope(): @tf.function def train_step(val): return val + 1 # Iterate over the distributed dataset for x in dist_dataset: # process dataset elements strategy.run(train_step, args=(x,)) ```" 2580,CentralStorageStrategyV1,tensorflow/tensorflow/python/distribute/central_storage_strategy.py,256,class, 2581,_create_checkpoints,tensorflow/tensorflow/python/distribute/checkpoint_utils_test.py,42,function, 2582,CheckpointUtilsWithDistributionStrategyTest,tensorflow/tensorflow/python/distribute/checkpoint_utils_test.py,58,class, 2583,TrainingCheckpointTests,tensorflow/tensorflow/python/distribute/checkpointing_test.py,32,class, 2584,CollectiveAllReduceStrategy,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy.py,48,class,"A distribution strategy for synchronous training on multiple workers. This strategy implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to `tf.distribute.MirroredStrategy`, it replicates all variables and computations to each local device. The difference is that it uses a distributed collective implementation (e.g. all-reduce), so that multiple workers can work together. You need to launch your program on each worker and configure `cluster_resolver` correctly. For example, if you are using `tf.distribute.cluster_resolver.TFConfigClusterResolver`, each worker needs to have its corresponding `task_type` and `task_id` set in the `TF_CONFIG` environment variable. Your program runs on each worker as-is. Note that collectives require each worker to participate. All `tf.distribute` and non-`tf.distribute` APIs may use collectives internally, e.g. checkpointing and saving, since reading a `tf.Variable` with `tf.VariableSynchronization.ON_READ` all-reduces the value. Therefore it's recommended to run exactly the same program on each worker. Dispatching based on `task_type` or `task_id` of the worker is error-prone. `cluster_resolver.num_accelerators()` determines the number of GPUs the strategy uses. If it's zero, the strategy uses the CPU. All workers need to use the same number of devices, otherwise the behavior is undefined. This strategy is not intended for TPU. Use `tf.distribute.experimental.TPUStrategy` instead. __Saving__ You need to save and checkpoint on all workers instead of just one. This is because variables whose synchronization is ON_READ trigger aggregation during saving. It's recommended to save to a different path on each worker to avoid race conditions. Each worker saves the same thing. See [Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading) tutorial for examples. __Known Issues__ * `tf.distribute.cluster_resolver.TFConfigClusterResolver` does not return the correct number of accelerators. The strategy uses all available GPUs if `cluster_resolver` is `tf.distribute.cluster_resolver.TFConfigClusterResolver` or `None`. * In eager mode, the strategy needs to be created before calling any other Tensorflow API."
2585,CollectiveAllReduceStrategyV1,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy.py,153,class, 2586,CollectiveAllReduceExtended,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy.py,176,class,Implementation of CollectiveAllReduceStrategy. 2587,create_test_objects,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy_test.py,59,function, 2588,CollectiveAllReduceStrategyTestBase,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy_test.py,86,class, 2589,DistributedCollectiveAllReduceStrategyTest,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy_test.py,266,class, 2590,DistributedCollectiveAllReduceStrategyTestWithChief,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy_test.py,444,class, 2591,LocalCollectiveAllReduceStrategy,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy_test.py,468,class, 2592,LogicalDeviceTest,tensorflow/tensorflow/python/distribute/collective_all_reduce_strategy_test.py,564,class, 2593,Hints,tensorflow/tensorflow/python/distribute/collective_util.py,25,class,"Hints for collective operations like AllReduce. This can be passed to methods like `tf.distribute.get_replica_context().all_reduce()` to optimize collective operation performance. Note that these are only hints, which may or may not change the actual behavior. Some options only apply to certain strategies and are ignored by others. One common optimization is to break gradients all-reduce into multiple packs so that weight updates can overlap with gradient all-reduce. Example: ```python hints = tf.distribute.experimental.CollectiveHints( bytes_per_pack=50 * 1024 * 1024) grads = tf.distribute.get_replica_context().all_reduce( 'sum', grads, experimental_hints=hints) optimizer.apply_gradients(zip(grads, vars), experimental_aggregate_gradients=False) ```" 2594,DistributionParameter,tensorflow/tensorflow/python/distribute/combinations.py,53,class,"Transforms arguments of type `NamedDistribution`. Convert all arguments of type `NamedDistribution` to the value of their `strategy` property." 2595,ClusterParameters,tensorflow/tensorflow/python/distribute/combinations.py,69,class,"Adds cluster parameters if a `NamedDistribution` has it. It needs to be before DistributionParameter." 2596,NamedGPUCombination,tensorflow/tensorflow/python/distribute/combinations.py,96,class,"Enable tests to request GPU hardware and skip non-GPU combinations. This class expects test_combinations to be generated with `NamedDistribution` wrapping instances of `tf.distribute.Strategy`. Optionally, the `required_gpus` argument is supported. GPU hardware is required if its value is `True` or > 0. Attributes: GPU_TEST: The environment is considered to have GPU hardware available if the name of the program contains ""test_gpu"" or ""test_xla_gpu""." 2597,GPUCombination,tensorflow/tensorflow/python/distribute/combinations.py,138,class,NamedGPUCombination that passes `tf.distribute.Strategy` to the tests. 2598,NamedTPUCombination,tensorflow/tensorflow/python/distribute/combinations.py,148,class,"Allows tests to request TPU hardware and skip non-TPU combinations. This class expects test_combinations to be generated with `NamedDistribution` wrapping instances of `tf.distribute.Strategy`. Optionally, the `required_tpus` parameter is supported. TPU hardware is required if its argument is `True` or > 0. Optionally, the `use_cloud_tpu` parameter is supported.
If TPU hardware is required by `required_tpus`, it specifically must be a Cloud TPU (specified with `--tpu`) if `use_cloud_tpu` is `True`. Attributes: TPU_TEST: The environment is considered to have TPU hardware available if the name of the program contains ""test_tpu""." 2599,TPUCombination,tensorflow/tensorflow/python/distribute/combinations.py,210,class,NamedTPUCombination that passes `tf.distribute.Strategy` to the tests. 2600,NamedDistribution,tensorflow/tensorflow/python/distribute/combinations.py,220,class,Wraps a `tf.distribute.Strategy` and adds a name for test titles. 2601,concat,tensorflow/tensorflow/python/distribute/combinations.py,279,function,Concats combinations. 2602,generate,tensorflow/tensorflow/python/distribute/combinations.py,287,function,"Distributed adapter of `framework.combinations_lib.generate`. All tests with distributed strategy should use this one instead of `framework.test_combinations.generate`. This function supports strategy combinations, GPU/TPU and multi-worker testing. See `framework.test_combinations_lib.generate` for usage." 2603,main,tensorflow/tensorflow/python/distribute/combinations.py,331,function,Tests must call this main(). 2604,_test_runner,tensorflow/tensorflow/python/distribute/combinations.py,345,function,"Executes the test with the given test_id. This is a simple wrapper around TestRunner to be used with multi_process_runner. Similar to test.main(), but it executes only one test specified by test_id and returns whether the test succeeds. If the test fails, the function prints failures and errors to stdout. Args: test_id: TestCase.id() Returns: A boolean indicating whether the test succeeds." 2605,_multi_worker_test,tensorflow/tensorflow/python/distribute/combinations.py,385,function,"Decorate test_method so that it runs in each worker. We use `multi_process_runner` to simulate multiple workers. Since we run this function in the main process and all worker processes, this decoration behaves differently in the main process and worker processes. In the main process, it spawns subprocesses and runs the test on each of them; in a worker process, it executes the test in the same way as a normal test, e.g. setUp()/tearDown() are called before/after the test. Args: test_method: a function which must be a test method. Returns: Decorated `test_method`. Note that the decorated function has additional arguments." 2606,_num_total_workers,tensorflow/tensorflow/python/distribute/combinations.py,467,function,Returns the number of workers including the chief. 2607,_multi_worker_session,tensorflow/tensorflow/python/distribute/combinations.py,474,function,"Returns a context manager that enters a session that is configured for the MultiWorkerMirroredStrategy. Args: kwargs: a dict. Keyword arguments passed to the test. Returns: A context manager. If MultiWorkerMirroredStrategy is the one and only strategy in kwargs and it's in graph mode, it's the session that is configured for that strategy. Otherwise, it's a no-op context manager."
2608,ClusterParametersTest,tensorflow/tensorflow/python/distribute/combinations_test.py,33,class, 2609,ClusterParametersShouldFailTest,tensorflow/tensorflow/python/distribute/combinations_test.py,95,class, 2610,CombinationsExpectedFailureTest,tensorflow/tensorflow/python/distribute/combinations_test.py,123,class, 2611,CombinationsOnClassMultiWorkerExpectedFailureTest,tensorflow/tensorflow/python/distribute/combinations_test.py,150,class, 2612,check_destinations,tensorflow/tensorflow/python/distribute/cross_device_ops.py,49,function,"Checks whether `destinations` is not empty. Args: destinations: a `DistributedValues`, variable, or string object. Returns: Boolean which is True if `destinations` is not empty." 2613,validate_destinations,tensorflow/tensorflow/python/distribute/cross_device_ops.py,65,function,Validates the `destination` is one of expected types. 2614,reduce_non_distributed_value,tensorflow/tensorflow/python/distribute/cross_device_ops.py,79,function,Reduce a non-DistributedValue `value` to `destinations`. 2615,_make_tensor_into_per_replica,tensorflow/tensorflow/python/distribute/cross_device_ops.py,108,function,Converts a single tensor into a PerReplica object. 2616,_normalize_value_destination_pairs,tensorflow/tensorflow/python/distribute/cross_device_ops.py,123,function,Converts each tensor into a PerReplica object in the input list. 2617,_validate_value_destination_pairs,tensorflow/tensorflow/python/distribute/cross_device_ops.py,144,function, 2618,get_devices_from,tensorflow/tensorflow/python/distribute/cross_device_ops.py,159,function, 2619,_devices_match,tensorflow/tensorflow/python/distribute/cross_device_ops.py,167,function, 2620,_all_devices_match,tensorflow/tensorflow/python/distribute/cross_device_ops.py,172,function, 2621,simple_broadcast,tensorflow/tensorflow/python/distribute/cross_device_ops.py,181,function,Broadcast `value` to `destinations` using simple copies. 2622,_simple_reduce,tensorflow/tensorflow/python/distribute/cross_device_ops.py,196,function, 2623,CrossDeviceOps,tensorflow/tensorflow/python/distribute/cross_device_ops.py,217,class,Base class for cross-device reduction and broadcasting algorithms. 2624,ReductionToOneDevice,tensorflow/tensorflow/python/distribute/cross_device_ops.py,409,class,"Always do reduction to one device first and then do broadcasting. Batch reduction is done by reduction on each element one by one. ``` mirrored_strategy = tf.distribute.MirroredStrategy( cross_device_ops=tf.distribute.ReductionToOneDevice()) ```" 2625,_group_value_by_device,tensorflow/tensorflow/python/distribute/cross_device_ops.py,457,function,"Group values into sublists by their devices. This grouping is needed to call the all-reduce library because it expects a list of the following form: [[(grad0_gpu0, v0_gpu0), (grad1_gpu0, v1_gpu0), (grad2_gpu0, v2_gpu0) ...], [(grad0_gpu1, v0_gpu1), (grad1_gpu1, v1_gpu1), (grad2_gpu1, v2_gpu1) ...], [(grad0_gpu2, v0_gpu2), (grad1_gpu0, v1_gpu2), (grad2_gpu0, v2_gpu2) ...], ... ] Args: per_replica_values: a list of PerReplica objects. Returns: a list of lists, each sublist has components for its corresponding device of PerReplica objects, paired with a None." 2626,_ungroup_and_make_mirrored,tensorflow/tensorflow/python/distribute/cross_device_ops.py,485,function,"Ungroup results from all-reduce and make Mirrored objects. Each all-reduce result will be divided by the number of destinations before Mirrored objects are created if reduce_op is ""mean"". 
Args: grouped_reduced: a list of lists, each sublist has components for each device, paired with a None. It is the result from cross_device_utils.aggregate_gradients_using*. destinations: a value to colocate the result with. reduce_op: Indicates how values will be aggregated. Accepted values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`. num_between_graph_workers: number of workers in the between-graph replication. Returns: a list of Mirrored objects." 2627,_ConcatAndSplitPacker,tensorflow/tensorflow/python/distribute/cross_device_ops.py,520,class,Concatenate and split tensors for reduction. 2628,_pack_tensors,tensorflow/tensorflow/python/distribute/cross_device_ops.py,624,function,Pack tensors if specified. 2629,_unpack_tensors,tensorflow/tensorflow/python/distribute/cross_device_ops.py,635,function,Unpack tensors if they are packed before all-reduce. 2630,AllReduceCrossDeviceOps,tensorflow/tensorflow/python/distribute/cross_device_ops.py,642,class,Reduction using all-reduce. 2631,NcclAllReduce,tensorflow/tensorflow/python/distribute/cross_device_ops.py,747,class,Reduction using NCCL all-reduce. 2632,HierarchicalCopyAllReduce,tensorflow/tensorflow/python/distribute/cross_device_ops.py,773,class,"Reduction using hierarchical copy all-reduce. It reduces to one GPU along edges in some hierarchy and broadcasts back to each GPU along the same path. Before performing all-reduce, tensors will be repacked or aggregated for more efficient cross-device transportation. This is a reduction created for Nvidia DGX-1, and it assumes GPUs are connected like those on a DGX-1 machine. If you have different GPU inter-connections, it is likely that it would be slower than `tf.distribute.ReductionToOneDevice`." 2633,MultiWorkerAllReduce,tensorflow/tensorflow/python/distribute/cross_device_ops.py,804,class,All-reduce algorithms for distributed TensorFlow. 2634,CollectiveCommunication,tensorflow/tensorflow/python/distribute/cross_device_ops.py,916,class,"Communication choices for CollectiveOps. * `AUTO`: Default to runtime's automatic choices. * `RING`: TensorFlow's ring algorithms for all-reduce and all-gather. * `NCCL`: Use ncclAllReduce for all-reduce, and ring algorithms for all-gather." 2635,CollectiveAllReduce,tensorflow/tensorflow/python/distribute/cross_device_ops.py,932,class,"All-reduce cross device ops using collective ops. In the between-graph replicated training, it will still do all-reduces across all workers and then put results on the right destinations." 2636,choose_the_best,tensorflow/tensorflow/python/distribute/cross_device_ops.py,1167,function,"Find the best CrossDeviceOps locally given a `tf.compat.v1.ConfigProto`. Args: devices: a list of devices passed to `tf.distribute.Strategy`. session_config: a `tf.compat.v1.ConfigProto` or `None`. If `None`, it will make decision based on all logical devices. Returns: A subclass of `CrossDeviceOps`." 2637,_get_devices,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,54,function, 2638,_make_per_replica,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,64,function, 2639,_fake_mirrored,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,84,function,"Create a faked Mirrored object for testing. All components of the returned Mirrored have the same objects, which is not true in reality."
2640,_make_indexed_slices,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,100,function, 2641,_make_mirrored_indexed_slices,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,109,function, 2642,CrossDeviceOpsTestBase,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,120,class, 2643,SingleWorkerCrossDeviceOpsTest,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,280,class, 2644,MultiWorkerCrossDeviceOpsTest,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,434,class, 2645,CollectiveAllReduceTest,tensorflow/tensorflow/python/distribute/cross_device_ops_test.py,488,class, 2646,aggregate_gradients_using_nccl,tensorflow/tensorflow/python/distribute/cross_device_utils.py,42,function,Aggregate gradients using nccl allreduce. 2647,aggregate_gradients_using_hierarchical_copy,tensorflow/tensorflow/python/distribute/cross_device_utils.py,56,function,"Aggregate gradients using hierarchical copies. Args: avail_devices: available GPU devices. replica_grads: List of lists of (gradient, variable) tuples. The outer list is over replicas. The inner list is over individual gradients. Returns: The list of (aggregated_gradient, variable), where the gradient has been summed across all replicas and the variable is chosen from the first replica." 2648,aggregate_single_gradient_using_copy,tensorflow/tensorflow/python/distribute/cross_device_utils.py,138,function,"Calculate the average gradient for a shared variable across all replicas. Note that this function provides a synchronization point across all replicas. Args: grad_and_vars: A list or tuple of (gradient, variable) tuples. Each (gradient, variable) pair within the outer list represents the gradient of the variable calculated for a single replica, and the number of pairs equals the number of replicas. use_mean: if True, mean is taken, else sum of gradients is taken. check_inf_nan: check grads for nans and infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all replicas. The variable is chosen from the first replica. The has_nan_or_inf indicates whether the grads contain any NaN or Inf values." 2649,group_device_names,tensorflow/tensorflow/python/distribute/cross_device_utils.py,173,function,"Group device names into groups of group_size. Args: devices: a list of canonical device strings. group_size: integer which is equal to or greater than 1. Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) % group_size == 0 then each device will appear exactly once. Raises: ValueError: if group_size > len(devices)" 2650,split_grads_by_size,tensorflow/tensorflow/python/distribute/cross_device_utils.py,200,function,"Break gradients into two sets according to tensor size. Args: threshold_size: int size cutoff for small vs large tensor. device_grads: List of lists of (gradient, variable) tuples. The outer list is over devices. The inner list is over individual gradients. Returns: small_grads: Subset of device_grads where shape is <= threshold_size elements. large_grads: Subset of device_grads where shape is > threshold_size elements." 2651,CollectiveKeys,tensorflow/tensorflow/python/distribute/cross_device_utils.py,241,class,"Class that manages collective keys. We need to manage three different keys for collectives: *Group key*: an integer key to identify the set of cooperative devices.
Collective ops working under the same set of devices must use the same group key. *Instance key*: an integer key to identify the set of counterpart tensors on different devices in a device group that need to be all-reduced. *Graph key*: an integer key that is unique per graph. This is used to support multiple graphs per client session. It must be non-zero and set in the `config` argument of each call to `session.run`." 2652,build_collective_reduce,tensorflow/tensorflow/python/distribute/cross_device_utils.py,321,function,"Build a subgraph that does one full all-reduce, using the collective Op. If called in eager mode, it's required to supply a list of async executors, one for each input Tensor. Args: input_tensors: tensors within a single worker graph that are to be reduced together; must be one per device. devices: a list of device strings to run the collective on. group_size: total number of devices globally that will be doing this same reduction. The reduction will actually include the corresponding tensors at all these workers. collective_keys: a CollectiveKeys object. reduction_op: string naming the reduction op. unary_op: string naming the unary final op. communication_hint: string providing hint to runtime for choosing collective implementation. control_inputs: if not None, add control edges between control_inputs and (index-wise) corresponding collective_reduce tensors executors: a list of async executors. Required for eager execution. Returns: An array of final tensors, one per device, computed by the full reduction. Raises: ValueError: There must be at least two tensors over all the workers." 2653,build_collective_gather,tensorflow/tensorflow/python/distribute/cross_device_utils.py,391,function,"Build a subgraph that does one full all-gather, using the collective Op. This method must be called in graph mode or inside a tf.function. Args: input_tensors: tensors within a single worker graph that are to be gathered together; must be one per device. devices: a list of device strings to run the collective on. group_size: total number of devices globally that will be doing this same gathering. The gathering will actually include the corresponding tensors at all these workers. collective_keys: a CollectiveKeys object. communication_hint: string providing hint to runtime for choosing collective implementation. control_inputs: if not None, add control edges between control_inputs and (index-wise) corresponding collective_gather tensors Returns: An array of final tensors, one per device, computed by the full gather." 2654,build_collective_gather_indexed_slices,tensorflow/tensorflow/python/distribute/cross_device_utils.py,442,function,"Build a subgraph that all-gathers IndexedSlices using the collective Op. This method must be called in graph mode or inside a tf.function. Args: input_slices_list: a list of IndexedSlices within a single worker graph that are to be gathered together; must be one per device. devices: a list of device strings to run the collective on. group_size: total number of devices globally that will be doing this same gathering. The gathering will actually include the corresponding tensors at all these workers. collective_keys: a CollectiveKeys object. communication_hint: string providing hint to runtime for choosing collective implementation. control_inputs: if not None, add control edges between control_inputs and (index-wise) corresponding collective_reduce tensors Returns: An array of final IndexedSlices, one per device, computed by the full gather.
Raises: ValueError: if control_inputs is not None and doesn't match the length and devices of inputs." 2655,sum_grad_and_var_all_reduce,tensorflow/tensorflow/python/distribute/cross_device_utils.py,549,function,Apply all-reduce algorithm over specified gradient tensors. 2656,sum_gradients_all_reduce,tensorflow/tensorflow/python/distribute/cross_device_utils.py,590,function,"Apply all-reduce algorithm over specified gradient tensors. Args: dev_prefixes: list of prefix strings to use to generate PS device names. replica_grads: the gradients to reduce. num_workers: number of worker processes across entire job. alg: the all-reduce algorithm to apply. num_shards: alg-specific sharding factor. gpu_indices: indices of local GPUs in order usable for ring-reduce. Returns: list of reduced tensors" 2657,extract_ranges,tensorflow/tensorflow/python/distribute/cross_device_utils.py,632,function,"Extract consecutive ranges and singles from index_list. Args: index_list: List of monotone increasing non-negative integers. range_size_limit: Largest size range to return. If a larger consecutive range exists, it will be returned as multiple ranges. Returns: (ranges, singles) where ranges is a list of [first, last] pairs of consecutive elements in index_list, and singles is all of the other elements, in original order." 2658,pack_range,tensorflow/tensorflow/python/distribute/cross_device_utils.py,672,function,"Form the concatenation of a specified range of gradient tensors. Args: key: Value under which to store meta-data in packing that will be used later to restore the grad_var list structure. packing: Dict holding data describing packed ranges of small tensors. grad_vars: List of (grad, var) pairs for one replica. rng: A pair of integers giving the first, last indices of a consecutive range of tensors to be packed. Returns: A tensor that is the concatenation of all the specified small tensors." 2659,unpack_grad_tuple,tensorflow/tensorflow/python/distribute/cross_device_utils.py,704,function,"Unpack a previously packed collection of gradient tensors. Args: gv: A (grad, var) pair to be unpacked. gpt: A GradPackTuple describing the packing operation that produced gv. Returns: A list of (grad, var) pairs corresponding to the values that were originally packed into gv, maybe following subsequent operations like reduction." 2660,pack_small_tensors,tensorflow/tensorflow/python/distribute/cross_device_utils.py,727,function,"Concatenate small gradient tensors together for reduction. Args: replica_grads: List of lists of (gradient, variable) tuples. max_bytes: Int giving max number of bytes in a tensor that may be considered small. max_group: Int giving max number of small tensors that may be concatenated into one new tensor. Returns: new_replica_grads, packing where new_replica_grads is identical to replica_grads except that all feasible small_tensors have been removed from their places and concatenated into larger tensors that are now in the front of the list for each replica, and packing contains the data necessary to restore the replica_grads structure. Look through the first replica for gradients of the same type (float), and small size, that are all sequential. For each such group, replace by a new tensor that is a flattened concatenation. Note that the corresponding variable will be absent, which doesn't matter because it isn't used during all-reduce. Requires: Every gv_list in replicas must have isomorphic structure including identical tensor sizes and types." 
2661,unpack_small_tensors,tensorflow/tensorflow/python/distribute/cross_device_utils.py,783,function,"Undo the structure alterations to replica_grads done by pack_small_tensors. Args: replica_grads: List of List of (grad, var) tuples. packing: A dict generated by pack_small_tensors describing the changes it made to replica_grads. Returns: new_replica_grads: identical to replica_grads except that concatenations of small tensors have been split apart and returned to their original positions, paired with their original variables." 2662,aggregate_tensors_or_indexed_slices,tensorflow/tensorflow/python/distribute/cross_device_utils.py,815,function,Aggregate tensors using `accumulation_fn` and IndexedSlices via concat. 2663,divide_by_n_tensors_or_indexed_slices,tensorflow/tensorflow/python/distribute/cross_device_utils.py,823,function, 2664,copy_tensor_or_indexed_slices_to_device,tensorflow/tensorflow/python/distribute/cross_device_utils.py,832,function, 2665,contains_indexed_slices,tensorflow/tensorflow/python/distribute/cross_device_utils.py,844,function,Check whether the value is `IndexedSlices` or contains `IndexedSlices`. 2666,is_indexed_slices,tensorflow/tensorflow/python/distribute/cross_device_utils.py,856,function, 2667,split_by_sparsity,tensorflow/tensorflow/python/distribute/cross_device_utils.py,863,function,"Split values into dense and sparse values. Args: values: a list of tensors or `PerReplica`s. Returns: Four lists: a list of dense values, a list of their indices in `values`, a list of sparse values, and a list of their indices in `values`." 2668,stitch_values,tensorflow/tensorflow/python/distribute/cross_device_utils.py,888,function,"Stitch values together according to their indices. Args: values_and_indices_list: a list of tuples of values and indices indicating the values and positions in the returned list. Returns: a stitched list of values." 2669,per_replica_num_elements,tensorflow/tensorflow/python/distribute/cross_device_utils.py,911,function,"Returns the static number of elements of one replica. Args: per_replica: A PerReplica of Tensor or IndexedSlices. Returns: Number of elements. None if some replica has a different or unknown shape." 2670,pack_by_size,tensorflow/tensorflow/python/distribute/cross_device_utils.py,930,function,"Packs `per_replica_list` into chunks of `bytes_per_pack`. The method preserves the original order of `per_replica_list`. The packing is best effort; each pack could have more or fewer bytes than `bytes_per_pack`. It only packs values with known shape. Note that the usage is different from `cross_device_ops._pack_tensors`; this function is intended to work with the ScopedAllocator-style batching used in `CollectiveAllReduce`. Args: per_replica_list: A list of PerReplica. bytes_per_pack: Bytes per pack. Returns: A list of packs of PerReplica. All values are packed into one pack if `bytes_per_pack` is zero or any of the values has an unknown shape." 2671,_control_input,tensorflow/tensorflow/python/distribute/cross_device_utils.py,972,function,"Returns the `idx`-th item in control_inputs to be used in ops.control_dependencies. This is a helper function for building collective ops. Args: devices: a list of device strings the collective runs on. control_inputs: a list or None. idx: the index into `inputs` and `control_inputs`. Returns: A one item list of the `idx`-th element of `control_inputs`, or an empty list if `control_inputs` is None."
2672,IndexedSlicesUtilsTest,tensorflow/tensorflow/python/distribute/cross_device_utils_test.py,36,class, 2673,PackBySizeTest,tensorflow/tensorflow/python/distribute/cross_device_utils_test.py,142,class, 2674,get_dataset_from_tensor_slices,tensorflow/tensorflow/python/distribute/custom_training_loop_gradient_test.py,34,function, 2675,AssertFlattenedMixin,tensorflow/tensorflow/python/distribute/custom_training_loop_gradient_test.py,42,class,Mixin for specialized asserts. 2676,GradientTapeTest,tensorflow/tensorflow/python/distribute/custom_training_loop_gradient_test.py,65,class, 2677,get_dataset_from_tensor_slices,tensorflow/tensorflow/python/distribute/custom_training_loop_input_test.py,43,function, 2678,AssertFlattenedMixin,tensorflow/tensorflow/python/distribute/custom_training_loop_input_test.py,51,class,Mixin for specialized asserts. 2679,InputIterationTest,tensorflow/tensorflow/python/distribute/custom_training_loop_input_test.py,74,class, 2680,canonicalize,tensorflow/tensorflow/python/distribute/device_util.py,27,function,"Canonicalize device string. If d has missing components, the rest are deduced from the `default` argument or from '/replica:0/task:0/device:CPU:0'. For example: If d = '/cpu:0', default='/job:worker/task:1', it returns '/job:worker/replica:0/task:1/device:CPU:0'. If d = '/cpu:0', default='/job:worker', it returns '/job:worker/replica:0/task:0/device:CPU:0'. If d = '/gpu:0', default=None, it returns '/replica:0/task:0/device:GPU:0'. Note: This uses ""job:localhost"" as the default if executing eagerly. Args: d: a device string or tf.config.LogicalDevice default: a string for default device if d doesn't have all components. Returns: a canonicalized device string." 2681,resolve,tensorflow/tensorflow/python/distribute/device_util.py,79,function,Canonicalize `d` with current device as default. 2682,_FakeNodeDef,tensorflow/tensorflow/python/distribute/device_util.py,84,class,A fake NodeDef for _FakeOperation. 2683,_FakeOperation,tensorflow/tensorflow/python/distribute/device_util.py,94,class,A fake Operation object to pass to device functions. 2684,current,tensorflow/tensorflow/python/distribute/device_util.py,110,function,Return a string (not canonicalized) for the current device. 2685,get_host_for_device,tensorflow/tensorflow/python/distribute/device_util.py,122,function,Returns the corresponding host device for the given device. 2686,local_devices_from_num_gpus,tensorflow/tensorflow/python/distribute/device_util.py,130,function,Returns device strings for local GPUs or CPU. 2687,DeviceUtilTest,tensorflow/tensorflow/python/distribute/device_util_test.py,33,class, 2688,DistributeConfig,tensorflow/tensorflow/python/distribute/distribute_config.py,24,class,"A config tuple for distribution strategies. Attributes: train_distribute: a `DistributionStrategy` object for training. eval_distribute: an optional `DistributionStrategy` object for evaluation. remote_cluster: a dict, `ClusterDef` or `ClusterSpec` object specifying the cluster configurations. If this is given, the `train_and_evaluate` method will be running as a standalone client which connects to the cluster for training." 2689,_TaskType,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,40,class, 2690,CoordinatorMode,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,50,class,Specify how distribute coordinator runs. 2691,_Barrier,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,64,class,A reusable barrier class for worker synchronization.
2692,_get_num_workers,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,95,function,Gets number of workers including chief. 2693,_WorkerContext,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,103,class,"The worker context class. This context object provides configuration information for each task. One context manager with a worker context object will be created per invocation to the `worker_fn` where `get_current_worker_context` can be called to access the worker context object." 2694,_run_single_worker,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,326,function,Runs a single worker by calling `worker_fn` under context. 2695,_split_cluster_for_evaluator,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,363,function,Split the cluster for evaluator since it needn't talk to other tasks. 2696,_run_std_server,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,385,function,Runs a standard server. 2697,_run_between_graph_client,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,454,function,Runs a standalone client for between-graph replication. 2698,_run_in_graph_client,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,497,function,Runs a standalone client for in-graph replication. 2699,_configure_session_config_for_std_servers,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,529,function,"Call strategy's `configure` to mutate the session_config. The session_config is currently needed as default config for a TensorFlow server. In the future, we should be able to remove this method and only pass the session config to a client session." 2700,run_standard_tensorflow_server,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,555,function,"Starts a standard TensorFlow server. This method parses configurations from the ""TF_CONFIG"" environment variable and starts a TensorFlow server. The ""TF_CONFIG"" is typically a json string and must have information of the cluster and the role of the server in the cluster. One example is: TF_CONFIG='{ ""cluster"": { ""worker"": [""host1:2222"", ""host2:2222"", ""host3:2222""], ""ps"": [""host4:2222"", ""host5:2222""] }, ""task"": {""type"": ""worker"", ""index"": 1} }' This ""TF_CONFIG"" specifies that there are 3 workers and 2 ps tasks in the cluster and the current role is worker 1. Valid task types are ""chief"", ""worker"", ""ps"" and ""evaluator"", and you can have at most one ""chief"" and at most one ""evaluator"". An optional key that can be specified is ""rpc_layer""; the default value is ""grpc"". Args: session_config: an optional `tf.compat.v1.ConfigProto` object. Users can pass in the session config object to configure server-local devices. Returns: a `tf.distribute.Server` object which has already been started. Raises: ValueError: if the ""TF_CONFIG"" environment is not complete." 2701,run_distribute_coordinator,tensorflow/tensorflow/python/distribute/distribute_coordinator.py,631,function,"Runs the coordinator for distributed TensorFlow. This function runs a split coordinator for distributed TensorFlow in its default mode, i.e. the STANDALONE_CLIENT mode. Given a `cluster_spec` specifying server addresses and their roles in a cluster, this coordinator will figure out how to set them up, give the underlying function the right targets for master sessions via a scope object and coordinate their training.
The cluster consisting of standard servers needs to be brought up either with the standard server binary or with a binary running the distribute coordinator with `task_type` set to a non-client type, which will then turn into standard servers. In addition to being the distribute coordinator, this is also the source of configurations for each job in the distributed training. As there are multiple ways to configure a distributed TensorFlow cluster, its context object provides these configurations so that users or higher-level APIs don't have to figure out the configuration for each job by themselves. In between-graph replicated training, this coordinator will create multiple threads, each of which calls the `worker_fn`, which is supposed to create its own graph and connect to one worker master given by its context object. In in-graph replicated training, it has only one thread calling this `worker_fn`. Another mode is the INDEPENDENT_WORKER mode, where each server runs a distribute coordinator which will start a standard server and optionally run `worker_fn`, depending on whether it is between-graph training or in-graph replicated training. The `strategy` object is expected to be a DistributionStrategy object which has implemented methods needed by the distribute coordinator such as `configure(session_config, cluster_spec, task_type, task_id)`, which configures the strategy object for a specific task, and the `experimental_should_init` property, which instructs the distribute coordinator whether to run init ops for a task. The distribute coordinator will make a copy of the `strategy` object, call its `configure` method and pass it to `worker_fn` as an argument. The `worker_fn` defines the training logic and is called under its own worker context, which can be accessed via `get_current_worker_context`. A worker context provides access to configurations for each task, e.g. the task_type, task_id, master target and so on. Since `worker_fn` will be called in a thread and possibly multiple times, callers should be careful when accessing global data. For example, it is unsafe to define flags in a `worker_fn` or to define different environment variables for different `worker_fn`s. The `worker_fn` for between-graph replication is defined as if there is only one worker corresponding to the `worker_fn` and possibly ps jobs. For example, when training with parameter servers, it assigns variables to parameter servers and all other operations to that worker. In the in-graph replication case, the `worker_fn` has to define operations for all worker jobs. Using a distribution strategy can simplify the `worker_fn` by not having to worry about the replication and device assignment of variables and operations. This method is intended to be invoked by high-level APIs so that users don't have to explicitly call it to run this coordinator. For those who don't use high-level APIs, to change a program to use this coordinator, wrap everything in the program after global data definitions, such as command-line flag definitions, into the `worker_fn` and get task-specific configurations from the worker context. The `cluster_spec` can either be passed as an argument or parsed from the ""TF_CONFIG"" environment variable.
Example of a TF_CONFIG: ``` cluster = {'chief': ['host0:2222'], 'ps': ['host1:2222', 'host2:2222'], 'worker': ['host3:2222', 'host4:2222', 'host5:2222']} os.environ['TF_CONFIG'] = json.dumps({'cluster': cluster}) ``` If `cluster_spec` is not given in any format, it becomes local training and this coordinator will connect to a local session. For evaluation, if ""evaluator"" exists in the cluster_spec, a separate thread will be created to call `eval_fn` with its `task_type` set to ""evaluator"". If `eval_fn` is not defined, it falls back to `worker_fn`. This implies that evaluation will be done on a single machine if there is an ""evaluator"" task. If ""evaluator"" doesn't exist in the cluster_spec, it entirely depends on the `worker_fn` for how to do evaluation. Args: worker_fn: the function to be called. The function should accept a `strategy` object and will be given access to a context object via a context manager scope. strategy: a DistributionStrategy object specifying whether it should run between-graph replicated training or not, whether to run init ops, etc. This object will also be configured given `session_config`, `cluster_spec`, `task_type` and `task_id`. eval_fn: optional function for the ""evaluator"" task. If `eval_fn` is not passed in but an ""evaluator"" task is found in the `cluster_spec`, the `worker_fn` will be used for this task. eval_strategy: optional DistributionStrategy object for the ""evaluator"" task. mode: the mode in which this distribute coordinator runs. cluster_spec: a dict, ClusterDef or ClusterSpec specifying servers and roles in a cluster. If not set or empty, falls back to local training. task_type: the current task type, optional if this is a client. task_id: the current task id, optional if this is a client. session_config: an optional `tf.compat.v1.ConfigProto` object which will be passed to `strategy`'s `configure` method and used to create a session. rpc_layer: optional string, the protocol for RPC, e.g. ""grpc"". Raises: ValueError: if `cluster_spec` is supplied but not a dict or a ClusterDef or a ClusterSpec. Returns: In the client job, returns the value returned by `worker_fn` if it is in-graph replication or INDEPENDENT_WORKER mode; returns None otherwise." 2702,get_current_worker_context,tensorflow/tensorflow/python/distribute/distribute_coordinator_context.py,26,function,Returns the current task context.
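The ""TF_CONFIG"" wiring that `run_standard_tensorflow_server` and `run_distribute_coordinator` parse can be set up as below; the hostnames and ports are the illustrative ones from the docstrings above:

```python
import json
import os

# Illustrative cluster from the docstrings above: three workers and two
# ps tasks, with this process acting as worker 1.
os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {
        "worker": ["host1:2222", "host2:2222", "host3:2222"],
        "ps": ["host4:2222", "host5:2222"],
    },
    "task": {"type": "worker", "index": 1},
})
```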
2703,_bytes_to_str,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,70,function, 2704,_strip_protocol,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,77,function, 2705,MockExtended,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,85,class, 2706,MockStrategy,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,98,class, 2707,MockServer,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,141,class, 2708,DistributeCoordinatorTestBase,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,163,class, 2709,DistributeCoordinatorTestStandaloneMode,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,429,class, 2710,DistributeCoordinatorTestIndependentWorkerMode,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,596,class, 2711,StrategyConfigureTest,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,830,class, 2712,RunStandardTensorflowServerTest,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,916,class, 2713,new_init,tensorflow/tensorflow/python/distribute/distribute_coordinator_test.py,954,function, 2714,get_update_replica_id,tensorflow/tensorflow/python/distribute/distribute_lib.py,242,function,Get the current device if in a `tf.distribute.Strategy.update()` call. 2715,UpdateContext,tensorflow/tensorflow/python/distribute/distribute_lib.py,250,class,Context manager when you are in `update()` or `update_non_slot()`. 2716,get_loss_reduction,tensorflow/tensorflow/python/distribute/distribute_lib.py,273,function,"`tf.distribute.ReduceOp` corresponding to the last loss reduction. This is used to decide whether the loss should be scaled in the optimizer (used only for the estimator + v1 optimizer use case). Returns: `tf.distribute.ReduceOp` corresponding to the last loss reduction for the estimator and v1 optimizer use case. `tf.distribute.ReduceOp.SUM` otherwise." 2717,_require_cross_replica_or_default_context_extended,tensorflow/tensorflow/python/distribute/distribute_lib.py,298,function,Verify in cross-replica context. 2718,_wrong_strategy_scope,tensorflow/tensorflow/python/distribute/distribute_lib.py,315,function, 2719,require_replica_context,tensorflow/tensorflow/python/distribute/distribute_lib.py,327,function,Verify in `replica_ctx` replica context. 2720,_require_strategy_scope_strategy,tensorflow/tensorflow/python/distribute/distribute_lib.py,342,function,Verify in a `strategy.scope()` in this thread. 2721,_require_strategy_scope_extended,tensorflow/tensorflow/python/distribute/distribute_lib.py,349,function,Verify in a `distribution_strategy.scope()` in this thread. 2722,_CurrentDistributionContext,tensorflow/tensorflow/python/distribute/distribute_lib.py,363,class,"Context manager setting the current `tf.distribute.Strategy`. Also: overrides the variable creator and optionally the current device." 2723,InputReplicationMode,tensorflow/tensorflow/python/distribute/distribute_lib.py,434,class,"Replication mode for input function. * `PER_WORKER`: The input function will be called on each worker independently, creating as many input pipelines as the number of workers. Replicas will dequeue from the local Dataset on their worker. `tf.distribute.Strategy` doesn't manage any state sharing between such separate input pipelines." 2724,InputContext,tensorflow/tensorflow/python/distribute/distribute_lib.py,447,class,"A class wrapping information needed by an input function.
This is a context class that is passed to the user's input function and contains information about the compute replicas and input pipelines. The number of compute replicas (in sync training) helps compute the local batch size from the desired global batch size for each replica. The input pipeline information can be used to return a different subset of the input in each replica (e.g. shard the input pipeline, use a different input source, etc.)." 2725,ValueContext,tensorflow/tensorflow/python/distribute/distribute_lib.py,520,class,"A class wrapping information needed by a distribute function. This is a context class that is passed to the `value_fn` in `strategy.experimental_distribute_values_from_function` and contains information about the compute replicas. The `num_replicas_in_sync` and `replica_id` can be used to customize the value on each replica. Example usage: 1. Directly constructed. >>> def value_fn(context): ... return context.replica_id_in_sync_group/context.num_replicas_in_sync >>> context = tf.distribute.experimental.ValueContext( ... replica_id_in_sync_group=2, num_replicas_in_sync=4) >>> per_replica_value = value_fn(context) >>> per_replica_value 0.5 2. Passed in by `experimental_distribute_values_from_function`. >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> def value_fn(value_context): ... return value_context.num_replicas_in_sync >>> distributed_values = ( ... strategy.experimental_distribute_values_from_function( ... value_fn)) >>> local_result = strategy.experimental_local_results(distributed_values) >>> local_result (2, 2)" 2726,RunOptions,tensorflow/tensorflow/python/distribute/distribute_lib.py,586,class,"Run options for `strategy.run`. This can be used to hold some strategy-specific configs. Attributes: experimental_enable_dynamic_batch_size: Boolean. Only applies to TPUStrategy. Defaults to True. If True, TPUStrategy will enable the dynamic padder to support dynamic batch size for the inputs. Otherwise only static shape inputs are allowed. experimental_bucketizing_dynamic_shape: Boolean. Only applies to TPUStrategy. Defaults to False. If True, TPUStrategy will automatically bucketize inputs passed into `run` if the input shape is dynamic. This is a performance optimization to reduce XLA recompilation, which should not have an impact on correctness." 2727,InputOptions,tensorflow/tensorflow/python/distribute/distribute_lib.py,616,class,"Run options for `experimental_distribute_dataset(s_from_function)`. This can be used to hold some strategy-specific configs. ```python # Setup TPUStrategy resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') tf.config.experimental_connect_to_cluster(resolver) tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) dataset = tf.data.Dataset.range(16) distributed_dataset_on_host = ( strategy.experimental_distribute_dataset( dataset, tf.distribute.InputOptions( experimental_prefetch_to_device=False))) ``` Attributes: experimental_prefetch_to_device: Boolean. Defaults to True. If True, dataset elements will be prefetched to accelerator device memory. When False, dataset elements are prefetched to host device memory. Must be False when using the TPUEmbedding API." 2728,StrategyBase,tensorflow/tensorflow/python/distribute/distribute_lib.py,657,class,"A state & compute distribution policy on a list of devices. See [the guide](https://www.tensorflow.org/guide/distributed_training) for overview and examples.
See `tf.distribute.StrategyExtended` and [`tf.distribute`](https://www.tensorflow.org/api_docs/python/tf/distribute) for a glossary of concepts mentioned on this page such as ""per-replica"", _replica_, and _reduce_. In short: * To use it with Keras `compile`/`fit`, [please read](https://www.tensorflow.org/guide/distributed_training#using_tfdistributestrategy_with_keras). * You may pass a descendant of `tf.distribute.Strategy` to `tf.estimator.RunConfig` to specify how a `tf.estimator.Estimator` should distribute its computation. See [guide](https://www.tensorflow.org/guide/distributed_training#using_tfdistributestrategy_with_estimator_limited_support). * Otherwise, use `tf.distribute.Strategy.scope` to specify that a strategy should be used when building and executing your model. (This puts you in the ""cross-replica context"" for this strategy, which means the strategy is put in control of things like variable placement.) * If you are writing a custom training loop, you will need to call a few more methods, [see the guide](https://www.tensorflow.org/guide/distributed_training#using_tfdistributestrategy_with_custom_training_loops): * Start by either creating a `tf.data.Dataset` normally or using `tf.distribute.experimental_make_numpy_dataset` to make a dataset out of a `numpy` array. * Use `tf.distribute.Strategy.experimental_distribute_dataset` to convert a `tf.data.Dataset` to something that produces ""per-replica"" values. If you want to manually specify how the dataset should be partitioned across replicas, use `tf.distribute.Strategy.experimental_distribute_datasets_from_function` instead. * Use `tf.distribute.Strategy.run` to run a function once per replica, taking values that may be ""per-replica"" (e.g. from a `tf.distribute.DistributedDataset` object) and returning ""per-replica"" values. This function is executed in ""replica context"", which means each operation is performed separately on each replica. * Finally, use a method (such as `tf.distribute.Strategy.reduce`) to convert the resulting ""per-replica"" values into ordinary `Tensor`s. A custom training loop can be as simple as: ``` with my_strategy.scope(): @tf.function def distribute_train_epoch(dataset): def replica_fn(input): # process input and return result return result total_result = 0 for x in dataset: per_replica_result = my_strategy.run(replica_fn, args=(x,)) total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_result, axis=None) return total_result dist_dataset = my_strategy.experimental_distribute_dataset(dataset) for _ in range(EPOCHS): train_result = distribute_train_epoch(dist_dataset) ``` This takes an ordinary `dataset` and `replica_fn` and runs it in a distributed fashion using a particular `tf.distribute.Strategy` named `my_strategy` above. Any variables created in `replica_fn` are created using `my_strategy`'s policy, and library functions called by `replica_fn` can use the `get_replica_context()` API to implement distributed-specific behavior. You can use the `reduce` API to aggregate results across replicas and use this as a return value from one iteration over a `tf.distribute.DistributedDataset`. Or you can use `tf.keras.metrics` (such as loss, accuracy, etc.) to accumulate metrics across steps in a given epoch. See the [custom training loop tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training) for a more detailed example.
Note: `tf.distribute.Strategy` currently does not support TensorFlow's partitioned variables (where a single variable is split across multiple devices)." 2729,Strategy,tensorflow/tensorflow/python/distribute/distribute_lib.py,1584,class, 2730,StrategyV1,tensorflow/tensorflow/python/distribute/distribute_lib.py,1828,class,"A list of devices with a state & compute distribution policy. See [the guide](https://www.tensorflow.org/guide/distribute_strategy) for overview and examples. Note: Not all `tf.distribute.Strategy` implementations currently support TensorFlow's partitioned variables (where a single variable is split across multiple devices)." 2731,StrategyExtendedV2,tensorflow/tensorflow/python/distribute/distribute_lib.py,1999,class,"Additional APIs for algorithms that need to be distribution-aware. Note: For most usage of `tf.distribute.Strategy`, there should be no need to call these methods, since TensorFlow libraries (such as optimizers) already call these methods when needed on your behalf. Some common use cases of functions on this page: * _Locality_ `tf.distribute.DistributedValues` can have the same _locality_ as a _distributed variable_, which leads to a mirrored value residing on the same devices as the variable (as opposed to the compute devices). Such values may be passed to a call to `tf.distribute.StrategyExtended.update` to update the value of a variable. You may use `tf.distribute.StrategyExtended.colocate_vars_with` to give a variable the same locality as another variable. You may convert a ""PerReplica"" value to a variable's locality by using `tf.distribute.StrategyExtended.reduce_to` or `tf.distribute.StrategyExtended.batch_reduce_to`. * _How to update a distributed variable_ A distributed variable is a variable created on multiple devices. As discussed in the [glossary](https://www.tensorflow.org/api_docs/python/tf/distribute), mirrored variables and SyncOnRead variables are two examples. The standard pattern for updating distributed variables is to: 1. In your function passed to `tf.distribute.Strategy.run`, compute a list of (update, variable) pairs. For example, the update might be a gradient of the loss with respect to the variable. 2. Switch to cross-replica mode by calling `tf.distribute.get_replica_context().merge_call()` with the updates and variables as arguments. 3. Call `tf.distribute.StrategyExtended.reduce_to(VariableAggregation.SUM, t, v)` (for one variable) or `tf.distribute.StrategyExtended.batch_reduce_to` (for a list of variables) to sum the updates. 4. Call `tf.distribute.StrategyExtended.update(v)` for each variable to update its value. Steps 2 through 4 are done automatically by the class `tf.keras.optimizers.Optimizer` if you call its `tf.keras.optimizers.Optimizer.apply_gradients` method in a replica context. In fact, a higher-level solution to update a distributed variable is to call `assign` on the variable as you would with a regular `tf.Variable`. You can call the method in both _replica context_ and _cross-replica context_. For a _mirrored variable_, calling `assign` in _replica context_ requires you to specify the `aggregation` type in the variable constructor. In that case, the context switching and sync described in steps 2 through 4 are handled for you. If you call `assign` on a _mirrored variable_ in _cross-replica context_, you can only assign a single value or assign values from another mirrored variable or a mirrored `tf.distribute.DistributedValues`.
For a _SyncOnRead variable_, in _replica context_, you can simply call `assign` on it and no aggregation happens under the hood. In _cross-replica context_, you can only assign a single value to a SyncOnRead variable. One example case is restoring from a checkpoint: if the `aggregation` type of the variable is `tf.VariableAggregation.SUM`, it is assumed that replica values were added before checkpointing, so at the time of restoring, the value is divided by the number of replicas and then assigned to each replica; if the `aggregation` type is `tf.VariableAggregation.MEAN`, the value is assigned to each replica directly." 2732,StrategyExtendedV1,tensorflow/tensorflow/python/distribute/distribute_lib.py,2618,class, 2733,ReplicaContext,tensorflow/tensorflow/python/distribute/distribute_lib.py,2829,class,"A class with a collection of APIs that can be called in a replica context. You can use `tf.distribute.get_replica_context` to get an instance of `ReplicaContext`, which can only be called inside the function passed to `tf.distribute.Strategy.run`. >>> strategy = tf.distribute.MirroredStrategy(['GPU:0', 'GPU:1']) >>> def func(): ... replica_context = tf.distribute.get_replica_context() ... return replica_context.replica_id_in_sync_group >>> strategy.run(func) PerReplica:{ 0: , 1: }" 2734,_batch_reduce_destination,tensorflow/tensorflow/python/distribute/distribute_lib.py,3017,function,Returns the destinations for batch all-reduce. 2735,_DefaultDistributionStrategyV1,tensorflow/tensorflow/python/distribute/distribute_lib.py,3032,class,Default `tf.distribute.Strategy` if none is explicitly selected. 2736,_DefaultDistributionStrategy,tensorflow/tensorflow/python/distribute/distribute_lib.py,3048,class,Default `tf.distribute.Strategy` if none is explicitly selected. 2737,_DefaultDistributionContext,tensorflow/tensorflow/python/distribute/distribute_lib.py,3064,class,Context manager setting the default `tf.distribute.Strategy`. 2738,_DefaultDistributionExtended,tensorflow/tensorflow/python/distribute/distribute_lib.py,3101,class,Implementation of _DefaultDistributionStrategy. 2739,_from_proto_fn,tensorflow/tensorflow/python/distribute/distribute_lib.py,3273,function, 2740,_TestReplicaContext,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,44,class, 2741,_get_test_variable,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,50,function, 2742,_test_input_fn,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,58,function, 2743,_TestStrategy,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,63,class, 2744,_TestExtended,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,69,class, 2745,_assert_in_default_state,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,131,function, 2746,_run_in_and_out_of_scope,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,140,function, 2747,TestStrategyTest,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,159,class, 2748,_TestStrategy2,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,440,class, 2749,_TestExtended2,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,446,class, 2750,DefaultDistributionStrategyTest,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,452,class, 2751,InputContextTest,tensorflow/tensorflow/python/distribute/distribute_lib_test.py,571,class, 2752,regroup,tensorflow/tensorflow/python/distribute/distribute_utils.py,32,function,"Makes a per-replica nest into a nest of PerReplica/Mirrored values.
Args: values: Values to regroup. wrap_class: Class that `values` will be wrapped in. always_wrap: Always wrap the `values` in `wrap_class` even if the values are the same except for DistributeVariable. Returns: Wrapped `values`." 2753,select_replica,tensorflow/tensorflow/python/distribute/distribute_utils.py,127,function,Specialize a nest of regular & per-replica values for one replica. 2754,select_replica_mirrored,tensorflow/tensorflow/python/distribute/distribute_utils.py,143,function,Specialize a nest of regular & mirrored values for one replica. 2755,update_regroup,tensorflow/tensorflow/python/distribute/distribute_utils.py,162,function,"Regroup for an update, with dependencies to ensure all updates execute." 2756,value_container,tensorflow/tensorflow/python/distribute/distribute_utils.py,195,function,"Returns the container that this per-replica `value` belongs to. Args: val: A value returned by `call_for_each_replica()` or a variable created in `scope()`. Returns: A container that `value` belongs to. If value does not belong to any container (including the case of the container having been destroyed), returns the value itself." 2757,is_distributed_variable,tensorflow/tensorflow/python/distribute/distribute_utils.py,217,function,Determines whether a variable is a distributed (ds) variable or a TPU mirrored variable. 2758,_validate_colocate_extended,tensorflow/tensorflow/python/distribute/distribute_utils.py,222,function, 2759,validate_colocate_distributed_variable,tensorflow/tensorflow/python/distribute/distribute_utils.py,231,function, 2760,validate_colocate,tensorflow/tensorflow/python/distribute/distribute_utils.py,239,function, 2761,create_mirrored_variable,tensorflow/tensorflow/python/distribute/distribute_utils.py,248,function, 2762,_nested_value,tensorflow/tensorflow/python/distribute/distribute_utils_test.py,38,function, 2763,RegroupAndSelectDeviceTest,tensorflow/tensorflow/python/distribute/distribute_utils_test.py,42,class, 2764,_get_base_dirpath,tensorflow/tensorflow/python/distribute/distributed_file_utils.py,56,function, 2765,_is_temp_dir,tensorflow/tensorflow/python/distribute/distributed_file_utils.py,61,function, 2766,_get_temp_dir,tensorflow/tensorflow/python/distribute/distributed_file_utils.py,65,function, 2767,write_dirpath,tensorflow/tensorflow/python/distribute/distributed_file_utils.py,74,function,"Returns the writing dir that should be used to save files in a distributed setting. `dirpath` will be created if it doesn't exist. Args: dirpath: Original dirpath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing dir path that should be used to save with distribution." 2768,remove_temp_dirpath,tensorflow/tensorflow/python/distribute/distributed_file_utils.py,102,function,"Removes the temp path after writing is finished. Args: dirpath: Original dirpath that would be used without distribution. strategy: The tf.distribute strategy object currently used." 2769,write_filepath,tensorflow/tensorflow/python/distribute/distributed_file_utils.py,125,function,"Returns the writing file path to be used to save files in a distributed setting. The directory containing `filepath` will be created if it doesn't exist. Args: filepath: Original filepath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing filepath that should be used to save files with distribution."
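The intended pairing of `write_filepath` and `remove_temp_dir_with_filepath` described above, sketched under the assumption that the internal `distributed_file_utils` module is importable; the /tmp path is illustrative:

```python
import tensorflow as tf
from tensorflow.python.distribute import distributed_file_utils

strategy = tf.distribute.MirroredStrategy()
filepath = "/tmp/ckpt/model.h5"  # illustrative path

# Non-chief workers receive a per-worker temp directory instead of the
# original path, so concurrent saves do not collide.
save_path = distributed_file_utils.write_filepath(filepath, strategy)
# ... each worker saves its file at `save_path` here ...
distributed_file_utils.remove_temp_dir_with_filepath(filepath, strategy)
```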
2770,remove_temp_dir_with_filepath,tensorflow/tensorflow/python/distribute/distributed_file_utils.py,142,function,"Removes the temp path for the file after writing is finished. Args: filepath: Original filepath that would be used without distribution. strategy: The tf.distribute strategy object currently used." 2771,DistributedFileUtilsTest,tensorflow/tensorflow/python/distribute/distributed_file_utils_test.py,25,class, 2772,_ThreadMode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,41,class, 2773,_CrossReplicaThreadMode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,49,class, 2774,_InReplicaThreadMode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,55,class, 2775,_push_per_thread_mode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,61,function, 2776,_pop_per_thread_mode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,65,function, 2777,_DefaultReplicaThreadMode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,69,class,"Type of default value returned by `_get_per_thread_mode()`. Used when the thread-local stack is empty." 2778,_get_per_thread_mode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,80,function, 2779,get_replica_context,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,92,function,"Returns the current `tf.distribute.ReplicaContext` or `None`. Returns `None` if in a cross-replica context. Note that execution: 1. starts in the default (single-replica) replica context (this function will return the default `ReplicaContext` object); 2. switches to cross-replica context (in which case this will return `None`) when entering a `with tf.distribute.Strategy.scope():` block; 3. switches to a (non-default) replica context inside `strategy.run(fn, ...)`; 4. if `fn` calls `get_replica_context().merge_call(merge_fn, ...)`, then inside `merge_fn` you are back in the cross-replica context (and again this function will return `None`). Most `tf.distribute.Strategy` methods may only be executed in a cross-replica context; in a replica context you should use the API of the `tf.distribute.ReplicaContext` object returned by this method instead. ``` assert tf.distribute.get_replica_context() is not None # default with strategy.scope(): assert tf.distribute.get_replica_context() is None def f(): replica_context = tf.distribute.get_replica_context() # for strategy assert replica_context is not None tf.print(""Replica id: "", replica_context.replica_id_in_sync_group, "" of "", replica_context.num_replicas_in_sync) strategy.run(f) ``` Returns: The current `tf.distribute.ReplicaContext` object when in a replica context scope, else `None`. Within a particular block, exactly one of these two things will be true: * `get_replica_context()` returns non-`None`, or * `tf.distribute.is_cross_replica_context()` returns True." 2780,get_cross_replica_context,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,139,function,"Returns the current tf.distribute.Strategy if in a cross-replica context. DEPRECATED: Please use `in_cross_replica_context()` and `get_strategy()` instead. Returns: Returns the current `tf.distribute.Strategy` object in a cross-replica context, or `None`. Exactly one of `get_replica_context()` and `get_cross_replica_context()` will return `None` in a particular block."
2781,in_cross_replica_context,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,156,function,"Returns `True` if in a cross-replica context. See `tf.distribute.get_replica_context` for details. ``` assert not tf.distribute.in_cross_replica_context() with strategy.scope(): assert tf.distribute.in_cross_replica_context() def f(): assert not tf.distribute.in_cross_replica_context() strategy.run(f) ``` Returns: `True` if in a cross-replica context (`get_replica_context()` returns `None`), or `False` if in a replica context (`get_replica_context()` returns non-`None`)." 2782,get_strategy,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,181,function,"Returns the current `tf.distribute.Strategy` object. Typically only used in a cross-replica context: ``` if tf.distribute.in_cross_replica_context(): strategy = tf.distribute.get_strategy() ... ``` Returns: A `tf.distribute.Strategy` object. Inside a `with strategy.scope()` block, it returns `strategy`, otherwise it returns the default (single-replica) `tf.distribute.Strategy` object." 2783,has_strategy,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,201,function,"Returns whether there is a current non-default `tf.distribute.Strategy`. ``` assert not tf.distribute.has_strategy() with strategy.scope(): assert tf.distribute.has_strategy() ``` Returns: True if inside a `with strategy.scope():`." 2784,get_strategy_and_replica_context,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,216,function, 2785,experimental_set_strategy,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,222,function,"Set a `tf.distribute.Strategy` as current without `with strategy.scope()`. ``` tf.distribute.experimental_set_strategy(strategy1) f() tf.distribute.experimental_set_strategy(strategy2) g() tf.distribute.experimental_set_strategy(None) h() ``` is equivalent to: ``` with strategy1.scope(): f() with strategy2.scope(): g() h() ``` In general, you should use the `with strategy.scope():` API, but this alternative may be convenient in notebooks where you would otherwise have to put each cell in a `with strategy.scope():` block. Note: This should only be called outside of any TensorFlow scope to avoid improper nesting. Args: strategy: A `tf.distribute.Strategy` object or None. Raises: RuntimeError: If called inside a `with strategy.scope():`." 2786,enter_or_assert_strategy,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,275,function, 2787,_assert_strategy,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,302,function, 2788,_get_default_strategy,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,313,function, 2789,_get_default_replica_context,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,332,function, 2790,_get_default_replica_mode,tensorflow/tensorflow/python/distribute/distribution_strategy_context.py,342,function, 2791,_count_ps,tensorflow/tensorflow/python/distribute/estimator_training.py,40,function,Counts the number of parameter servers in cluster_spec. 2792,_count_worker,tensorflow/tensorflow/python/distribute/estimator_training.py,49,function,Counts the number of workers (including chief) in cluster_spec. 2793,_get_global_id,tensorflow/tensorflow/python/distribute/estimator_training.py,59,function,Returns the global id of the given task type in a cluster.
2794,_init_run_config_from_worker_context,tensorflow/tensorflow/python/distribute/estimator_training.py,90,function,Initializes run config from distribute coordinator's worker context. 2795,init_run_config,tensorflow/tensorflow/python/distribute/estimator_training.py,127,function,Initializes RunConfig for distribution strategies. 2796,should_run_distribute_coordinator,tensorflow/tensorflow/python/distribute/estimator_training.py,181,function,Checks the config to see whether to run distribute coordinator. 2797,train_and_evaluate,tensorflow/tensorflow/python/distribute/estimator_training.py,203,function,"Run distribute coordinator for Estimator's `train_and_evaluate`. Args: estimator: An `Estimator` instance to train and evaluate. train_spec: A `TrainSpec` instance to specify the training specification. eval_spec: An `EvalSpec` instance to specify the evaluation and export specification. executor_cls: the evaluation executor class of Estimator. Raises: ValueError: if `distribute_coordinator_mode` is None in RunConfig." 2798,estimator_train,tensorflow/tensorflow/python/distribute/estimator_training.py,295,function,Run distribute coordinator for Estimator's `train` method. 2799,estimator_evaluate,tensorflow/tensorflow/python/distribute/estimator_training.py,344,function,Run distribute coordinator for Estimator's `evaluate` method. 2800,get_distributed_dataset,tensorflow/tensorflow/python/distribute/input_lib.py,61,function,"Returns a distributed dataset from the given tf.data.Dataset instance. This is a common function that is used by all strategies to return a distributed dataset. The distributed dataset instance returned is different depending on whether we are in a TF 1 or TF 2 context. The distributed dataset instances returned differ from each other in the APIs supported by each of them. Args: dataset: a tf.data.Dataset instance. input_workers: an InputWorkers object which specifies devices on which iterators should be created. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle the last partial batch. split_batch_by: Optional integer. If present, we ""split"" each batch of the dataset by the `split_batch_by` value. input_context: `InputContext` for sharding. Only pass this in for between graph multi-worker cases where there is only one `input_worker`. In these cases, we will shard based on the `input_pipeline_id` and `num_input_pipelines` in the `InputContext`. Returns: A distributed dataset instance." 2801,get_distributed_datasets_from_function,tensorflow/tensorflow/python/distribute/input_lib.py,106,function,"Returns a distributed dataset from the given input function. This is a common function that is used by all strategies to return a distributed dataset. The distributed dataset instance returned is different depending on whether we are in a TF 1 or TF 2 context. The distributed dataset instances returned differ from each other in the APIs supported by each of them. Args: dataset_fn: a function that returns a tf.data.Dataset instance. input_workers: an InputWorkers object which specifies devices on which iterators should be created. input_contexts: A list of `InputContext` instances to be passed to call(s) to `dataset_fn`. Length and order should match worker order in `worker_device_pairs`. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle the last partial batch. Returns: A distributed dataset instance." 2802,DistributedIteratorInterface,tensorflow/tensorflow/python/distribute/input_lib.py,146,class,"An iterator over `tf.distribute.DistributedDataset`.
`tf.distribute.DistributedIterator` is the primary mechanism for enumerating elements of a `tf.distribute.DistributedDataset`. It supports the Python Iterator protocol, which means it can be iterated over using a for-loop or by fetching individual elements explicitly via `get_next()`. You can create a `tf.distribute.DistributedIterator` by calling `iter` on a `tf.distribute.DistributedDataset` or creating a Python loop over a `tf.distribute.DistributedDataset`. Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input) on distributed input for more examples and caveats." 2803,DistributedDatasetInterface,tensorflow/tensorflow/python/distribute/input_lib.py,258,class,"Represents a dataset distributed among devices and machines. A `tf.distribute.DistributedDataset` could be thought of as a ""distributed"" dataset. When you use the `tf.distribute` API to scale training to multiple devices or machines, you also need to distribute the input data, which leads to a `tf.distribute.DistributedDataset` instance, instead of a `tf.data.Dataset` instance in the non-distributed case. In TF 2.x, `tf.distribute.DistributedDataset` objects are Python iterables. Note: `tf.distribute.DistributedDataset` instances are *not* of type `tf.data.Dataset`. It only supports two usages we will mention below: iteration and `element_spec`. We don't support any other APIs to transform or inspect the dataset. There are two APIs to create a `tf.distribute.DistributedDataset` object: `tf.distribute.Strategy.experimental_distribute_dataset(dataset)` and `tf.distribute.Strategy.experimental_distribute_datasets_from_function(dataset_fn)`. *When to use which?* When you have a `tf.data.Dataset` instance, and the regular batch splitting (i.e. re-batch the input `tf.data.Dataset` instance with a new batch size that is equal to the global batch size divided by the number of replicas in sync) and autosharding (i.e. the `tf.data.experimental.AutoShardPolicy` options) work for you, use the former API. Otherwise, if you are *not* using a canonical `tf.data.Dataset` instance, or you would like to customize the batch splitting or sharding, you can wrap this logic in a `dataset_fn` and use the latter API. Both APIs handle prefetching to the device for the user. For more details and examples, follow the links to the APIs. There are two main usages of a `DistributedDataset` object: 1. Iterate over it to generate the input for a single device or multiple devices, which is a `tf.distribute.DistributedValues` instance. To do this, you can: * use a Pythonic for-loop construct: >>> global_batch_size = 4 >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(4).batch(global_batch_size) >>> dist_dataset = strategy.experimental_distribute_dataset(dataset) >>> @tf.function ... def train_step(input): ... features, labels = input ... return labels - 0.3 * features >>> for x in dist_dataset: ... # train_step trains the model using the dataset elements ... loss = strategy.run(train_step, args=(x,)) ... print(""Loss is"", loss) Loss is PerReplica:{ 0: tf.Tensor( [[0.7] [0.7]], shape=(2, 1), dtype=float32), 1: tf.Tensor( [[0.7] [0.7]], shape=(2, 1), dtype=float32) } Placing the loop inside a `tf.function` will give a performance boost. However, `break` and `return` are currently not supported if the loop is placed inside a `tf.function`.
We also don't support placing the loop inside a `tf.function` when using `tf.distribute.experimental.MultiWorkerMirroredStrategy` or `tf.distribute.experimental.TPUStrategy` with multiple workers. * use `__iter__` to create an explicit iterator, which is of type `tf.distribute.DistributedIterator` >>> global_batch_size = 4 >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> train_dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(50).batch(global_batch_size) >>> train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset) >>> @tf.function ... def distributed_train_step(dataset_inputs): ... def train_step(input): ... loss = tf.constant(0.1) ... return loss ... per_replica_losses = strategy.run(train_step, args=(dataset_inputs,)) ... return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,axis=None) >>> EPOCHS = 2 >>> STEPS = 3 >>> for epoch in range(EPOCHS): ... total_loss = 0.0 ... num_batches = 0 ... dist_dataset_iterator = iter(train_dist_dataset) ... for _ in range(STEPS): ... total_loss += distributed_train_step(next(dist_dataset_iterator)) ... num_batches += 1 ... average_train_loss = total_loss / num_batches ... template = (""Epoch {}, Loss: {:.4f}"") ... print (template.format(epoch+1, average_train_loss)) Epoch 1, Loss: 0.2000 Epoch 2, Loss: 0.2000 To achieve a performance improvement, you can also wrap the `strategy.run` call with a `tf.range` inside a `tf.function`. This runs multiple steps in a `tf.function`. Autograph will convert it to a `tf.while_loop` on the worker. However, it is less flexible compared with running a single step inside `tf.function`. For example, you cannot run things eagerly or run arbitrary Python code within the steps. 2. Inspect the `tf.TypeSpec` of the data generated by `DistributedDataset`. `tf.distribute.DistributedDataset` generates `tf.distribute.DistributedValues` as input to the devices. If you pass the input to a `tf.function` and would like to specify the shape and type of each Tensor argument to the function, you can pass a `tf.TypeSpec` object to the `input_signature` argument of the `tf.function`. To get the `tf.TypeSpec` of the input, you can use the `element_spec` property of the `tf.distribute.DistributedDataset` or `tf.distribute.DistributedIterator` object. For example: >>> global_batch_size = 4 >>> epochs = 1 >>> steps_per_epoch = 1 >>> mirrored_strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> dataset = tf.data.Dataset.from_tensors(([2.])).repeat(100).batch(global_batch_size) >>> dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset) >>> @tf.function(input_signature=[dist_dataset.element_spec]) ... def train_step(per_replica_inputs): ... def step_fn(inputs): ... return tf.square(inputs) ... return mirrored_strategy.run(step_fn, args=(per_replica_inputs,)) >>> for _ in range(epochs): ... iterator = iter(dist_dataset) ... for _ in range(steps_per_epoch): ... output = train_step(next(iterator)) ... print(output) PerReplica:{ 0: tf.Tensor( [[4.] [4.]], shape=(2, 1), dtype=float32), 1: tf.Tensor( [[4.] [4.]], shape=(2, 1), dtype=float32) } Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input) on distributed input for more examples and caveats." 2804,InputWorkers,tensorflow/tensorflow/python/distribute/input_lib.py,462,class,A 1-to-many mapping from input worker devices to compute devices.
2805,_get_next_as_optional,tensorflow/tensorflow/python/distribute/input_lib.py,502,function,Returns an empty dataset indicator and the next input from the iterator. 2806,_is_statically_shaped,tensorflow/tensorflow/python/distribute/input_lib.py,543,function,"Tests whether an iterator output is statically shaped. For sparse and ragged tensors this only tests the batch dimension. Args: tensor_class: a class from an iterator.output_classes list. shape: a TensorShape from an iterator.output_shapes list. Returns: True if the shape is static, False otherwise." 2807,_get_static_shape,tensorflow/tensorflow/python/distribute/input_lib.py,567,function,Returns a boolean indicating if the input is fully defined. 2808,DistributedIteratorBase,tensorflow/tensorflow/python/distribute/input_lib.py,583,class,Common implementation for all input iterators. 2809,DistributedIteratorV1,tensorflow/tensorflow/python/distribute/input_lib.py,699,class,Input Iterator for a distributed dataset. 2810,DistributedIteratorSpec,tensorflow/tensorflow/python/distribute/input_lib.py,754,class,Type specification for `DistributedIterator`. 2811,DistributedIterator,tensorflow/tensorflow/python/distribute/input_lib.py,849,class,Input Iterator for a distributed dataset. 2812,_IterableInput,tensorflow/tensorflow/python/distribute/input_lib.py,895,class,Base class for iterable inputs for distribution strategies. 2813,DistributedDataset,tensorflow/tensorflow/python/distribute/input_lib.py,935,class,Distributed dataset that supports prefetching to multiple devices. 2814,DistributedDatasetV1,tensorflow/tensorflow/python/distribute/input_lib.py,1043,class,Distributed dataset that supports prefetching to multiple devices. 2815,DistributedDatasetsFromFunction,tensorflow/tensorflow/python/distribute/input_lib.py,1121,class,Inputs created from dataset function. 2816,DistributedDatasetsFromFunctionV1,tensorflow/tensorflow/python/distribute/input_lib.py,1187,class,Inputs created from dataset function. 2817,InputFunctionIterator,tensorflow/tensorflow/python/distribute/input_lib.py,1229,class,Iterator created from input function. 2818,DatasetIterator,tensorflow/tensorflow/python/distribute/input_lib.py,1278,class,Iterator created from input dataset. 2819,_dummy_tensor_fn,tensorflow/tensorflow/python/distribute/input_lib.py,1319,function,A function to create dummy tensors from `value_structure`. 2820,_recover_shape_fn,tensorflow/tensorflow/python/distribute/input_lib.py,1365,function,Recovers the shape of `data` to match the shape of `value_structure`. 2821,_SingleWorkerDatasetIteratorBase,tensorflow/tensorflow/python/distribute/input_lib.py,1393,class,Iterator for a single `tf.data.Dataset`. 2822,_SingleWorkerDatasetIteratorSpec,tensorflow/tensorflow/python/distribute/input_lib.py,1491,class,Type specification for `_SingleWorkerOwnedDatasetIterator`. 2823,_SingleWorkerOwnedDatasetIterator,tensorflow/tensorflow/python/distribute/input_lib.py,1533,class,Iterator for a DistributedDataset instance. 2824,_SingleWorkerDatasetIterator,tensorflow/tensorflow/python/distribute/input_lib.py,1632,class,Iterator for a single DistributedDatasetV1 instance. 2825,_SingleWorkerCallableIterator,tensorflow/tensorflow/python/distribute/input_lib.py,1670,class,Iterator for a single tensor-returning callable. 2826,_create_iterators_per_worker,tensorflow/tensorflow/python/distribute/input_lib.py,1703,function,Create a multidevice iterator on each of the workers.
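For dense tensors, the static-shape test that `_is_statically_shaped` performs reduces to checking that every dimension is known; a small illustration with the public `tf.TensorShape` API:

```python
import tensorflow as tf

# A shape is static (fully defined) only if no dimension is None.
print(tf.TensorShape([2, 3]).is_fully_defined())     # True
print(tf.TensorShape([None, 3]).is_fully_defined())  # False
```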
2827,_create_datasets_per_worker_with_input_context,tensorflow/tensorflow/python/distribute/input_lib.py,1723,function,Create device datasets per worker given a dataset function. 2828,_get_batched_dataset,tensorflow/tensorflow/python/distribute/input_lib.py,1736,function,Get the batched dataset from `d`. 2829,_get_batched_dataset_attributes,tensorflow/tensorflow/python/distribute/input_lib.py,1754,function,"Get `batch_size`, `drop_remainder` of dataset." 2830,_get_dataset_attributes,tensorflow/tensorflow/python/distribute/input_lib.py,1777,function,Get the underlying attributes from the dataset object. 2831,MultiStepContext,tensorflow/tensorflow/python/distribute/input_lib.py,1798,class,"A context object that can be used to capture things when running steps. This context object is useful when running multiple steps at a time using the `experimental_run_steps_on_iterator` API. For example, it allows the user's step function to specify which outputs to emit at what frequency. Currently it supports capturing output from the last step, as well as capturing non-tensor outputs. In the future it will be augmented to support other use cases such as outputting every N steps." 2832,_create_distributed_tensor_spec,tensorflow/tensorflow/python/distribute/input_lib.py,1900,function,"Create a `tf.TypeSpec` for a given strategy and input `tensor_spec`. Args: strategy: The given `tf.distribute` strategy. tensor_spec: `tf.TensorSpec` of a given value. The batch dimension of the shape should be None if you have partial batches. Returns: A `tf.TypeSpec` that matches the values produced by a given strategy. This can be a `tf.TensorSpec` or a `PerReplicaSpec`." 2833,_replace_per_replica_spec,tensorflow/tensorflow/python/distribute/input_lib.py,1928,function,"If `spec` is a `PerReplicaSpec`, then return its `i`th value_spec." 2834,DistributedIteratorTestBase,tensorflow/tensorflow/python/distribute/input_lib_test.py,59,class, 2835,DistributedIteratorSingleWorkerTest,tensorflow/tensorflow/python/distribute/input_lib_test.py,278,class, 2836,DistributedIteratorTensorTypeTest,tensorflow/tensorflow/python/distribute/input_lib_test.py,691,class,Tests for DistributedDataset with non-dense tensors. 2837,DistributedIteratorMultiWorkerTest,tensorflow/tensorflow/python/distribute/input_lib_test.py,864,class, 2838,DistributedIteratorTest,tensorflow/tensorflow/python/distribute/input_lib_type_spec_test.py,44,class, 2839,InputTypeSpecTest,tensorflow/tensorflow/python/distribute/input_lib_type_spec_test.py,154,class, 2840,RaggedTensorDistributedIteratorTest,tensorflow/tensorflow/python/distribute/input_lib_type_spec_test.py,252,class, 2841,_check_type_spec_structure,tensorflow/tensorflow/python/distribute/input_lib_type_spec_test.py,430,function,Verifies that `x` has the same structure as its `TypeSpec`. 2842,auto_shard_dataset,tensorflow/tensorflow/python/distribute/input_ops.py,30,function,"Shard the input pipeline by sharding the underlying list of files. Args: dataset: A `tf.data.Dataset` instance, typically the result of a series of dataset transformations. num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of shards operating in parallel. Same usage as in `tf.data.Dataset.shard`. index: A `tf.int64` scalar `tf.Tensor`, representing the worker index. Same usage as in `tf.data.Dataset.shard`. Returns: A modified `Dataset` obtained by sharding the pipeline by its underlying files. The input dataset will be returned if we cannot automatically determine a good way to shard the input dataset."
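The `num_shards`/`index` arguments of `auto_shard_dataset` follow the semantics of the public `tf.data.Dataset.shard`, which the following snippet illustrates:

```python
import tensorflow as tf

# Worker `index` keeps every `num_shards`-th element, offset by `index`.
dataset = tf.data.Dataset.range(8)
worker_1_shard = dataset.shard(num_shards=2, index=1)
print(list(worker_1_shard.as_numpy_iterator()))  # [1, 3, 5, 7]
```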
2843,_clone_dataset,tensorflow/tensorflow/python/distribute/input_ops.py,56,function,Returns a cloned version of `dataset`. 2844,_get_op_def,tensorflow/tensorflow/python/distribute/input_ops.py,64,function, 2845,_clone_helper,tensorflow/tensorflow/python/distribute/input_ops.py,68,function,"Helper method that recursively clones `op_to_clone`. Args: op_to_clone: The op we want to clone. variant_tensor_ops: A list of ops that we have to clone along the way. Returns: A dictionary mapping old_ops to new_ops created. Includes op_to_clone as a key." 2846,AutoShardDatasetTest,tensorflow/tensorflow/python/distribute/input_ops_test.py,37,class, 2847,_TestDataset,tensorflow/tensorflow/python/distribute/input_ops_test.py,261,class, 2848,CloneDatasetTest,tensorflow/tensorflow/python/distribute/input_ops_test.py,274,class, 2849,_labeled_dataset_fn,tensorflow/tensorflow/python/distribute/metrics_v1_test.py,32,function, 2850,_boolean_dataset_fn,tensorflow/tensorflow/python/distribute/metrics_v1_test.py,43,function, 2851,_threshold_dataset_fn,tensorflow/tensorflow/python/distribute/metrics_v1_test.py,56,function, 2852,_regression_dataset_fn,tensorflow/tensorflow/python/distribute/metrics_v1_test.py,69,function, 2853,all_combinations,tensorflow/tensorflow/python/distribute/metrics_v1_test.py,75,function, 2854,tpu_combinations,tensorflow/tensorflow/python/distribute/metrics_v1_test.py,86,function, 2855,MetricsV1Test,tensorflow/tensorflow/python/distribute/metrics_v1_test.py,97,class, 2856,_replica_id_tensor,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,42,function, 2857,_in_run,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,49,function, 2858,_outside_run_graph,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,54,function, 2859,MirroredFunctionStrategy,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,61,class,"Mirrors vars to distribute across multiple devices and machines. This strategy uses one replica per device and sync replication for its multi-GPU version. Unlike `tf.distribute.MirroredStrategy`, it creates a function for a single replica, and calls that function repeatedly instead of recording the operations for each replica separately." 2860,MirroredFunctionExtended,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,82,class,Implementation of MirroredFunctionStrategy. 2861,FnMergedValue,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,146,class, 2862,_wrap_tensors,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,152,function, 2863,_unwrap_tensors,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,158,function, 2864,MirroredFunctionReplicaContext,tensorflow/tensorflow/python/distribute/mirrored_function_strategy.py,164,class,ReplicaContext used in MirroredFunctionStrategy. 2865,MirroredFunctionStrategyTest,tensorflow/tensorflow/python/distribute/mirrored_function_strategy_test.py,32,class, 2866,call_for_each_replica,tensorflow/tensorflow/python/distribute/mirrored_run.py,45,function,"Calls `fn` on each worker device (replica). It's highly recommended to wrap the call to this function inside a `tf.function`, otherwise performance is poor. Args: strategy: `tf.distribute.Strategy`. fn: function to call on each worker device. args: positional arguments to `fn`. kwargs: keyword arguments to `fn`. Returns: Wrapped return value of `fn` from all replicas."
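A public-API sketch of the advice in the `call_for_each_replica` docstring above: route the per-replica function through `strategy.run` inside a `tf.function`. The single CPU device is illustrative:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(["CPU:0"])  # illustrative device

@tf.function  # wrapping in tf.function avoids the per-call overhead
def step(x):
    return strategy.run(lambda v: v * 2.0, args=(x,))

print(step(tf.constant(3.0)))  # tf.Tensor(6.0, shape=(), dtype=float32)
```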
2867,_enter_graph,tensorflow/tensorflow/python/distribute/mirrored_run.py,104,function,Context manager for selecting a graph and maybe eager mode. 2868,_cpu_device,tensorflow/tensorflow/python/distribute/mirrored_run.py,118,function, 2869,_RequestedStop,tensorflow/tensorflow/python/distribute/mirrored_run.py,124,class, 2870,_call_for_each_replica,tensorflow/tensorflow/python/distribute/mirrored_run.py,128,function,"Runs `fn` in separate threads, once per replica/worker device. Args: distribution: the DistributionStrategy object. fn: function to run (will be run once per replica, each in its own thread). args: positional arguments for `fn`. kwargs: keyword arguments for `fn`. Returns: Merged return value of `fn` across all replicas. Raises: RuntimeError: If fn() calls get_replica_context().merge_call() a different number of times on different replicas." 2871,_MirroredReplicaThread,tensorflow/tensorflow/python/distribute/mirrored_run.py,242,class,A thread that runs a function on a device. 2872,_MirroredReplicaContext,tensorflow/tensorflow/python/distribute/mirrored_run.py,361,class,ReplicaContext for synchronized replica. 2873,_is_device_list_single_worker,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,49,function,"Checks whether the devices list is for single or multi-worker. Args: devices: a list of device strings or tf.config.LogicalDevice objects, for either local or for remote devices. Returns: a boolean indicating whether these device strings are for local or for remote devices. Raises: ValueError: if device strings are not consistent." 2874,_cluster_spec_to_device_list,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,82,function,Returns a device list given a cluster spec. 2875,_group_device_list,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,98,function,"Groups the devices list by task_type and task_id. Args: devices: a list of device strings for remote devices. Returns: a dict of list of device strings mapping from task_type to a list of devices for the task_type in the ascending order of task_id." 2876,_is_gpu_device,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,127,function, 2877,_infer_num_gpus_per_worker,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,131,function,"Infers the number of GPUs on each worker. Currently, to make multi-worker cross device ops work, we need all workers to have the same number of GPUs. Args: devices: a list of device strings, can be either local devices or remote devices. Returns: number of GPUs per worker. Raises: ValueError if workers have different numbers of GPUs or if GPU indices are not consecutive and starting from 0." 2878,all_local_devices,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,171,function, 2879,all_devices,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,178,function, 2880,MirroredStrategy,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,188,class,"Synchronous training across multiple replicas on one machine. This strategy is typically used for training on one machine with multiple GPUs. For TPUs, use `tf.distribute.TPUStrategy`. To use `MirroredStrategy` with multiple workers, please refer to `tf.distribute.experimental.MultiWorkerMirroredStrategy`. For example, a variable created under a `MirroredStrategy` is a `MirroredVariable`. If no devices are specified in the constructor argument of the strategy, then it will use all the available GPUs. If no GPUs are found, it will use the available CPUs.
Note that TensorFlow treats all CPUs on a machine as a single device, and uses threads internally for parallelism. >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> with strategy.scope(): ... x = tf.Variable(1.) >>> x MirroredVariable:{ 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>, 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=1.0> } While using distribution strategies, all the variable creation should be done within the strategy's scope. This will replicate the variables across all the replicas and keep them in sync using an all-reduce algorithm. Variables created inside a `MirroredStrategy` which is wrapped with a `tf.function` are still `MirroredVariables`. >>> x = [] >>> @tf.function # Wrap the function with tf.function. ... def create_variable(): ... if not x: ... x.append(tf.Variable(1.)) ... return x[0] >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> with strategy.scope(): ... _ = create_variable() ... print(x[0]) MirroredVariable:{ 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>, 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=1.0> } `experimental_distribute_dataset` can be used to distribute the dataset across the replicas when writing your own training loop. If you are using `.fit` and `.compile` methods available in `tf.keras`, then `tf.keras` will handle the distribution for you. For example: ```python my_strategy = tf.distribute.MirroredStrategy() with my_strategy.scope(): @tf.function def distribute_train_epoch(dataset): def replica_fn(input): # process input and return result return result total_result = 0 for x in dataset: per_replica_result = my_strategy.run(replica_fn, args=(x,)) total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_result, axis=None) return total_result dist_dataset = my_strategy.experimental_distribute_dataset(dataset) for _ in range(EPOCHS): train_result = distribute_train_epoch(dist_dataset) ``` Args: devices: a list of device strings such as `['/gpu:0', '/gpu:1']`. If `None`, all available GPUs are used. If no GPUs are found, CPU is used. cross_device_ops: optional, a descendant of `CrossDeviceOps`. If this is not set, `NcclAllReduce()` will be used by default. One would customize this if NCCL isn't available or if a special implementation that exploits the particular hardware is available." 2881,MirroredStrategyV1,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,279,class, 2882,MirroredExtended,tensorflow/tensorflow/python/distribute/mirrored_strategy.py,292,class,Implementation of MirroredStrategy.
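The `cross_device_ops` argument described above is the usual escape hatch when NCCL is unavailable. A hedged sketch of overriding the default; the device list is an assumption:

```python
import tensorflow as tf

# tf.distribute.HierarchicalCopyAllReduce is a public CrossDeviceOps
# implementation; it (or tf.distribute.ReductionToOneDevice) is a common
# substitute where NCCL cannot be used, e.g. on Windows.
strategy = tf.distribute.MirroredStrategy(
    devices=["GPU:0", "GPU:1"],  # assumed devices
    cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
```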
2883,MirroredTwoDeviceDistributionTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,74,class, 2884,one_device_combinations,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,278,function, 2885,MirroredOneDeviceDistributionTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,288,class, 2886,MirroredStrategyVariableCreatorStackTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,327,class, 2887,MirroredStrategyCallForEachReplicaTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,371,class, 2888,MirroredStrategyNameScopeTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,505,class, 2889,MirroredThreeDeviceDistributionTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,649,class, 2890,MirroredVariableUpdateTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,671,class, 2891,MirroredAndSyncOnReadVariableInitializerTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,917,class, 2892,SyncOnReadVariableAssignTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,961,class, 2893,MockModel,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1015,class, 2894,MirroredStrategyDefunTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1036,class, 2895,MultiWorkerMirroredStrategyTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1157,class, 2896,RemoteSingleWorkerMirroredStrategyGraph,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1256,class, 2897,MultiWorkerMirroredStrategyTestWithChief,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1279,class, 2898,MirroredVariableStopGradientTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1346,class, 2899,FunctionTest,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1370,class, 2900,_replica_id,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1427,function, 2901,_replica_id_as_int,tensorflow/tensorflow/python/distribute/mirrored_strategy_test.py,1434,function, 2902,_replica_id,tensorflow/tensorflow/python/distribute/mirrored_variable_test.py,43,function, 2903,_mimic_two_cpus,tensorflow/tensorflow/python/distribute/mirrored_variable_test.py,50,function, 2904,MirroredVariableCreationTest,tensorflow/tensorflow/python/distribute/mirrored_variable_test.py,72,class,"Base class that tests mirrored variable creator. Currently it assumes all strategy objects have two replicas." 2905,AssignMovingAveragesTest,tensorflow/tensorflow/python/distribute/moving_averages_test.py,53,class, 2906,ExponentialMovingAverageTest,tensorflow/tensorflow/python/distribute/moving_averages_test.py,188,class, 2907,Process,tensorflow/tensorflow/python/distribute/multi_process_lib.py,35,class,A process simulating a worker for testing multi-worker training. 2908,test_main,tensorflow/tensorflow/python/distribute/multi_process_lib.py,44,function,Main function to be called within `__main__` of a test file. 2909,initialized,tensorflow/tensorflow/python/distribute/multi_process_lib.py,50,function,Returns whether the module is initialized. 2910,MultiProcessRunner,tensorflow/tensorflow/python/distribute/multi_process_runner.py,101,class,"A utility class to start multiple processes to simulate a cluster. We need to use multiple processes to simulate a cluster in TF 2.0 tests because TF 2.0 has some process-global data structures that have to be separated by processes. 
We also need child processes to test out our fault tolerance because shutting down a standard TensorFlow server within its process is not supported. Note: the main test program that uses this runner class must run its main program via `test_main` defined in this file. Using this runner in non-test binaries is not supported yet. This class is not thread-safe. Child processes will inherit the TF2 behavior flag." 2911,_Process,tensorflow/tensorflow/python/distribute/multi_process_runner.py,595,class,A modified `multiprocessing.Process` that can set up environment variables. 2912,_ProcFunc,tensorflow/tensorflow/python/distribute/multi_process_runner.py,617,class,Represents a callable to run in a subprocess. 2913,MultiProcessPoolRunner,tensorflow/tensorflow/python/distribute/multi_process_runner.py,730,class,"A utility class to start a process pool to simulate a cluster. It's similar to MultiProcessRunner, but uses a pool of processes to avoid the expensive initialization cost of TensorFlow." 2914,_pool_runner_worker,tensorflow/tensorflow/python/distribute/multi_process_runner.py,845,function,"Function that runs on the workers in a pool. It listens for callables to run and returns the result until `conn` is closed. It captures any exception raised while executing the callable and returns it through `conn`. Args: initializer: A callable to execute during startup. conn: A multiprocessing.Connection object to listen for tasks and send results." 2915,_run_contained,tensorflow/tensorflow/python/distribute/multi_process_runner.py,872,function,"Runs `proc_func` with `args` and `kwargs`. The function returns _ProcessStatusInfo which captures the return value and the exception. Args: proc_func: The function to be run. args: Optional positional arguments to be supplied to `proc_func`. kwargs: Optional keyword arguments to be supplied to `proc_func`. Returns: a _ProcessStatusInfo." 2916,SubprocessTimeoutError,tensorflow/tensorflow/python/distribute/multi_process_runner.py,908,class,"An error that indicates there is at least one subprocess timing out. When this is raised, a `MultiProcessRunnerResult` object can be retrieved by `SubprocessTimeoutError`'s mpr_result attribute. See `MultiProcessRunner.join()` for more information." 2917,UnexpectedSubprocessExitError,tensorflow/tensorflow/python/distribute/multi_process_runner.py,921,class,"An error indicating that at least one subprocess exited unexpectedly. When this is raised, a `MultiProcessRunnerResult` object can be retrieved by `UnexpectedSubprocessExitError`'s mpr_result attribute. See `MultiProcessRunner.join()` for more information." 2918,_set_tf_config,tensorflow/tensorflow/python/distribute/multi_process_runner.py,934,function,Set TF_CONFIG environment variable. 2919,run,tensorflow/tensorflow/python/distribute/multi_process_runner.py,948,function,"Runs functions in local child processes. It is a convenience method that creates a `MultiProcessRunner` object and invokes the `start` and `join` methods. Please see these methods for detailed documentation. Returns: A MultiProcessRunnerResult object returned from `MultiProcessRunner.join()`." 2920,barrier,tensorflow/tensorflow/python/distribute/multi_process_runner.py,985,function, 2921,test_main,tensorflow/tensorflow/python/distribute/multi_process_runner.py,994,function,Main function to be called within `__main__` of a test file.
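A hedged sketch of how the `run`/`test_main` pair above is typically wired together in a test file; the `cluster_spec` keyword and the `return_value` field are assumptions based on the docstrings, not verified signatures:

```python
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base

def proc_func():
  return "ok"  # runs once per simulated task, each in its own child process

def run_two_workers():
  # Assumed usage: run() builds a MultiProcessRunner, starts it, joins it,
  # and returns a MultiProcessRunnerResult.
  result = multi_process_runner.run(
      proc_func,
      cluster_spec=multi_worker_test_base.create_cluster_spec(num_workers=2))
  return result.return_value  # assumed field holding per-task return values

if __name__ == "__main__":
  # Per the MultiProcessRunner note above, test binaries must enter through
  # test_main() rather than plain unittest.main().
  multi_process_runner.test_main()
```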
2922,MultiProcessRunnerNoInitTest,tensorflow/tensorflow/python/distribute/multi_process_runner_no_init_test.py,26,class, 2923,proc_func_that_adds_task_type_in_return_data,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,36,function, 2924,proc_func_that_errors,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,40,function, 2925,proc_func_that_does_nothing,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,44,function, 2926,proc_func_that_adds_simple_return_data,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,48,function, 2927,proc_func_that_returns_args_and_kwargs,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,52,function, 2928,proc_func_with_barrier,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,56,function, 2929,proc_func_that_returns_pid,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,60,function, 2930,proc_func_that_sets_global,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,67,function, 2931,MultiProcessRunnerTest,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,74,class, 2932,MultiProcessPoolRunnerTest,tensorflow/tensorflow/python/distribute/multi_process_runner_test.py,392,class, 2933,MultiWorkerContinuousRunTest,tensorflow/tensorflow/python/distribute/multi_worker_continuous_run_test.py,47,class, 2934,pick_unused_port,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,64,function,Returns an unused and unassigned local port. 2935,_create_cluster,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,83,function,Creates and starts local servers and returns the cluster_spec dict. 2936,create_in_process_cluster,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,148,function,Create an in-process cluster that consists of only standard servers. 2937,create_cluster_spec,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,205,function,Create a cluster spec whose tasks use unused local ports. 2938,skip_if_grpc_server_cant_be_started,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,233,function, 2939,MultiWorkerTestBase,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,245,class,Base class for testing multi node strategy and dataset. 2940,SingleWorkerTestBaseGraph,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,367,class,Base class for testing remote single worker strategy graph and dataset. 2941,SingleWorkerTestBaseEager,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,375,class,Base class for testing remote single worker strategy eager and dataset. 2942,DummySession,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,387,class, 2943,MockOsEnv,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,396,class,A class that allows per-thread TF_CONFIG. 2944,IndependentWorkerTestBase,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,442,class,Testing infra for independent workers. 2945,MultiWorkerMultiProcessTest,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,548,class,Testing infra for independent workers using multiple processes.
2946,get_tf_config_task,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,638,function, 2947,get_tf_config_cluster_spec,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,642,function, 2948,get_task_type,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,646,function, 2949,get_task_index,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,650,function, 2950,is_chief,tensorflow/tensorflow/python/distribute/multi_worker_test_base.py,654,function, 2951,normalize_cluster_spec,tensorflow/tensorflow/python/distribute/multi_worker_util.py,26,function,"Makes `cluster_spec` into a `ClusterSpec` object. Args: cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. Returns: a `ClusterSpec` object. Raises: ValueError: if `cluster_spec` is not a dict or a `ClusterSpec` or a `ClusterDef`." 2952,_validate_cluster_spec,tensorflow/tensorflow/python/distribute/multi_worker_util.py,50,function,"Validates `cluster_spec`. It checks: 0) None of `cluster_spec`, `task_type`, and `task_id` is `None`. 1) task type is one of ""chief"", ""worker"" or ""evaluator"". 2) whether there is such a task type as `task_type` in the `cluster_spec`. The only exception is `evaluator`. In other words, it is still a valid configuration when `task_type` is `evaluator` but it doesn't appear in `cluster_spec`. This is to be compatible with `TF_CONFIG` in Estimator. 3) whether there is at most one ""chief"" job. 4) whether there is at most one ""evaluator"" job. 5) whether the `task_id` is smaller than the number of tasks for that particular `task_type`. Args: cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated. task_type: string indicating the type of the task. task_id: the id of the `task_type` in this cluster. Throws: ValueError: if `cluster_spec` fails any check." 2953,is_chief,tensorflow/tensorflow/python/distribute/multi_worker_util.py,97,function,"Returns whether the given task is chief in the cluster. Since there is at most one evaluator and the evaluator itself should be independent of the training cluster, the evaluator job is also a chief job on its own. If this is currently running under a `_WorkerContext` of distribute coordinator, the arguments can be omitted as the result is already available. Args: cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the cluster configurations. task_type: the task type in the cluster. task_id: the task id in the cluster. Returns: a boolean indicating whether the given task is chief. Raises: ValueError: if `task_type` is not in the `cluster_spec` or `task_id` exceeds the maximum id of the `task_type`." 2954,collective_leader,tensorflow/tensorflow/python/distribute/multi_worker_util.py,137,function,"Return the job name for the leader of collective ops. Args: cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the cluster configurations. task_type: the task type in the cluster. task_id: the task id in the cluster. Returns: a string indicating the leader job name or empty string if no need to set leader job." 2955,worker_count,tensorflow/tensorflow/python/distribute/multi_worker_util.py,171,function,Returns the number of workers in the cluster. 2956,id_in_cluster,tensorflow/tensorflow/python/distribute/multi_worker_util.py,192,function,"Returns a unique id for the task in the `task_type`'s cluster. It returns an id ranging from [0, `worker_count(task_type, task_id)`).
Note: this function assumes that the ""evaluator"" job is in its own cluster or its own partition of a cluster. Args: cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated. task_type: string indicating the type of the task. task_id: the id of the `task_type` in this cluster. Returns: an int indicating the unique id. Throws: ValueError: if `task_type` is not ""chief"", ""worker"" or ""evaluator""." 2957,should_save_checkpoint,tensorflow/tensorflow/python/distribute/multi_worker_util.py,230,function,"Returns whether the current worker should save checkpoints. In multi-worker training, if saving checkpoints is requested by the user or needed for fault tolerance, the cluster should save checkpoints, but not necessarily every worker in the cluster should. TODO(rchao): Consider generalizing this util to be `should_save_file` as there can be other files to save such as summary. Returns: Whether this particular worker in the cluster should save checkpoints." 2958,should_load_checkpoint,tensorflow/tensorflow/python/distribute/multi_worker_util.py,246,function,"Returns whether the current worker should load checkpoints. In multi-worker training, if loading checkpoints is requested by the user or needed for fault tolerance, the cluster should load checkpoints, but not necessarily every worker in the cluster should. Returns: Whether this particular worker in the cluster should load checkpoints." 2959,wait_for_other_workers,tensorflow/tensorflow/python/distribute/multi_worker_util.py,259,function,Waits for other workers to reach the same call to this method. 2960,has_worker_context,tensorflow/tensorflow/python/distribute/multi_worker_util.py,264,function,Returns whether a worker context has been entered. 2961,NormalizeClusterSpecTest,tensorflow/tensorflow/python/distribute/multi_worker_util_test.py,27,class, 2962,IsChiefTest,tensorflow/tensorflow/python/distribute/multi_worker_util_test.py,81,class, 2963,NumWorkersTest,tensorflow/tensorflow/python/distribute/multi_worker_util_test.py,114,class, 2964,IdInClusterTest,tensorflow/tensorflow/python/distribute/multi_worker_util_test.py,152,class, 2965,CollectiveLeaderTest,tensorflow/tensorflow/python/distribute/multi_worker_util_test.py,203,class, 2966,ClusterSpecValidationTest,tensorflow/tensorflow/python/distribute/multi_worker_util_test.py,241,class, 2967,init_var_from_numpy,tensorflow/tensorflow/python/distribute/numpy_dataset.py,32,function,Initialize `input_var` to `numpy_input` using `session` in graph mode. 2968,one_host_numpy_dataset,tensorflow/tensorflow/python/distribute/numpy_dataset.py,76,function,Create a dataset on `colocate_with` from `numpy_input`. 2969,SingleDevice,tensorflow/tensorflow/python/distribute/numpy_dataset.py,94,class,Used with `colocate_with` to create a non-mirrored variable. 2970,InitVarFromNumpyTest,tensorflow/tensorflow/python/distribute/numpy_dataset_test.py,29,class, 2971,OneDeviceStrategy,tensorflow/tensorflow/python/distribute/one_device_strategy.py,41,class,"A distribution strategy for running on a single device. Using this strategy will place any variables created in its scope on the specified device. Input distributed through this strategy will be prefetched to the specified device. Moreover, any functions called via `strategy.run` will also be placed on the specified device. Typical usage of this strategy could be testing your code with the tf.distribute.Strategy API before switching to other strategies which actually distribute to multiple devices/machines.
For example: ``` strategy = tf.distribute.OneDeviceStrategy(device=""/gpu:0"") with strategy.scope(): v = tf.Variable(1.0) print(v.device) # /job:localhost/replica:0/task:0/device:GPU:0 def step_fn(x): return x * 2 result = 0 for i in range(10): result += strategy.run(step_fn, args=(i,)) print(result) # 90 ```" 2972,OneDeviceStrategyV1,tensorflow/tensorflow/python/distribute/one_device_strategy.py,241,class, 2973,OneDeviceExtended,tensorflow/tensorflow/python/distribute/one_device_strategy.py,255,class,Implementation of OneDeviceStrategy. 2974,_OneDeviceReplicaContext,tensorflow/tensorflow/python/distribute/one_device_strategy.py,458,class,ReplicaContext for OneDeviceStrategy. 2975,OneDeviceStrategyTest,tensorflow/tensorflow/python/distribute/one_device_strategy_test.py,39,class, 2976,OneDeviceStrategyOnRemoteWorkerTest,tensorflow/tensorflow/python/distribute/one_device_strategy_test.py,168,class, 2977,PackedDistributedVariable,tensorflow/tensorflow/python/distribute/packed_distributed_variable.py,28,class,"A variable which packs multiple variables distributed across devices. It's only supported when eager execution is enabled. For op-by-op execution, use an unpacked handle on the current device; for function execution, use the packed handle to reduce the overhead of function calls." 2978,PackedVarAndDevice,tensorflow/tensorflow/python/distribute/packed_distributed_variable.py,247,class,Holds a packed distributed variable and a device. 2979,_tensor_conversion_packed_var_and_device,tensorflow/tensorflow/python/distribute/packed_distributed_variable.py,341,function, 2980,PackedDistributedVariableTest,tensorflow/tensorflow/python/distribute/packed_distributed_variable_test.py,31,class, 2981,ParameterServerStrategy,tensorflow/tensorflow/python/distribute/parameter_server_strategy.py,52,class,"An asynchronous multi-worker parameter server tf.distribute strategy. This strategy requires two roles: workers and parameter servers. Variables and updates to those variables will be assigned to parameter servers and other operations are assigned to workers. When each worker has more than one GPU, operations will be replicated on all GPUs. Even though operations may be replicated, variables are not, and each worker shares a common view of which parameter server each variable is assigned to. By default it uses `TFConfigClusterResolver` to detect configurations for multi-worker training. This requires a 'TF_CONFIG' environment variable and the 'TF_CONFIG' must have a cluster spec. This class assumes each worker is running the same code independently, but parameter servers are running a standard server. This means that while each worker will synchronously compute a single gradient update across all GPUs, updates between workers proceed asynchronously. Operations that occur only on the first replica (such as incrementing the global step), will occur on the first replica *of every worker*. It is expected to call `call_for_each_replica(fn, ...)` for any operations which potentially can be replicated across replicas (i.e. multiple GPUs) even if there is only a CPU or one GPU. When defining the `fn`, extra caution needs to be taken: 1) It is generally not recommended to open a device scope under the strategy's scope. A device scope (i.e. calling `tf.device`) will be merged with or override the device for operations but will not change the device for variables. 2) It is also not recommended to open a colocation scope (i.e. calling `tf.compat.v1.colocate_with`) under the strategy's scope.
For colocating variables, use `strategy.extended.colocate_vars_with` instead. Colocation of ops will possibly create device assignment conflicts. Note: This strategy only works with the Estimator API. Pass an instance of this strategy to the `train_distribute` argument when you create the `RunConfig`. This instance of `RunConfig` should then be passed to the `Estimator` instance on which `train_and_evaluate` is called. For example: ``` strategy = tf.distribute.experimental.ParameterServerStrategy() run_config = tf.estimator.RunConfig( train_distribute=strategy) estimator = tf.estimator.Estimator(config=run_config) tf.estimator.train_and_evaluate(estimator,...) ```" 2982,ParameterServerStrategyV1,tensorflow/tensorflow/python/distribute/parameter_server_strategy.py,154,class, 2983,ParameterServerStrategyExtended,tensorflow/tensorflow/python/distribute/parameter_server_strategy.py,172,class,Implementation of ParameterServerStrategy and CentralStorageStrategy. 2984,_get_replica_id_integer,tensorflow/tensorflow/python/distribute/parameter_server_strategy_test.py,66,function, 2985,create_test_objects,tensorflow/tensorflow/python/distribute/parameter_server_strategy_test.py,73,function, 2986,ParameterServerStrategyTestBase,tensorflow/tensorflow/python/distribute/parameter_server_strategy_test.py,102,class, 2987,ParameterServerStrategyTest,tensorflow/tensorflow/python/distribute/parameter_server_strategy_test.py,568,class, 2988,ParameterServerStrategyWithChiefTest,tensorflow/tensorflow/python/distribute/parameter_server_strategy_test.py,823,class, 2989,CentralStorageStrategyTest,tensorflow/tensorflow/python/distribute/parameter_server_strategy_test.py,891,class, 2990,AggregatingVariable,tensorflow/tensorflow/python/distribute/ps_values.py,35,class,A wrapper around a variable that aggregates updates across replicas. 2991,_tensor_conversion_aggregate,tensorflow/tensorflow/python/distribute/ps_values.py,307,function, 2992,AggregatingVariableTest,tensorflow/tensorflow/python/distribute/ps_values_test.py,38,class, 2993,ReduceOp,tensorflow/tensorflow/python/distribute/reduce_util.py,28,class,"Indicates how a set of values should be reduced. * `SUM`: Add all the values. * `MEAN`: Take the arithmetic mean (""average"") of the values." 2994,get_gpus,tensorflow/tensorflow/python/distribute/remote_mirrored_strategy_eager_test.py,29,function, 2995,RemoteSingleWorkerMirroredStrategyEager,tensorflow/tensorflow/python/distribute/remote_mirrored_strategy_eager_test.py,48,class, 2996,ShardedVariable,tensorflow/tensorflow/python/distribute/sharded_variable.py,30,class,"A container for `Variables` that should be treated as shards. Variables that are too large to fit on a single device (e.g., large embeddings) may need to be sharded over multiple devices. This class maintains a list of smaller variables that can be independently stored on separate devices (e.g., multiple parameter servers), and saves and restores those variables as if they were a single larger variable. Objects of this class can be saved with a given number of shards and then restored from a checkpoint into a different number of shards. Objects of this class can be saved to SavedModel format using `tf.saved_model.save`. The SavedModel can be used by serving programs like TF Serving. Loading the SavedModel with `tf.saved_model.load` is not yet supported.
Since a `ShardedVariable` can be saved and then restored with a different number of shards depending on the restore environment (for example, TF Serving may restore to one shard for serving efficiency), one should generally not assume, when using `ShardedVariable` in a `tf.function`, that it has the same number of shards across save and load. Sharding is only supported along the first dimension. >>> class Model(tf.Module): ... def __init__(self): ... self.sharded_variable = ShardedVariable([ ... tf.Variable([3.0], dtype=tf.float32), ... tf.Variable([2.0], dtype=tf.float32) ... ]) ... ... @tf.function(input_signature=[tf.TensorSpec([], dtype=tf.int32)]) ... def fn(self, x): ... return tf.nn.embedding_lookup(self.sharded_variable.variables, x) ... ... @tf.function(input_signature=[tf.TensorSpec([], dtype=tf.int32)]) ... def serve_fn(self, x): ... return tf.nn.embedding_lookup(self.sharded_variable.variables, x) >>> >>> model = Model() >>> model.fn(1).numpy() 2.0 >>> tf.saved_model.save(model, export_dir='/tmp/saved_model', ... signatures=model.serve_fn)" 2997,_load_and_run,tensorflow/tensorflow/python/distribute/sharded_variable_test.py,42,function,Load a SavedModel into a TF 1.x-style graph and run `signature_key`. 2998,ShardedVariableTest,tensorflow/tensorflow/python/distribute/sharded_variable_test.py,63,class, 2999,_canonicalize_variable_name,tensorflow/tensorflow/python/distribute/shared_variable_creator.py,27,function, 3000,make_fn,tensorflow/tensorflow/python/distribute/shared_variable_creator.py,38,function,"Construct the variable creator function for device `device_id`. Constructs custom variable creator functions for the given device. On the first device (device_id == 0), it creates the variable using the `next_creator`, and stores it in the provided `shared_variable_store`. On all other devices (device_id > 0), it tries to re-use the variable already created with the same name. If no such variable exists, it throws an error. Additionally, we de-uniquify variable names before checking for matches. This helps re-use variables which are intended to be the same but have different names due to variable uniquification happening upstream. Since this means we may have multiple variables with the same canonical name, we store them in a list per canonical name and return them in the same order as well. Args: shared_variable_store: A dictionary that we will use to store variables created on the first device, and re-used by creators for other devices. device_id: Integer index of the device whose creator should be constructed. Returns: An appropriate creator function based on device_id." 3001,CanonicalizeVariableNameTest,tensorflow/tensorflow/python/distribute/shared_variable_creator_test.py,27,class, 3002,SharedVariableCreatorTest,tensorflow/tensorflow/python/distribute/shared_variable_creator_test.py,47,class, 3003,single_loss_example,tensorflow/tensorflow/python/distribute/single_loss_example.py,32,function,Build a very simple network to use in tests and examples. 3004,minimize_loss_example,tensorflow/tensorflow/python/distribute/single_loss_example.py,54,function,Example of non-distribution-aware legacy code. 3005,batchnorm_example,tensorflow/tensorflow/python/distribute/single_loss_example.py,82,function,Example of non-distribution-aware legacy code with batch normalization. 3006,Step,tensorflow/tensorflow/python/distribute/step_fn.py,25,class,Interface for performing each step of a training algorithm.
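The de-uniquification described for `make_fn`/`_canonicalize_variable_name` above can be illustrated with a small sketch; the actual regex in shared_variable_creator.py may differ, so treat this as an assumption:

```python
import re

# Assumption: variable uniquification appends "_<N>" to a path component,
# so stripping that suffix from each component recovers the canonical name
# and lets "dense_1/kernel_1" match "dense/kernel" across devices.
_UNIQUIFIED_SUFFIX = re.compile(r"_\d+$")

def canonicalize_variable_name(name):
  return "/".join(
      _UNIQUIFIED_SUFFIX.sub("", part) for part in name.split("/"))

assert canonicalize_variable_name("dense_1/kernel_1") == "dense/kernel"
assert canonicalize_variable_name("dense/bias") == "dense/bias"
```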
3007,StandardInputStep,tensorflow/tensorflow/python/distribute/step_fn.py,45,class,"Step with a standard implementation of input handling. Args: dataset_fn: a function that returns a tf.data Dataset that produces the input for the model." 3008,StandardSingleLossStep,tensorflow/tensorflow/python/distribute/step_fn.py,61,class,"A step function that implements a training step for a feed forward network. An instance of this class is intended to be used as a callable: ```python ... step = step_fn.StandardSingleLossStep( dataset, loss_fn, optimizer, distribution) # Run a single training step on a given DistributionStrategy: step(distribution) ... ``` Args: dataset_fn: a function that returns a tf.data Dataset that produces the input for the model. loss_fn: a function that takes a context and inputs as arguments. It returns the loss for those inputs. `context` is an instance of `values.MultiStepContext` that will be passed when `loss_fn` is run. `context` can be used to specify the outputs to be returned from `loss_fn`, among other things. optimizer: an optimizer that implements an update rule. distribution: a `DistributionStrategy` object." 3009,_get_tpu_strategy_creator,tensorflow/tensorflow/python/distribute/strategy_combinations.py,47,function, 3010,_get_multi_worker_mirrored_creator,tensorflow/tensorflow/python/distribute/strategy_combinations.py,95,function, 3011,_shutdown_at_exit,tensorflow/tensorflow/python/distribute/strategy_combinations.py,234,function, 3012,set_virtual_cpus_to_at_least,tensorflow/tensorflow/python/distribute/strategy_combinations.py,253,function,Create virtual CPU devices if they haven't yet been created. 3013,strategy_minus_tpu_combinations,tensorflow/tensorflow/python/distribute/strategy_combinations.py,332,function, 3014,tpu_strategy_combinations,tensorflow/tensorflow/python/distribute/strategy_combinations.py,337,function, 3015,all_strategy_combinations,tensorflow/tensorflow/python/distribute/strategy_combinations.py,341,function, 3016,all_strategy_minus_default_and_tpu_combinations,tensorflow/tensorflow/python/distribute/strategy_combinations.py,345,function, 3017,all_strategy_combinations_minus_default,tensorflow/tensorflow/python/distribute/strategy_combinations.py,354,function, 3018,VirtualDevicesTest,tensorflow/tensorflow/python/distribute/strategy_combinations_test.py,34,class, 3019,StrategyCombinationsTest,tensorflow/tensorflow/python/distribute/strategy_combinations_test.py,69,class, 3020,StrategyReduceTest,tensorflow/tensorflow/python/distribute/strategy_common_test.py,39,class, 3021,DistributedCollectiveAllReduceStrategyTest,tensorflow/tensorflow/python/distribute/strategy_common_test.py,80,class, 3022,StrategyClusterResolverTest,tensorflow/tensorflow/python/distribute/strategy_common_test.py,195,class, 3023,StrategyReduceTest,tensorflow/tensorflow/python/distribute/strategy_reduce_test.py,31,class, 3024,_TestException,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,58,class, 3025,_maybe_run_in_function,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,64,function, 3026,_raise_exception_fn,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,73,function, 3027,_merge_raises_fn,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,79,function, 3028,_call_raises_fn,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,86,function, 3029,_merge_call_raises_fn,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,93,function, 
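The `set_virtual_cpus_to_at_least` entry above (strategy_combinations.py) can be approximated with public APIs; a hedged sketch, assuming the TensorFlow runtime has not yet been initialized when it runs:

```python
import tensorflow as tf

def ensure_virtual_cpus(num_virtual_cpus):
  # Split the single physical CPU into N logical devices so multi-device
  # tests can run on CPU-only machines. Must be called before TensorFlow
  # initializes its devices, or set_logical_device_configuration raises.
  physical = tf.config.list_physical_devices("CPU")[0]
  configs = tf.config.get_logical_device_configuration(physical)
  if configs is None or len(configs) < num_virtual_cpus:
    tf.config.set_logical_device_configuration(
        physical,
        [tf.config.LogicalDeviceConfiguration()] * num_virtual_cpus)

ensure_virtual_cpus(2)
print(tf.config.list_logical_devices("CPU"))  # expect CPU:0 and CPU:1
```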
3030,_call_merge_raises_fn,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,100,function, 3031,_merge_call_merge_raises_fn,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,108,function, 3032,_events_from_logdir,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,112,function,Reads summary events from log directory. 3033,create_variable_like_keras_layer,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,126,function,Utility for creating variables that work like the variables in a Keras layer. 3034,is_optimizer_v2_instance,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,134,function, 3035,DistributionTestBase,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,141,class,Some tests that should work with any DistributionStrategy. 3036,OneDeviceDistributionTestBase,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,484,class,Some tests that should work with any one-device DistributionStrategy. 3037,TwoDeviceDistributionTestBase,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,605,class,Some tests that should work with any two-device DistributionStrategy. 3038,RemoteSingleWorkerMirroredStrategyBase,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,721,class,Tests for a Remote single worker. 3039,_all_sum,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,796,function, 3040,_all_mean,tensorflow/tensorflow/python/distribute/strategy_test_lib.py,801,function, 3041,skip_summary,tensorflow/tensorflow/python/distribute/summary_op_util.py,27,function,"Determines if summary should be skipped. If using multiple replicas in distributed strategy, skip summaries on all replicas except the first one (replica_id=0). Returns: True if the summary is skipped; False otherwise." 3042,gather,tensorflow/tensorflow/python/distribute/test_util.py,33,function,"Gathers value from all workers. This is intended for tests before we implement an official all-gather API. Args: strategy: a `tf.distribute.Strategy`. value: a nested structure of n-dim `tf.distribute.DistributedValue`s of `tf.Tensor`, or a `tf.Tensor` if the strategy only has one replica. Cannot contain tf.sparse.SparseTensor. Returns: a (n+1)-dim `tf.Tensor`." 3043,_gather,tensorflow/tensorflow/python/distribute/test_util.py,50,function,Gathers a single value. 3044,GatherTest,tensorflow/tensorflow/python/distribute/test_util_test.py,40,class, 3045,TFFunctionTest,tensorflow/tensorflow/python/distribute/tf_function_test.py,39,class, 3046,maybe_init_scope,tensorflow/tensorflow/python/distribute/tpu_strategy.py,71,function, 3047,validate_run_function,tensorflow/tensorflow/python/distribute/tpu_strategy.py,79,function,Validate the function passed into strategy.run. 3048,TPUStrategyV2,tensorflow/tensorflow/python/distribute/tpu_strategy.py,105,class,"Synchronous training on TPUs and TPU Pods. To construct a TPUStrategy object, you need to run the initialization code as below: >>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') >>> tf.config.experimental_connect_to_cluster(resolver) >>> tf.tpu.experimental.initialize_tpu_system(resolver) >>> strategy = tf.distribute.TPUStrategy(resolver) While using distribution strategies, the variables created within the strategy's scope will be replicated across all the replicas and can be kept in sync using all-reduce algorithms.
To run TF2 programs on TPUs, you can either use `.compile` and `.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized training loop by calling `strategy.run` directly. Note that TPUStrategy doesn't support pure eager execution, so please make sure the function passed into `strategy.run` is a `tf.function` or `strategy.run` is called inside a `tf.function` if eager behavior is enabled. See more details in https://www.tensorflow.org/guide/tpu. `experimental_distribute_datasets_from_function` and `experimental_distribute_dataset` APIs can be used to distribute the dataset across the TPU workers when writing your own training loop. If you are using `fit` and `compile` methods available in `tf.keras.Model`, then Keras will handle the distribution for you. An example of writing customized training loop on TPUs: >>> with strategy.scope(): ... model = tf.keras.Sequential([ ... tf.keras.layers.Dense(2, input_shape=(5,)), ... ]) ... optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) >>> def dataset_fn(ctx): ... x = np.random.random((2, 5)).astype(np.float32) ... y = np.random.randint(2, size=(2, 1)) ... dataset = tf.data.Dataset.from_tensor_slices((x, y)) ... return dataset.repeat().batch(1, drop_remainder=True) >>> dist_dataset = strategy.experimental_distribute_datasets_from_function( ... dataset_fn) >>> iterator = iter(dist_dataset) >>> @tf.function() ... def train_step(iterator): ... ... def step_fn(inputs): ... features, labels = inputs ... with tf.GradientTape() as tape: ... logits = model(features, training=True) ... loss = tf.keras.losses.sparse_categorical_crossentropy( ... labels, logits) ... ... grads = tape.gradient(loss, model.trainable_variables) ... optimizer.apply_gradients(zip(grads, model.trainable_variables)) ... ... strategy.run(step_fn, args=(next(iterator),)) >>> train_step(iterator) For the advanced use cases like model parallelism, you can set `experimental_device_assignment` argument when creating TPUStrategy to specify number of replicas and number of logical devices. Below is an example to initialize TPU system with 2 logical devices and 1 replica. >>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') >>> tf.config.experimental_connect_to_cluster(resolver) >>> topology = tf.tpu.experimental.initialize_tpu_system(resolver) >>> device_assignment = tf.tpu.experimental.DeviceAssignment.build( ... topology, ... computation_shape=[1, 1, 1, 2], ... num_replicas=1) >>> strategy = tf.distribute.TPUStrategy( ... resolver, experimental_device_assignment=device_assignment) Then you can run a `tf.add` operation only on logical device 0. >>> @tf.function() ... def step_fn(inputs): ... features, _ = inputs ... output = tf.add(features, features) ... ... # Add operation will be executed on logical device 0. ... output = strategy.experimental_assign_to_logical_device(output, 0) ... return output >>> dist_dataset = strategy.experimental_distribute_datasets_from_function( ... dataset_fn) >>> iterator = iter(dist_dataset) >>> strategy.run(step_fn, args=(next(iterator),))" 3049,TPUStrategy,tensorflow/tensorflow/python/distribute/tpu_strategy.py,284,class,"Synchronous training on TPUs and TPU Pods. 
To construct a TPUStrategy object, you need to run the initialization code as below: >>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') >>> tf.config.experimental_connect_to_cluster(resolver) >>> tf.tpu.experimental.initialize_tpu_system(resolver) >>> strategy = tf.distribute.experimental.TPUStrategy(resolver) While using distribution strategies, the variables created within the strategy's scope will be replicated across all the replicas and can be kept in sync using all-reduce algorithms. To run TF2 programs on TPUs, you can either use `.compile` and `.fit` APIs in `tf.keras` with TPUStrategy, or write your own customized training loop by calling `strategy.run` directly. Note that TPUStrategy doesn't support pure eager execution, so please make sure the function passed into `strategy.run` is a `tf.function` or `strategy.run` is called inside a `tf.function` if eager behavior is enabled." 3050,TPUStrategyV1,tensorflow/tensorflow/python/distribute/tpu_strategy.py,362,class,TPU distribution strategy implementation. 3051,TPUExtended,tensorflow/tensorflow/python/distribute/tpu_strategy.py,462,class,Implementation of TPUStrategy. 3052,_TPUReplicaContext,tensorflow/tensorflow/python/distribute/tpu_strategy.py,1200,class,Replication Context class for TPU Strategy. 3053,_set_last_step_outputs,tensorflow/tensorflow/python/distribute/tpu_strategy.py,1228,function,Sets the last step outputs on the given context. 3054,get_tpu_cluster_resolver,tensorflow/tensorflow/python/distribute/tpu_strategy_compilation_test.py,36,function, 3055,get_tpu_strategy,tensorflow/tensorflow/python/distribute/tpu_strategy_compilation_test.py,45,function, 3056,TPUStrategyCompilationTest,tensorflow/tensorflow/python/distribute/tpu_strategy_compilation_test.py,55,class, 3057,get_tpu_cluster_resolver,tensorflow/tensorflow/python/distribute/tpu_strategy_test.py,70,function, 3058,get_tpu_strategy,tensorflow/tensorflow/python/distribute/tpu_strategy_test.py,79,function, 3059,TPUTest,tensorflow/tensorflow/python/distribute/tpu_strategy_test.py,89,class, 3060,TPUStrategyTest,tensorflow/tensorflow/python/distribute/tpu_strategy_test.py,146,class, 3061,TPUStrategyDataPrefetchTest,tensorflow/tensorflow/python/distribute/tpu_strategy_test.py,652,class, 3062,TPUStrategyDistributionTest,tensorflow/tensorflow/python/distribute/tpu_strategy_test.py,752,class, 3063,DeviceAssignmentTest,tensorflow/tensorflow/python/distribute/tpu_strategy_test.py,972,class, 3064,_maybe_enter_graph,tensorflow/tensorflow/python/distribute/tpu_values.py,39,function, 3065,_maybe_on_device,tensorflow/tensorflow/python/distribute/tpu_values.py,51,function, 3066,_make_raw_assign_fn,tensorflow/tensorflow/python/distribute/tpu_values.py,60,function, 3067,TPUVariableMixin,tensorflow/tensorflow/python/distribute/tpu_values.py,77,class,Mixin for TPU variables. 3068,enclosing_tpu_context,tensorflow/tensorflow/python/distribute/tpu_values.py,183,function,"Returns the TPUReplicateContext, which exists inside a tpu.rewrite()." 3069,TPUMirroredVariable,tensorflow/tensorflow/python/distribute/tpu_values.py,200,class,Holds a map from replica to TPU variables whose values are kept in sync. 3070,TPUSyncOnReadVariable,tensorflow/tensorflow/python/distribute/tpu_values.py,287,class,Holds a map from replica to variables whose values are reduced on save. 3071,_on_write_update_replica,tensorflow/tensorflow/python/distribute/values.py,45,function,Updates variables with ON_WRITE synchronization in replica context. 
3072,DistributedValues,tensorflow/tensorflow/python/distribute/values.py,76,class,"Base class for representing distributed values. A subclass instance of `tf.distribute.DistributedValues` is created when creating variables within a distribution strategy, iterating a `tf.distribute.DistributedDataset` or through `tf.distribute.Strategy.run`. This base class should never be instantiated directly. `tf.distribute.DistributedValues` contains a value per replica. Depending on the subclass, the values could either be synced on update, synced on demand, or never synced. A `tf.distribute.DistributedValues` can be reduced to obtain a single value across replicas, used as input into `tf.distribute.Strategy.run`, or its per-replica values can be inspected using `tf.distribute.Strategy.experimental_local_results`. Example usage: 1. Created from a `tf.distribute.DistributedDataset`: >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) >>> distributed_values = next(dataset_iterator) 2. Returned by `run`: >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> @tf.function ... def run(): ... ctx = tf.distribute.get_replica_context() ... return ctx.replica_id_in_sync_group >>> distributed_values = strategy.run(run) 3. As input into `run`: >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) >>> distributed_values = next(dataset_iterator) >>> @tf.function ... def run(input): ... return input + 1.0 >>> updated_value = strategy.run(run, args=(distributed_values,)) 4. Reduce value: >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) >>> distributed_values = next(dataset_iterator) >>> reduced_value = strategy.reduce(tf.distribute.ReduceOp.SUM, ... distributed_values, ... axis = 0) 5. Inspect local replica values: >>> strategy = tf.distribute.MirroredStrategy([""GPU:0"", ""GPU:1""]) >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) >>> per_replica_values = strategy.experimental_local_results( ... distributed_values) >>> per_replica_values (<tf.Tensor: shape=(1,), dtype=float32, numpy=array([5.], dtype=float32)>, <tf.Tensor: shape=(1,), dtype=float32, numpy=array([6.], dtype=float32)>)" 3073,DistributedDelegate,tensorflow/tensorflow/python/distribute/values.py,202,class,A map from device to values; acts as the same type as the values. 3074,PerReplica,tensorflow/tensorflow/python/distribute/values.py,361,class,Holds a map from replica to unsynchronized values. 3075,PerReplicaSpec,tensorflow/tensorflow/python/distribute/values.py,375,class,Type specification for a `PerReplica`. 3076,Mirrored,tensorflow/tensorflow/python/distribute/values.py,407,class,Holds a map from replica to values which are kept in sync. 3077,DistributedVarOp,tensorflow/tensorflow/python/distribute/values.py,421,class,A class that looks like `tf.Operation`. 3078,DistributedVariable,tensorflow/tensorflow/python/distribute/values.py,440,class,Holds a map from replica to variables. 3079,_DistributedVariableSaveable,tensorflow/tensorflow/python/distribute/values.py,874,class,Class for defining how to restore a DistributedVariable.
3080,_MirroredSaveable,tensorflow/tensorflow/python/distribute/values.py,893,class,Class for defining how to restore a MirroredVariable. 3081,MirroredVariable,tensorflow/tensorflow/python/distribute/values.py,915,class,Holds a map from replica to variables whose values are kept in sync. 3082,_SyncOnReadSaveable,tensorflow/tensorflow/python/distribute/values.py,980,class,Class for defining how to restore a SyncOnReadVariable. 3083,SyncOnReadVariable,tensorflow/tensorflow/python/distribute/values.py,1016,class,Holds a map from replica to variables whose values are reduced on save. 3084,_tensor_conversion_distributed_var,tensorflow/tensorflow/python/distribute/values.py,1131,function, 3085,_tensor_conversion_mirrored,tensorflow/tensorflow/python/distribute/values.py,1141,function, 3086,_tensor_conversion_mirrored_val,tensorflow/tensorflow/python/distribute/values.py,1150,function, 3087,_tensor_conversion_sync_on_read,tensorflow/tensorflow/python/distribute/values.py,1160,function, 3088,VariablePolicy,tensorflow/tensorflow/python/distribute/values.py,1168,class,"Policy defining synchronization and aggregation of a distributed variable. Given `synchronization` and `aggregation` parameters set on a `tf.Variable` during variable creation within `tf.distribute` scope, `tf.distribute` creates an appropriate policy object and assigns it to the distributed variable. All variable operations are delegated to the respective policy object." 3089,OnReadPolicy,tensorflow/tensorflow/python/distribute/values.py,1201,class,"Policy defined for `tf.VariableSynchronization.ON_READ` synchronization. This policy is created when `synchronization` is set to `tf.VariableSynchronization.ON_READ` and `aggregation` is set to any of the values allowed by the `tf.VariableAggregation` enum, such as `NONE`, `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`, when creating a `tf.Variable` in `tf.distribute` scope." 3090,AutoPolicy,tensorflow/tensorflow/python/distribute/values.py,1340,class,"Policy defined for `tf.VariableSynchronization.AUTO` synchronization. This policy is created when `synchronization` is set to `tf.VariableSynchronization.AUTO` and `aggregation` is set to `tf.VariableAggregation.NONE` when creating a `tf.Variable` in `tf.distribute` scope." 3091,OnWritePolicy,tensorflow/tensorflow/python/distribute/values.py,1432,class,"Policy defined for `tf.VariableSynchronization.ON_WRITE` synchronization. This policy is created when either of the following `synchronization` and `aggregation` combinations is specified when creating a `tf.Variable` in `tf.distribute` scope: * `synchronization` is equal to `tf.VariableSynchronization.AUTO` and `aggregation` is any of the `tf.VariableAggregation` enum values `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`. * `synchronization` is equal to `tf.VariableSynchronization.ON_WRITE` and `aggregation` is any of the `tf.VariableAggregation` enum values `NONE`, `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`."
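A short illustration of how the `synchronization`/`aggregation` pairs above select a policy when a variable is created under a strategy; the device list is an assumption, and note that `ON_READ` variables must be non-trainable:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed devices

with strategy.scope():
  # ON_READ + SUM corresponds to the OnReadPolicy case described above: each
  # replica updates its own copy, and copies are aggregated on read/save.
  num_examples = tf.Variable(
      0.,
      trainable=False,  # ON_READ synchronization requires non-trainable
      synchronization=tf.VariableSynchronization.ON_READ,
      aggregation=tf.VariableAggregation.SUM)

  # The defaults (AUTO synchronization, NONE aggregation) correspond to the
  # AutoPolicy/OnWritePolicy cases: updates are mirrored to every replica.
  weights = tf.Variable(1.0)
```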
3092,_is_mirrored,tensorflow/tensorflow/python/distribute/values.py,1453,function, 3093,_is_sync_on_read,tensorflow/tensorflow/python/distribute/values.py,1460,function, 3094,_in_update_replica,tensorflow/tensorflow/python/distribute/values.py,1467,function, 3095,DistributedValuesTest,tensorflow/tensorflow/python/distribute/values_test.py,67,class, 3096,DistributedDelegateTest,tensorflow/tensorflow/python/distribute/values_test.py,293,class, 3097,_device_str,tensorflow/tensorflow/python/distribute/values_test.py,366,function, 3098,_nested_value,tensorflow/tensorflow/python/distribute/values_test.py,370,function, 3099,_make_mirrored_val,tensorflow/tensorflow/python/distribute/values_test.py,374,function, 3100,_make_mirrored,tensorflow/tensorflow/python/distribute/values_test.py,383,function, 3101,mirrored_and_tpu_strategy_combinations,tensorflow/tensorflow/python/distribute/values_test.py,395,function, 3102,DistributedVariableTest,tensorflow/tensorflow/python/distribute/values_test.py,424,class, 3103,PackedDistributedVariableTest,tensorflow/tensorflow/python/distribute/values_test.py,594,class, 3104,MirroredVariableTest,tensorflow/tensorflow/python/distribute/values_test.py,633,class, 3105,_make_replica_local,tensorflow/tensorflow/python/distribute/values_test.py,1304,function, 3106,SyncOnReadVariablePropertiesTest,tensorflow/tensorflow/python/distribute/values_test.py,1324,class, 3107,strategy_and_run_tf_function_combinations,tensorflow/tensorflow/python/distribute/values_test.py,1358,function, 3108,SyncOnReadVariableTest,tensorflow/tensorflow/python/distribute/values_test.py,1375,class, 3109,SyncOnReadScatterReplicaTest,tensorflow/tensorflow/python/distribute/values_test.py,1907,class, 3110,MirroredTest,tensorflow/tensorflow/python/distribute/values_test.py,2036,class, 3111,PerReplicaTest,tensorflow/tensorflow/python/distribute/values_test.py,2053,class, 3112,_make_index_slices,tensorflow/tensorflow/python/distribute/values_test.py,2166,function, 3113,on_write_assign,tensorflow/tensorflow/python/distribute/values_util.py,31,function, 3114,on_write_assign_add,tensorflow/tensorflow/python/distribute/values_util.py,41,function, 3115,on_write_assign_sub,tensorflow/tensorflow/python/distribute/values_util.py,52,function, 3116,assign_on_each_device,tensorflow/tensorflow/python/distribute/values_util.py,63,function,Update the variable on each replica with the given assign_func and value. 3117,on_read_assign_sub_cross_replica,tensorflow/tensorflow/python/distribute/values_util.py,78,function, 3118,on_read_assign_add_cross_replica,tensorflow/tensorflow/python/distribute/values_util.py,90,function, 3119,on_read_assign_cross_replica,tensorflow/tensorflow/python/distribute/values_util.py,102,function,Return the value of the variable in cross replica context. 
3120,scatter_sub,tensorflow/tensorflow/python/distribute/values_util.py,118,function, 3121,scatter_add,tensorflow/tensorflow/python/distribute/values_util.py,127,function, 3122,scatter_mul,tensorflow/tensorflow/python/distribute/values_util.py,136,function, 3123,scatter_div,tensorflow/tensorflow/python/distribute/values_util.py,145,function, 3124,scatter_min,tensorflow/tensorflow/python/distribute/values_util.py,154,function, 3125,scatter_max,tensorflow/tensorflow/python/distribute/values_util.py,163,function, 3126,scatter_update,tensorflow/tensorflow/python/distribute/values_util.py,172,function, 3127,get_current_replica_id_as_int,tensorflow/tensorflow/python/distribute/values_util.py,181,function,"Returns the current replica ID as an integer, or `None`." 3128,assign_on_device,tensorflow/tensorflow/python/distribute/values_util.py,193,function, 3129,assign_add_on_device,tensorflow/tensorflow/python/distribute/values_util.py,198,function, 3130,assign_sub_on_device,tensorflow/tensorflow/python/distribute/values_util.py,203,function, 3131,assert_replica_context,tensorflow/tensorflow/python/distribute/values_util.py,208,function, 3132,apply_aggregation,tensorflow/tensorflow/python/distribute/values_util.py,218,function, 3133,WarmStartingUtilWithDistributionStrategyTest,tensorflow/tensorflow/python/distribute/warm_starting_util_test.py,42,class, 3134,NormalizationTest,tensorflow/tensorflow/python/distribute/zero_batch_test.py,39,class, 3135,format_master_url,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver.py,35,function, 3136,get_accelerator_devices,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver.py,42,function,Returns accelerator devices given a master and a configuration. 3137,ClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver.py,61,class,"Abstract class for all implementations of ClusterResolvers. This defines the skeleton for all implementations of ClusterResolvers. ClusterResolvers are a way for TensorFlow to communicate with various cluster management systems (e.g. GCE, AWS, etc...) and gives TensorFlow necessary information to set up distributed training. By letting TensorFlow communicate with these systems, we will be able to automatically discover and resolve IP addresses for various TensorFlow workers. This will eventually allow us to automatically recover from underlying machine failures and scale TensorFlow worker clusters up and down. Note to Implementors of `tf.distribute.cluster_resolver.ClusterResolver` subclass: In addition to these abstract methods, when task_type, task_id, and rpc_layer attributes are applicable, you should also implement them either as properties with getters or setters, or directly set the attributes `self._task_type`, `self._task_id`, or `self._rpc_layer` so the base class' getters and setters are used. See `tf.distribute.cluster_resolver.SimpleClusterResolver.__init__` for an example. In general, multi-client tf.distribute strategies such as `tf.distribute.experimental.MultiWorkerMirroredStrategy` require task_type and task_id properties to be available in the `ClusterResolver` they are using. On the other hand, these concepts are not applicable in single-client strategies, such as `tf.distribute.experimental.TPUStrategy`, because the program is only expected to be run on one task, so there should not be a need to have code branches according to task type and task id. - task_type is the name of the server's current named job (e.g. 
'worker', 'ps' in a distributed parameterized training job). - task_id is the ordinal index of the server within the task type. - rpc_layer is the protocol used by TensorFlow to communicate with other TensorFlow servers in a distributed environment." 3138,SimpleClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver.py,293,class,"Simple implementation of ClusterResolver that accepts all attributes. Please see the base class for documentation of arguments of its constructor. It is useful if you want to specify some or all attributes. Usage example with `tf.distribute.Strategy`: ```Python cluster = tf.train.ClusterSpec({""worker"": [""worker0.example.com:2222"", ""worker1.example.com:2222""]}) # On worker 0 cluster_resolver = SimpleClusterResolver(cluster, task_type=""worker"", task_id=0, num_accelerators={""GPU"": 8}, rpc_layer=""grpc"") strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) # On worker 1 cluster_resolver = SimpleClusterResolver(cluster, task_type=""worker"", task_id=1, num_accelerators={""GPU"": 8}, rpc_layer=""grpc"") strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) ```" 3139,UnionClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver.py,423,class,"Performs a union on underlying ClusterResolvers. This class performs a union given two or more existing ClusterResolvers. It merges the underlying ClusterResolvers, and returns one unified ClusterSpec when cluster_spec is called. The details of the merge function are documented in the cluster_spec function. For additional ClusterResolver properties such as task type, task index, rpc layer, environment, etc..., we will return the value from the first ClusterResolver in the union. An example to combine two cluster resolvers: ```Python cluster_0 = tf.train.ClusterSpec({""worker"": [""worker0.example.com:2222"", ""worker1.example.com:2222""]}) cluster_resolver_0 = SimpleClusterResolver(cluster_0, task_type=""worker"", task_id=0, rpc_layer=""grpc"") cluster_1 = tf.train.ClusterSpec({""ps"": [""ps0.example.com:2222"", ""ps1.example.com:2222""]}) cluster_resolver_1 = SimpleClusterResolver(cluster_1, task_type=""ps"", task_id=0, rpc_layer=""grpc"") # Its task type would be ""worker"". cluster_resolver = UnionClusterResolver(cluster_resolver_0, cluster_resolver_1) ``` An example to override the number of GPUs in a TFConfigClusterResolver instance: ```Python tf_config = TFConfigClusterResolver() gpu_override = SimpleClusterResolver(tf_config.cluster_spec(), num_accelerators={""GPU"": 1}) cluster_resolver = UnionClusterResolver(gpu_override, tf_config) ```" 3140,MockBaseClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver_test.py,34,class, 3141,BaseClusterResolverTest,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver_test.py,47,class, 3142,UnionClusterResolverTest,tensorflow/tensorflow/python/distribute/cluster_resolver/cluster_resolver_test.py,123,class, 3143,GCEClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver.py,35,class,"ClusterResolver for Google Compute Engine. This is an implementation of cluster resolvers for the Google Compute Engine instance group platform. By specifying a project, zone, and instance group, this will retrieve the IP address of all the instances within the instance group and return a ClusterResolver object suitable for use for distributed TensorFlow.
Note: this cluster resolver cannot retrieve `task_type`, `task_id` or `rpc_layer`. To use it with some distribution strategies like `tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to specify `task_type` and `task_id` in the constructor. Usage example with tf.distribute.Strategy: ```Python # On worker 0 cluster_resolver = GCEClusterResolver(""my-project"", ""us-west1"", ""my-instance-group"", task_type=""worker"", task_id=0) strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) # On worker 1 cluster_resolver = GCEClusterResolver(""my-project"", ""us-west1"", ""my-instance-group"", task_type=""worker"", task_id=1) strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) ```" 3144,GCEClusterResolverTest,tensorflow/tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver_test.py,30,class, 3145,KubernetesClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver.py,35,class,"ClusterResolver for Kubernetes. This is an implementation of cluster resolvers for Kubernetes. When given the Kubernetes namespace and label selector for pods, we will retrieve the pod IP addresses of all running pods matching the selector, and return a ClusterSpec based on that information. Note: it cannot retrieve `task_type`, `task_id` or `rpc_layer`. To use it with some distribution strategies like `tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to specify `task_type` and `task_id` by setting these attributes. Usage example with tf.distribute.Strategy: ```Python # On worker 0 cluster_resolver = KubernetesClusterResolver( {""worker"": [""job-name=worker-cluster-a"", ""job-name=worker-cluster-b""]}) cluster_resolver.task_type = ""worker"" cluster_resolver.task_id = 0 strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) # On worker 1 cluster_resolver = KubernetesClusterResolver( {""worker"": [""job-name=worker-cluster-a"", ""job-name=worker-cluster-b""]}) cluster_resolver.task_type = ""worker"" cluster_resolver.task_id = 1 strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) ```" 3146,_mock_kubernetes_client,tensorflow/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py,28,function, 3147,_get_mock_pod_item,tensorflow/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py,35,function, 3148,_create_pod_list,tensorflow/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py,47,function, 3149,KubernetesClusterResolverTest,tensorflow/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py,51,class, 3150,expand_hostlist,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py,31,function,"Create a list of hosts out of a SLURM hostlist. The order of nodes is preserved and no deduplication is done. Input: 'n[1-2],m5,o[3-4,6,7-9]' Output: ['n1', 'n2', 'm5', 'o3', 'o4', 'o6', 'o7', 'o8', 'o9']" 3151,expand_tasks_per_node,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py,89,function,"Expands the tasks per node expression from SLURM. 
The order is preserved so it can be matched to the hostlist. Input: '3(x2),2,1' Output: [3, 3, 2, 1]" 3152,_get_slurm_var,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py,110,function,"Gets the SLURM variable from the environment. Args: name: Name of the step variable Returns: SLURM_<name> from os.environ Raises: RuntimeError if variable is not found" 3153,_get_num_slurm_tasks,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py,129,function,"Returns the number of SLURM tasks of the current job step. Returns: The number of tasks as an int" 3154,_get_num_nvidia_gpus,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py,138,function,"Gets the number of NVIDIA GPUs by using CUDA_VISIBLE_DEVICES and nvidia-smi. Returns: Number of GPUs available on the node Raises: RuntimeError if executing nvidia-smi failed" 3155,get_num_gpus,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py,159,function,"Returns the number of GPUs visible on the current node. Currently only implemented for NVIDIA GPUs." 3156,SlurmClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py,168,class,"ClusterResolver for systems with the Slurm workload manager. This is an implementation of ClusterResolver for Slurm clusters. This allows the specification of jobs and task counts, number of tasks per node, number of GPUs on each node and number of GPUs for each task. It retrieves system attributes from Slurm environment variables, resolves allocated computing node names, constructs a cluster and returns a ClusterResolver object which can be used for distributed TensorFlow." 3157,SlurmClusterResolverTest,tensorflow/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver_test.py,32,class, 3158,format_master_url,tensorflow/tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver.py,35,function, 3159,_load_tf_config,tensorflow/tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver.py,42,function, 3160,_get_value_in_tfconfig,tensorflow/tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver.py,46,function, 3161,TFConfigClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver.py,52,class,"Implementation of a ClusterResolver which reads the TF_CONFIG EnvVar. This is an implementation of cluster resolvers when using TF_CONFIG to set information about the cluster. The cluster spec returned will be initialized from the TF_CONFIG environment variable. An example to set TF_CONFIG is: ```Python os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': [""localhost:12345"", ""localhost:23456""] }, 'task': {'type': 'worker', 'index': 0} }) ``` However, sometimes the container orchestration framework will set TF_CONFIG for you. In this case, you can just create an instance without passing in any arguments. You can find an example here to let Kubernetes set TF_CONFIG for you: https://github.com/tensorflow/ecosystem/tree/master/kubernetes. Then you can use it with `tf.distribute.Strategy` as: ```Python # `TFConfigClusterResolver` is already the default one in the following # strategy. 
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=TFConfigClusterResolver()) ```" 3162,TFConfigClusterResolverTest,tensorflow/tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver_test.py,35,class, 3163,is_running_in_gce,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver.py,40,function, 3164,TPUClusterResolver,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver.py,51,class,"Cluster Resolver for Google Cloud TPUs. This is an implementation of cluster resolvers for the Google Cloud TPU service. TPUClusterResolver supports the following distinct environments: Google Compute Engine, Google Kubernetes Engine, and Google internal. It can be passed into `tf.distribute.TPUStrategy` to support TF2 training on Cloud TPUs." 3165,MockRequestClass,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,47,class, 3166,MockNodeClass,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,60,class, 3167,mock_request_compute_metadata,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,69,function, 3168,mock_is_running_in_gce,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,80,function, 3169,mock_is_not_running_in_gce,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,84,function, 3170,mock_running_in_gce_urlopen,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,88,function, 3171,mock_not_running_in_gce_urlopen,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,95,function, 3172,TPUClusterResolverTest,tensorflow/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py,101,class, 3173,SaveAndLoadForServingTest,tensorflow/tensorflow/python/distribute/integration_test/saved_model_test.py,50,class, 3174,SaveAndLoadForTrainingTest,tensorflow/tensorflow/python/distribute/integration_test/saved_model_test.py,304,class, 3175,ParallelDevice,tensorflow/tensorflow/python/distribute/parallel_device/parallel_device.py,42,class,A device which executes operations in parallel. 3176,_collective_reduce,tensorflow/tensorflow/python/distribute/parallel_device/parallel_device_test.py,50,function, 3177,_collective_sum,tensorflow/tensorflow/python/distribute/parallel_device/parallel_device_test.py,68,function, 3178,_Dense,tensorflow/tensorflow/python/distribute/parallel_device/parallel_device_test.py,73,class, 3179,_VirtualDeviceTestCase,tensorflow/tensorflow/python/distribute/parallel_device/parallel_device_test.py,90,class, 3180,ParallelDeviceTests,tensorflow/tensorflow/python/distribute/parallel_device/parallel_device_test.py,112,class, 3181,LayerTests,tensorflow/tensorflow/python/distribute/parallel_device/parallel_device_test.py,257,class, 3182,_read_component,tensorflow/tensorflow/python/distribute/parallel_device/saving.py,33,function,Read one component of a parallel variable and discard the rest. 3183,_ParallelDeviceSaveable,tensorflow/tensorflow/python/distribute/parallel_device/saving.py,45,class,Saves and restores a parallel variable. 3184,VariableWithFixedCheckpointing,tensorflow/tensorflow/python/distribute/parallel_device/saving.py,86,class,Overrides checkpointing behavior to save like a partitioned variable. 
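The Slurm helpers indexed earlier (`expand_hostlist`, `expand_tasks_per_node`) parse compact Slurm notation into flat Python lists. The sketch below re-creates the documented behavior in simplified form, matching the Input/Output examples from the docstrings; it ignores corner cases such as zero-padded node numbers, so treat it as an illustration rather than the real implementation.

```python
import re

def expand_tasks_per_node(expr):
    # '3(x2),2,1' -> [3, 3, 2, 1], per the docstring example.
    result = []
    for part in expr.split(','):
        m = re.match(r'(\d+)(?:\(x(\d+)\))?$', part)
        count, repeat = int(m.group(1)), int(m.group(2) or 1)
        result.extend([count] * repeat)
    return result

def expand_hostlist(hostlist):
    # 'n[1-2],m5' -> ['n1', 'n2', 'm5']; order preserved, no deduplication.
    hosts = []
    for prefix, ranges in re.findall(r'([^,\[]+)(?:\[([^\]]+)\])?,?', hostlist):
        if not ranges:
            hosts.append(prefix)
            continue
        for r in ranges.split(','):
            lo, _, hi = r.partition('-')
            for i in range(int(lo), int(hi or lo) + 1):
                hosts.append('%s%d' % (prefix, i))
    return hosts

assert expand_tasks_per_node('3(x2),2,1') == [3, 3, 2, 1]
assert expand_hostlist('n[1-2],m5,o[3-4,6,7-9]') == [
    'n1', 'n2', 'm5', 'o3', 'o4', 'o6', 'o7', 'o8', 'o9']
```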
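For `TFConfigClusterResolver` above, a short usage sketch (assuming the public `tf.distribute.cluster_resolver` export): set `TF_CONFIG` as in the docstring, then read the resolved cluster and task identity back from the resolver.

```python
import json
import os
import tensorflow as tf

# Same TF_CONFIG layout as shown in the docstring above.
os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {'worker': ['localhost:12345', 'localhost:23456']},
    'task': {'type': 'worker', 'index': 0},
})

resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
print(resolver.cluster_spec())               # two-worker ClusterSpec
print(resolver.task_type, resolver.task_id)  # worker 0
```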
3185,_variable_creator,tensorflow/tensorflow/python/distribute/parallel_device/saving.py,110,function, 3186,independent_buffers,tensorflow/tensorflow/python/distribute/parallel_device/saving.py,117,function,"Context manager which saves parallel buffers independently. Creates a ParallelDevice-aware variable subclass which saves buffers for each device separately. Args: parallel_device: A ParallelDevice object on which variables are placed. Yields: Nothing." 3187,to_dlpack,tensorflow/tensorflow/python/dlpack/dlpack.py,27,function,"Returns the dlpack capsule representing the tensor. This operation ensures the underlying data memory is ready when it returns. ```python a = tf.constant([1, 10]) dlcapsule = tf.experimental.dlpack.to_dlpack(a) # dlcapsule represents the dlpack data structure ``` Args: tf_tensor: Tensorflow eager tensor, to be converted to dlpack capsule. Returns: A PyCapsule named as dltensor, which shares the underlying memory with other frameworks. This PyCapsule can be consumed only once." 3188,from_dlpack,tensorflow/tensorflow/python/dlpack/dlpack.py,49,function,"Returns the Tensorflow eager tensor. The returned tensor uses the memory shared by dlpack capsules from other frameworks. ```python a = tf.experimental.dlpack.from_dlpack(dlcapsule) # `a` uses the memory shared by dlpack ``` Args: dlcapsule: A PyCapsule named as dltensor Returns: A Tensorflow eager tensor" 3189,FormatShapeAndDtype,tensorflow/tensorflow/python/dlpack/dlpack_test.py,40,function, 3190,GetNamedTestParameters,tensorflow/tensorflow/python/dlpack/dlpack_test.py,44,function, 3191,DLPackTest,tensorflow/tensorflow/python/dlpack/dlpack_test.py,56,class, 3192,op_attr_type,tensorflow/tensorflow/python/eager/backprop.py,75,function, 3193,make_attr,tensorflow/tensorflow/python/eager/backprop.py,86,function, 3194,_MockOp,tensorflow/tensorflow/python/eager/backprop.py,106,class,Pretends to be a tf.Operation for the gradient functions. 3195,_gradient_function,tensorflow/tensorflow/python/eager/backprop.py,132,function,"Calls the gradient function of the op. Args: op_name: the name of the op to be differentiated. attr_tuple: the attrs, as a tuple. num_inputs: the number of inputs to the op. inputs: inputs to the original operation. outputs: outputs of the original operation. out_grads: gradients of the operation wrt its outputs. skip_input_indices: a tuple that is passed to the gradient function, indicating which inputs to skip calculating the gradient for. forward_pass_name_scope: the namescope of the op in the forward pass. Returns: The gradients with respect to the inputs of the function, as a list." 3196,_must_record_gradient,tensorflow/tensorflow/python/eager/backprop.py,170,function, 3197,_record_gradient,tensorflow/tensorflow/python/eager/backprop.py,174,function, 3198,implicit_val_and_grad,tensorflow/tensorflow/python/eager/backprop.py,183,function,"Returns a function which differentiates f with respect to variables. The wrapped function returns the value and the gradient of f when called with the same arguments. The gradient is with respect to all trainable TFE variables accessed by `f`. This function is useful when the exact set of variables to differentiate with is not known ahead of time. Example: ```python dense_layer = tf.compat.v1.layers.Dense(1) def loss(x, y): return tf.reduce_sum(tf.square(dense_layer(x) - y)) # Obtain the gradient function. val_grad_fn = tfe.implicit_value_and_gradients(loss) # Invoke the gradient function with concrete values of x and y. 
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) y = tf.constant([[10.0], [20.0]]) value, grads_and_vars = val_grad_fn(x, y) print('Value of loss: %s' % value) # Apply the gradients to Variables. optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) optimizer.apply_gradients(grads_and_vars) ``` Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. Returns: A function which, when called, returns a pair. Its first element is the value to which the function evaluates. Its second element is a list of (gradient, variable) pairs. Raises: ValueError: if `f` returns None." 3199,implicit_grad,tensorflow/tensorflow/python/eager/backprop.py,262,function,"Returns a function which differentiates f with respect to variables. The wrapped function returns the gradient of f when called with the same arguments. The gradient is with respect to all trainable TFE variables accessed by `f`. This function is useful when the exact set of variables to differentiate with is not known ahead of time. Example: ```python dense_layer = tf.compat.v1.layers.Dense(1) def loss(x, y): return tf.reduce_sum(tf.square(dense_layer(x) - y)) # Obtain the gradient function. grad_fn = tfe.implicit_gradients(loss) # Invoke the gradient function with concrete values of x and y. x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) y = tf.constant([[10.0], [20.0]]) grads_and_vars = grad_fn(x, y) # Apply the gradients to Variables. optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) optimizer.apply_gradients(grads_and_vars) ``` Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. Returns: A function which, when called, returns a list of (gradient, variable) pairs." 3200,_get_arg_spec,tensorflow/tensorflow/python/eager/backprop.py,311,function,The positions of the parameters of f to be differentiated in param_args. 3201,gradients_function,tensorflow/tensorflow/python/eager/backprop.py,340,function,"Returns a function which differentiates f with respect to params. Example: ```python # f(x, y) = (x ^ 3) * y - x * (y ^ 2) # Therefore, the 1st order derivatives are: # df / dx = 3 * (x ^ 2) * y - y ^ 2 # df / dy = x ^ 3 - 2 * x * y # The 2nd order derivatives with respect to x is: # d^2 f / (dx)^2 = 6 * x * y def f(x, y): return x * x * x * y - x * y * y # Obtain a function that returns 1st order gradients. grad_fn = tfe.gradients_function(f) x = 2.0 y = 3.0 # Invoke the 1st order gradient function. x_grad, y_grad = grad_fn(x, y) assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2 assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 # Obtain a function that returns the 2nd order gradient with respect to x. gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0]) # Invoke the 2nd order gradient function. x_gradgrad = gradgrad_fn(x, y)[0] assert x_gradgrad.numpy() == 6 * 2 * 3 # To obtain a callable that returns the gradient(s) of `f` with respect to a # subset of its inputs, use the `params` keyword argument with # `gradients_function()`. ygrad_fn = tfe.gradients_function(f, params=[1]) (y_grad,) = ygrad_fn(x, y) assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 ``` Note that only tensors with real or complex dtypes are differentiable. 
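Returning to the DLPack helpers indexed above (`to_dlpack`/`from_dlpack`), here is a round-trip example using the public `tf.experimental.dlpack` API; per the docstring, the capsule can be consumed only once.

```python
import tensorflow as tf

a = tf.constant([1, 10])
capsule = tf.experimental.dlpack.to_dlpack(a)    # PyCapsule named 'dltensor'
b = tf.experimental.dlpack.from_dlpack(capsule)  # shares a's memory
print(b)  # tf.Tensor([ 1 10], shape=(2,), dtype=int32)
```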
Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. If desired, the tensors can be elementwise multiplied by the tensors passed as the `dy` keyword argument to the returned gradient function. params: list of parameter names of f or list of integers indexing the parameters with respect to which we'll differentiate. Passing None differentiates with respect to all parameters. Returns: function which, when called, returns the value of f and the gradient of `f` with respect to all of `params`. The function takes an extra optional keyword argument `dy`. Setting it allows computation of vector jacobian products for vectors other than the vector of ones. Raises: ValueError: if the params are not all strings or all integers." 3202,_ensure_unique_tensor_objects,tensorflow/tensorflow/python/eager/backprop.py,413,function,"Make each of the parameter_positions in args a unique ops.Tensor object. Ensure that each parameter is treated independently. For example: def f(x, y): return x * y g = gradients_function(f) one = tf.constant(1.) g(one, one) should return [1., 1.] (even though the two arguments are the same Tensor object). Args: parameter_positions: List of indices into args defining the arguments to differentiate against. args: A list of arguments to the function to be differentiated. Returns: args, possibly edited in-place." 3203,val_and_grad_function,tensorflow/tensorflow/python/eager/backprop.py,445,function,"Returns a function that computes f and its derivative w.r.t. params. Example: ```python # f(x, y) = (x ^ 3) * y - x * (y ^ 2) # Therefore, the 1st order derivatives are: # df / dx = 3 * (x ^ 2) * y - y ^ 2 # df / dy = x ^ 3 - 2 * x * y def f(x, y): return x * x * x * y - x * y * y # Obtain a function that returns the function value and the 1st order # gradients. val_grads_fn = tfe.value_and_gradients_function(f) x = 2.0 y = 3.0 # Invoke the value-and-gradients function. f_val, (x_grad, y_grad) = val_grads_fn(x, y) assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2) assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2 assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 # To obtain a callable that returns the value of `f` and the gradient(s) of # `f` with respect to a subset of its inputs, use the `params` keyword # argument with `value_and_gradients_function()`. val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1]) f_val, (y_grad,) = val_ygrad_fn(x, y) assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2) assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 ``` Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. If desired, the tensors can be elementwise multiplied by the tensors passed as the `dy` keyword argument to the returned gradient function. params: list of parameter names of f or list of integers indexing the parameters with respect to which we'll differentiate. Passing `None` differentiates with respect to all parameters. Returns: function which, when called, returns the value of f and the gradient of f with respect to all of `params`. The function takes an extra optional keyword argument ""dy"". Setting it allows computation of vector jacobian products for vectors other than the vector of ones. 
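The `tfe.gradients_function`/`value_and_gradients_function` helpers above predate `tf.GradientTape`; a roughly equivalent TF2 sketch of the same worked example:

```python
import tensorflow as tf

def f(x, y):
    # f(x, y) = x^3 * y - x * y^2, as in the docstrings above.
    return x * x * x * y - x * y * y

x, y = tf.constant(2.0), tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch([x, y])  # constants must be watched explicitly
    z = f(x, y)
dx, dy = tape.gradient(z, [x, y])
print(dx.numpy())  # df/dx = 3*x^2*y - y^2 = 27.0
print(dy.numpy())  # df/dy = x^3 - 2*x*y = -4.0
```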
Raises: ValueError: if the params are not all strings or all integers." 3204,make_vjp,tensorflow/tensorflow/python/eager/backprop.py,513,function,"Returns a function that computes f and its vjp w.r.t. params. The term ""vjp"" here is an abbreviation for vector-jacobian product. Args: f: the function to be differentiated. params: the parameters (numbers or names) to differentiate with respect to. A value of None will differentiate with respect to all parameters. persistent: Boolean controlling whether the VJP function can be re-used. Must be True or False. Returns: A function, which when called, returns a tuple (value, vjp), where: - value is the result of calling f. - vjp is a function, which takes a vector as an argument and returns the product of that vector with the Jacobian of f. Providing no argument to vjp is equivalent to providing a vector of ones. For example, ```python def f(x): return x * x wrapped_fn = tfe.make_vjp(f) result, vjp = wrapped_fn(tf.constant(3.0)) # result is 9.0 vjp() # the vjp function returns 6.0 ``` Raises: ValueError: if `f` returns None." 3205,flatten_nested_indexed_slices,tensorflow/tensorflow/python/eager/backprop.py,589,function, 3206,aggregate_indexed_slices_gradients,tensorflow/tensorflow/python/eager/backprop.py,601,function,Aggregates gradients containing `IndexedSlices`s. 3207,_aggregate_grads,tensorflow/tensorflow/python/eager/backprop.py,628,function,"Aggregate gradients from multiple sources. Args: gradients: A list of 'Tensor' or 'IndexedSlices' gradients. Returns: If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'. Otherwise returns an aggregated 'IndexedSlices'." 3208,_num_elements,tensorflow/tensorflow/python/eager/backprop.py,650,function,The number of elements in the `grad` tensor. 3209,_fast_fill,tensorflow/tensorflow/python/eager/backprop.py,663,function, 3210,_zeros,tensorflow/tensorflow/python/eager/backprop.py,669,function,Helper to return (possibly cached) zero tensors in eager mode. 3211,_ones,tensorflow/tensorflow/python/eager/backprop.py,697,function, 3212,_handle_or_self,tensorflow/tensorflow/python/eager/backprop.py,726,function,Unwrap resource variable/ndarray to return tensors. 3213,GradientTape,tensorflow/tensorflow/python/eager/backprop.py,736,class,"Record operations for automatic differentiation. Operations are recorded if they are executed within this context manager and at least one of their inputs is being ""watched"". Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`, where `trainable=True` is default in both cases) are automatically watched. Tensors can be manually watched by invoking the `watch` method on this context manager. For example, consider the function `y = x * x`. The gradient at `x = 3.0` can be computed as: >>> x = tf.constant(3.0) >>> with tf.GradientTape() as g: ... g.watch(x) ... y = x * x >>> dy_dx = g.gradient(y, x) >>> print(dy_dx) tf.Tensor(6.0, shape=(), dtype=float32) GradientTapes can be nested to compute higher-order derivatives. For example, >>> x = tf.constant(5.0) >>> with tf.GradientTape() as g: ... g.watch(x) ... with tf.GradientTape() as gg: ... gg.watch(x) ... y = x * x ... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x >>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2 >>> print(dy_dx) tf.Tensor(10.0, shape=(), dtype=float32) >>> print(d2y_dx2) tf.Tensor(2.0, shape=(), dtype=float32) By default, the resources held by a GradientTape are released as soon as the GradientTape.gradient() method is called. 
To compute multiple gradients over the same computation, create a persistent gradient tape. This allows multiple calls to the gradient() method as resources are released when the tape object is garbage collected. For example: >>> x = tf.constant(3.0) >>> with tf.GradientTape(persistent=True) as g: ... g.watch(x) ... y = x * x ... z = y * y >>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3) >>> print(dz_dx) tf.Tensor(108.0, shape=(), dtype=float32) >>> dy_dx = g.gradient(y, x) >>> print(dy_dx) tf.Tensor(6.0, shape=(), dtype=float32) By default GradientTape will automatically watch any trainable variables that are accessed inside the context. If you want fine-grained control over which variables are watched you can disable automatic tracking by passing `watch_accessed_variables=False` to the tape constructor: >>> x = tf.Variable(2.0) >>> w = tf.Variable(5.0) >>> with tf.GradientTape( ... watch_accessed_variables=False, persistent=True) as tape: ... tape.watch(x) ... y = x ** 2 # Gradients will be available for `x`. ... z = w ** 3 # No gradients will be available as `w` isn't being watched. >>> dy_dx = tape.gradient(y, x) >>> print(dy_dx) tf.Tensor(4.0, shape=(), dtype=float32) >>> # No gradients will be available as `w` isn't being watched. >>> dz_dy = tape.gradient(z, w) >>> print(dz_dy) None Note that when using models you should ensure that your variables exist when using `watch_accessed_variables=False`. Otherwise it's quite easy to make your first iteration not have any gradients: ```python a = tf.keras.layers.Dense(32) b = tf.keras.layers.Dense(32) with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(a.variables) # Since `a.build` has not been called at this point # `a.variables` will return an empty list and the # tape will not be watching anything. result = b(a(inputs)) tape.gradient(result, a.variables) # The result of this computation will be # a list of `None`s since a's variables # are not being watched. ``` Note that only tensors with real or complex dtypes are differentiable." 3214,BackpropTest,tensorflow/tensorflow/python/eager/backprop_test.py,57,class, 3215,JacobianTest,tensorflow/tensorflow/python/eager/backprop_test.py,1605,class, 3216,BatchJacobianTest,tensorflow/tensorflow/python/eager/backprop_test.py,1703,class, 3217,AggregateIndexedSlicesGradientsTest,tensorflow/tensorflow/python/eager/backprop_test.py,1801,class, 3218,IsTrainable,tensorflow/tensorflow/python/eager/backprop_util.py,25,function, 3219,c_tfe_py_fastpath_execute,tensorflow/tensorflow/python/eager/benchmarks_test.py,74,function, 3220,run_benchmark,tensorflow/tensorflow/python/eager/benchmarks_test.py,95,function, 3221,MicroBenchmarks,tensorflow/tensorflow/python/eager/benchmarks_test.py,112,class, 3222,MicroBenchmarksBase,tensorflow/tensorflow/python/eager/benchmarks_test_base.py,32,class,"Run and report benchmark results. The first run is without any profiling. Second run is with xprof and python trace. Third run is with xprof without python trace. Note: xprof runs are with fewer iterations." 3223,CancellationManager,tensorflow/tensorflow/python/eager/cancellation.py,24,class,A mechanism for cancelling blocking computation. 3224,CancellationTest,tensorflow/tensorflow/python/eager/cancellation_test.py,24,class, 3225,_EagerTensorCache,tensorflow/tensorflow/python/eager/context.py,80,class,Simple cache which evicts items based on length in a FIFO manner. 3226,FunctionCallOptions,tensorflow/tensorflow/python/eager/context.py,106,class,"Options applied at call sites of eager functions. 
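The Jacobian tests indexed above (`JacobianTest`, `BatchJacobianTest`) exercise two GradientTape methods not shown in the docstring; a brief sketch:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
with tf.GradientTape(persistent=True) as tape:
    tape.watch(x)
    y = x * x

# Full Jacobian: one derivative per (output element, input element) pair.
print(tape.jacobian(y, x).shape)        # (2, 2, 2, 2)
# batch_jacobian treats axis 0 as a batch dimension.
print(tape.batch_jacobian(y, x).shape)  # (2, 2, 2)
del tape  # release resources held by the persistent tape
```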
Eager functions are functions decorated with tf.contrib.eager.defun." 3227,_TensorCaches,tensorflow/tensorflow/python/eager/context.py,165,class,Thread local tensor caches. 3228,_ThreadLocalData,tensorflow/tensorflow/python/eager/context.py,188,class,Thread local storage for the eager context. 3229,_ContextSwitchStack,tensorflow/tensorflow/python/eager/context.py,210,class,A thread-local stack of context switches. 3230,LogicalDevice,tensorflow/tensorflow/python/eager/context.py,252,class,"Abstraction for a logical device initialized by the runtime. A `tf.config.LogicalDevice` corresponds to an initialized logical device on a `tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors and operations can be placed on a specific logical device by calling `tf.device` with a specified `tf.config.LogicalDevice`. Fields: name: The fully qualified name of the device. Can be used for Op or function placement. device_type: String declaring the type of device such as ""CPU"" or ""GPU""." 3231,LogicalDeviceConfiguration,tensorflow/tensorflow/python/eager/context.py,271,class,"Configuration class for a logical device. The class specifies the parameters to configure a `tf.config.PhysicalDevice` as it is initialized to a `tf.config.LogicalDevice` during runtime initialization. Not all fields are valid for all device types. See `tf.config.get_logical_device_configuration` and `tf.config.set_logical_device_configuration` for usage examples. Fields: memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual device. Currently only supported for GPUs. experimental_priority: (optional) Priority to assign to a virtual device. Lower values have higher priorities and 0 is the default. Within a physical GPU, the GPU scheduler will prioritize ops on virtual devices with higher priority. Currently only supported for Nvidia GPUs." 3232,PhysicalDevice,tensorflow/tensorflow/python/eager/context.py,298,class,"Abstraction for a locally visible physical device. TensorFlow can utilize various devices such as the CPU or multiple GPUs for computation. Before initializing a local device for use, the user can customize certain properties of the device such as its visibility or memory configuration. Once a visible `tf.config.PhysicalDevice` is initialized, one or more `tf.config.LogicalDevice` objects are created. Use `tf.config.set_visible_devices` to configure the visibility of a physical device and `tf.config.set_logical_device_configuration` to configure multiple `tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is useful when separation between models is needed or to simulate a multi-device environment. Fields: name: Unique identifier for device. device_type: String declaring the type of device such as ""CPU"" or ""GPU""." 3233,_AtomicCounter,tensorflow/tensorflow/python/eager/context.py,322,class,A simple atomic counter. 3234,_TensorCacheDeleter,tensorflow/tensorflow/python/eager/context.py,340,class,Deletes tensor caches for a given context. 3235,is_tfrt_enabled,tensorflow/tensorflow/python/eager/context.py,358,function, 3236,Context,tensorflow/tensorflow/python/eager/context.py,370,class,Environment in which eager operations execute. 3237,_EagerDeviceContext,tensorflow/tensorflow/python/eager/context.py,1755,class,Context-manager forcing placement of ops and Tensors on a device. 
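As a concrete illustration of the `PhysicalDevice`/`LogicalDeviceConfiguration` entries above, the public `tf.config` API can split one physical device into several logical ones. Note this must run before the runtime initializes its devices.

```python
import tensorflow as tf

# Split the CPU into two logical devices; for GPUs, memory_limit (in MB)
# and experimental_priority could also be passed, per the fields above.
cpu = tf.config.list_physical_devices('CPU')[0]
tf.config.set_logical_device_configuration(
    cpu,
    [tf.config.LogicalDeviceConfiguration(),
     tf.config.LogicalDeviceConfiguration()])
print(tf.config.list_logical_devices('CPU'))  # two LogicalDevice entries
```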
3238,_set_context_locked,tensorflow/tensorflow/python/eager/context.py,1814,function, 3239,_set_context,tensorflow/tensorflow/python/eager/context.py,1820,function, 3240,_create_context,tensorflow/tensorflow/python/eager/context.py,1825,function, 3241,_reset_context,tensorflow/tensorflow/python/eager/context.py,1832,function,"Clears and re-initializes the singleton context. Should only be used for testing." 3242,context,tensorflow/tensorflow/python/eager/context.py,1846,function,Returns a singleton context object. 3243,context_safe,tensorflow/tensorflow/python/eager/context.py,1853,function,Returns current context (or None if one hasn't been initialized). 3244,ensure_initialized,tensorflow/tensorflow/python/eager/context.py,1858,function,Initialize the context. 3245,set_global_seed,tensorflow/tensorflow/python/eager/context.py,1863,function,Sets the eager mode seed. 3246,global_seed,tensorflow/tensorflow/python/eager/context.py,1868,function,Returns the eager mode seed. 3247,internal_operation_seed,tensorflow/tensorflow/python/eager/context.py,1873,function,Returns the operation seed generated based on global seed. 3248,executing_eagerly,tensorflow/tensorflow/python/eager/context.py,1879,function,"Checks whether the current thread has eager execution enabled. Eager execution is enabled by default and this API returns `True` in most cases. However, this API might return `False` in the following use cases. * Executing inside `tf.function`, unless under `tf.init_scope` or `tf.config.run_functions_eagerly(True)` is previously called. * Executing inside a transformation function for `tf.dataset`. * `tf.compat.v1.disable_eager_execution()` is called. General case: >>> print(tf.executing_eagerly()) True Inside `tf.function`: >>> @tf.function ... def fn(): ... with tf.init_scope(): ... print(tf.executing_eagerly()) ... print(tf.executing_eagerly()) >>> fn() True False Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called: >>> tf.config.run_functions_eagerly(True) >>> @tf.function ... def fn(): ... with tf.init_scope(): ... print(tf.executing_eagerly()) ... print(tf.executing_eagerly()) >>> fn() True True >>> tf.config.run_functions_eagerly(False) Inside a transformation function for `tf.dataset`: >>> def data_fn(x): ... print(tf.executing_eagerly()) ... return x >>> dataset = tf.data.Dataset.range(100) >>> dataset = dataset.map(data_fn) False Returns: `True` if the current thread has eager execution enabled." 3249,executing_eagerly_v1,tensorflow/tensorflow/python/eager/context.py,1940,function,"Checks whether the current thread has eager execution enabled. Eager execution is typically enabled via `tf.compat.v1.enable_eager_execution`, but may also be enabled within the context of a Python function via tf.contrib.eager.py_func. When eager execution is enabled, returns `True` in most cases. However, this API might return `False` in the following use cases. * Executing inside `tf.function`, unless under `tf.init_scope` or `tf.config.run_functions_eagerly(True)` is previously called. * Executing inside a transformation function for `tf.dataset`. * `tf.compat.v1.disable_eager_execution()` is called. >>> tf.compat.v1.enable_eager_execution() General case: >>> print(tf.executing_eagerly()) True Inside `tf.function`: >>> @tf.function ... def fn(): ... with tf.init_scope(): ... print(tf.executing_eagerly()) ... 
print(tf.executing_eagerly()) >>> fn() True False Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called: >>> tf.config.run_functions_eagerly(True) >>> @tf.function ... def fn(): ... with tf.init_scope(): ... print(tf.executing_eagerly()) ... print(tf.executing_eagerly()) >>> fn() True True >>> tf.config.run_functions_eagerly(False) Inside a transformation function for `tf.dataset`: >>> def data_fn(x): ... print(tf.executing_eagerly()) ... return x >>> dataset = tf.data.Dataset.range(100) >>> dataset = dataset.map(data_fn) False Returns: `True` if the current thread has eager execution enabled." 3250,in_eager_mode,tensorflow/tensorflow/python/eager/context.py,2002,function,Use executing_eagerly() instead. This function will be removed. 3251,shared_name,tensorflow/tensorflow/python/eager/context.py,2007,function,"Returns the anonymous shared name GUID if no shared name is specified. In eager mode we need to use a unique shared name to avoid spurious sharing issues. The runtime generates a unique name on our behalf when the reserved GUID is used as a shared name. Args: name: Optional shared name Returns: Eager compatible shared name." 3252,graph_mode,tensorflow/tensorflow/python/eager/context.py,2028,function,Context-manager to disable eager execution for the current thread. 3253,eager_mode,tensorflow/tensorflow/python/eager/context.py,2033,function,Context-manager to enable eager execution for the current thread. 3254,scope_name,tensorflow/tensorflow/python/eager/context.py,2038,function,Name of the current scope. 3255,device,tensorflow/tensorflow/python/eager/context.py,2043,function,"Context-manager to force placement of operations and Tensors on a device. Example: ```python with tf.device('gpu:0'): with tf.device('cpu:0'): shape = tf.constant([], dtype=tf.int32) x = tf.random.truncated_normal(shape, tf.float32) ``` will ensure that the `shape` Tensor is on CPU but the `truncated_normal` operation runs on GPU 0. Args: name: Name of the device (see context().devices()), or None to perform automatic placement. Returns: Context manager for setting the device." 3256,get_log_device_placement,tensorflow/tensorflow/python/eager/context.py,2068,function,"Get if device placements are logged. Returns: If device placements are logged." 3257,set_log_device_placement,tensorflow/tensorflow/python/eager/context.py,2078,function,"Set if device placements should be logged. Args: enabled: Whether to enable device placement logging." 3258,device_policy,tensorflow/tensorflow/python/eager/context.py,2088,function,Context manager for setting device placement policy for current thread. 3259,mirroring_policy,tensorflow/tensorflow/python/eager/context.py,2100,function,Context manager for setting mirroring policy for current thread. 3260,set_execution_mode,tensorflow/tensorflow/python/eager/context.py,2111,function,Sets execution mode for the current thread. 3261,execution_mode,tensorflow/tensorflow/python/eager/context.py,2118,function,Context manager for setting execution mode for current thread. 3262,executor_scope,tensorflow/tensorflow/python/eager/context.py,2136,function,"Context manager for changing executor for current thread. Args: e: An Executor to execute eager ops under this scope. Setting it to None will switch back to use the default executor for the context. Yields: Context manager for setting the executor for current thread." 
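The `get_log_device_placement`/`set_log_device_placement` pair above is exposed publicly as `tf.debugging.get_log_device_placement`/`tf.debugging.set_log_device_placement`; combined with the `device` context manager it shows where each op actually runs:

```python
import tensorflow as tf

tf.debugging.set_log_device_placement(True)  # log placement of every op

with tf.device('CPU:0'):
    x = tf.constant([[1.0, 2.0]])
    y = tf.matmul(x, x, transpose_b=True)  # placement is logged for this op
```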
3263,function_executor_type,tensorflow/tensorflow/python/eager/context.py,2157,function,"Context manager for setting the executor of eager defined functions. Eager defined functions are functions decorated by tf.contrib.eager.defun. Args: executor_type: a string for the name of the executor to be used to execute functions defined by tf.contrib.eager.defun. Yields: Context manager for setting the executor of eager defined functions." 3264,is_async,tensorflow/tensorflow/python/eager/context.py,2178,function,Returns true if current thread is in async mode. 3265,num_gpus,tensorflow/tensorflow/python/eager/context.py,2183,function,"Get the number of available GPU devices. Returns: The number of available GPU devices." 3266,enable_run_metadata,tensorflow/tensorflow/python/eager/context.py,2192,function,"Enables tracing of op execution via RunMetadata. To retrieve the accumulated metadata call context.export_run_metadata() and to stop tracing call context.disable_run_metadata()." 3267,disable_run_metadata,tensorflow/tensorflow/python/eager/context.py,2201,function,Disables tracing of op execution via RunMetadata. 3268,enable_graph_collection,tensorflow/tensorflow/python/eager/context.py,2206,function,"Enables graph collection of executed functions. To retrieve the accumulated graphs call context.export_run_metadata() and to stop collecting graphs call context.disable_graph_collection()." 3269,disable_graph_collection,tensorflow/tensorflow/python/eager/context.py,2215,function,Disables graph collection of executed functions. 3270,export_run_metadata,tensorflow/tensorflow/python/eager/context.py,2220,function,"Returns a RunMetadata proto with accumulated information. The returned protocol buffer contains information since the most recent call to either enable_run_metadata or export_run_metadata. Returns: A RunMetadata protocol buffer." 3271,collect_graphs,tensorflow/tensorflow/python/eager/context.py,2233,function,"Collects a flat list of pre- or post-optimization graphs. The collected graphs include device placements, which can be useful for testing. Usage: ``` @def_function.function def f(x): return x + constant_op.constant(1.) with context.collect_graphs() as graphs: with ops.device(""CPU:0""): f(constant_op.constant(1.)) graph, = graphs # `graph` contains a single GraphDef for inspection ``` Args: optimized: whether to collect optimized graphs or non-optimized graphs Yields: A list of GraphDefs, populated when the context manager exits." 3272,get_server_def,tensorflow/tensorflow/python/eager/context.py,2273,function, 3273,set_server_def,tensorflow/tensorflow/python/eager/context.py,2277,function, 3274,update_server_def,tensorflow/tensorflow/python/eager/context.py,2281,function, 3275,check_alive,tensorflow/tensorflow/python/eager/context.py,2285,function, 3276,async_scope,tensorflow/tensorflow/python/eager/context.py,2291,function,"Context manager for grouping async operations. Ops/function calls inside the scope can return before finishing the actual execution. When exiting the async scope, a synchronization barrier will be automatically added to ensure the completion of all async op and function execution, potentially raising exceptions if async execution results in an error state. Users may write the following code to asynchronously invoke `train_step_fn` and log the `loss` metric for every `num_steps` steps in a training loop. `train_step_fn` internally consumes data using `iterator.get_next()`, and may throw OutOfRangeError when running out of data. 
In that case: ``` try: with tf.experimental.async_scope(): for _ in range(num_steps): # Step function updates the metric `loss` internally train_step_fn() except tf.errors.OutOfRangeError: tf.experimental.async_clear_error() logging.info('loss =', loss.numpy()) ``` Yields: Context manager for grouping async operations." 3277,async_wait,tensorflow/tensorflow/python/eager/context.py,2337,function,"Sync all async operations and raise any errors during execution. In async execution mode, an op/function call can return before finishing the actual execution. Calling this method creates a synchronization barrier for all async op and function execution. It only returns when all pending nodes are finished, potentially raising exceptions if async execution results in an error state." 3278,async_clear_error,tensorflow/tensorflow/python/eager/context.py,2350,function,"Clear pending operations and error statuses in async execution. In async execution mode, an error in op/function execution can lead to errors in subsequent ops/functions that are scheduled but not yet executed. Calling this method clears all pending operations and resets the async execution state. Example: ``` while True: try: # Step function updates the metric `loss` internally train_step_fn() except tf.errors.OutOfRangeError: tf.experimental.async_clear_error() break logging.info('loss =', loss.numpy()) ```" 3279,add_function,tensorflow/tensorflow/python/eager/context.py,2373,function,Add a function definition to the context. 3280,remove_function,tensorflow/tensorflow/python/eager/context.py,2378,function,Remove a function from the context. 3281,get_function_def,tensorflow/tensorflow/python/eager/context.py,2383,function, 3282,register_custom_device,tensorflow/tensorflow/python/eager/context.py,2387,function,"Calls TFE_RegisterCustomDevice to register a custom device with Python. Enables using C extensions specifying a custom device from Python. See the experimental eager C API in tensorflow/c/eager/c_api_experimental.h for details. Note that custom devices are not currently supported inside `tf.function`s. Args: device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice' containing a pointer to a TFE_CustomDevice struct. The capsule retains ownership of the memory. device_name: A string indicating the name to register the custom device under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may subsequently be passed to `with tf.device(...):`. device_info_capsule: A PyCapsule with the name set to 'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific struct with the initial state of the custom device (the void* device_info argument to TFE_RegisterCustomDevice). This method takes ownership of the memory and clears the capsule destructor." 3283,_tmp_in_graph_mode,tensorflow/tensorflow/python/eager/context.py,2417,function, 3284,ContextTest,tensorflow/tensorflow/python/eager/context_test.py,32,class, 3285,_status_to_exception,tensorflow/tensorflow/python/eager/core.py,28,function, 3286,_NotOkStatusException,tensorflow/tensorflow/python/eager/core.py,36,class,Exception class to handle not ok Status. 3287,_FallbackException,tensorflow/tensorflow/python/eager/core.py,52,class,"Exception class to handle fallback from the fastpath. The fastpath that we refer to here is the one implemented to reduce per-op overheads (TFE_Py_FastPathExecute_C). 
If the conditions for executing the op on the fastpath are not met, we fall back to a safer (and more complete) slowpath, and this Exception is raised to signal that transition." 3288,_SymbolicException,tensorflow/tensorflow/python/eager/core.py,63,class,"Exception class to handle use of symbolic tensors when executing eagerly. `keras.Input()` creates symbolic tensors (in a FuncGraph managed by the Keras backend) while in eager execution. This exception is used to identify this case (raised in `convert_to_tensor` to cause generated functions for ops to construct graphs instead of executing the kernel)." 3289,execute,tensorflow/tensorflow/python/eager/core_test.py,51,function, 3290,truncated_normal,tensorflow/tensorflow/python/eager/core_test.py,56,function, 3291,current_device,tensorflow/tensorflow/python/eager/core_test.py,65,function, 3292,configure_virtual_cpus,tensorflow/tensorflow/python/eager/core_test.py,69,function, 3293,TFETest,tensorflow/tensorflow/python/eager/core_test.py,78,class, 3294,SendRecvTest,tensorflow/tensorflow/python/eager/core_test.py,1037,class, 3295,EagerTensorCacheTest,tensorflow/tensorflow/python/eager/core_test.py,1098,class, 3296,CustomDeviceTest,tensorflow/tensorflow/python/eager/custom_device_test.py,28,class, 3297,_CallCounter,tensorflow/tensorflow/python/eager/def_function.py,54,class,Class keeping track of how many recent calls triggered tracing. 3298,_FrequentTracingDetector,tensorflow/tensorflow/python/eager/def_function.py,86,class,Class for frequent retracing detection and warning. 3299,UnliftedInitializerVariable,tensorflow/tensorflow/python/eager/def_function.py,130,class,"Variable which does not lift its initializer out of function context. Instances of this variable, when created, build a graph which runs their initializer inside a tf.cond(is_initialized) block. This can only be created inside a defun called from (eventually) eager mode. That is, non-function-building graphs are not supported." 3300,experimental_run_functions_eagerly,tensorflow/tensorflow/python/eager/def_function.py,318,function,"Enables / disables eager execution of `tf.function`s. Calling `tf.config.experimental_run_functions_eagerly(True)` will make all invocations of `tf.function` run eagerly instead of running as a traced graph function. This can be useful for debugging or profiling. For example, let's say you implemented a simple iterative sqrt function, and you want to collect the intermediate values and plot the convergence. Appending the values to a list in `@tf.function` normally wouldn't work since it will just record the Tensors being traced, not the values. Instead, you can do the following. >>> ys = [] >>> >>> @tf.function ... def sqrt(x): ... y = x / 2 ... d = y ... for _ in range(10): ... d /= 2 ... if y * y < x: ... y += d ... else: ... y -= d ... ys.append(y.numpy()) ... return y >>> >>> tf.config.experimental_run_functions_eagerly(True) >>> sqrt(tf.constant(2.)) >>> ys [1.5, 1.25, 1.375, 1.4375, 1.40625, 1.421875, 1.4140625, 1.4179688, 1.4160156, 1.4150391] >>> tf.config.experimental_run_functions_eagerly(False) Calling `tf.config.experimental_run_functions_eagerly(False)` will undo this behavior. Note: This flag has no effect on functions passed into tf.data transformations as arguments. tf.data functions are never executed eagerly and are always executed as a compiled Tensorflow Graph. Args: run_eagerly: Boolean. Whether to run functions eagerly." 
3301,run_functions_eagerly,tensorflow/tensorflow/python/eager/def_function.py,368,function,"Enables / disables eager execution of `tf.function`s. Calling `tf.config.run_functions_eagerly(True)` will make all invocations of `tf.function` run eagerly instead of running as a traced graph function. This can be useful for debugging or profiling. For example, let's say you implemented a simple iterative sqrt function, and you want to collect the intermediate values and plot the convergence. Appending the values to a list in `@tf.function` normally wouldn't work since it will just record the Tensors being traced, not the values. Instead, you can do the following. >>> ys = [] >>> >>> @tf.function ... def sqrt(x): ... y = x / 2 ... d = y ... for _ in range(10): ... d /= 2 ... if y * y < x: ... y += d ... else: ... y -= d ... ys.append(y.numpy()) ... return y >>> >>> tf.config.run_functions_eagerly(True) >>> sqrt(tf.constant(2.)) >>> ys [1.5, 1.25, 1.375, 1.4375, 1.40625, 1.421875, 1.4140625, 1.4179688, 1.4160156, 1.4150391] >>> tf.config.run_functions_eagerly(False) Calling `tf.config.run_functions_eagerly(False)` will undo this behavior. Note: This flag has no effect on functions passed into tf.data transformations as arguments. tf.data functions are never executed eagerly and are always executed as a compiled Tensorflow Graph. Args: run_eagerly: Boolean. Whether to run functions eagerly." 3302,experimental_functions_run_eagerly,tensorflow/tensorflow/python/eager/def_function.py,422,function,Returns the value of the `experimental_run_functions_eagerly` setting. 3303,functions_run_eagerly,tensorflow/tensorflow/python/eager/def_function.py,428,function,Returns the value of the `run_functions_eagerly` setting. 3304,FunctionDeleter,tensorflow/tensorflow/python/eager/def_function.py,433,class, 3305,Function,tensorflow/tensorflow/python/eager/def_function.py,448,class,"Wrapper class for the graph functions defined for a Python function. See the documentation for `tf.function` for more information on the semantics of defined functions. `Function` is thread-compatible." 3306,function,tensorflow/tensorflow/python/eager/def_function.py,1216,function,"Compiles a function into a callable TensorFlow graph. `tf.function` constructs a callable that executes a TensorFlow graph (`tf.Graph`) created by trace-compiling the TensorFlow operations in `func`, effectively executing `func` as a TensorFlow graph. Example usage: >>> @tf.function ... def f(x, y): ... return x ** 2 + y >>> x = tf.constant([2, 3]) >>> y = tf.constant([3, -2]) >>> f(x, y) _Features_ `func` may use data-dependent control flow, including `if`, `for`, `while`, `break`, `continue` and `return` statements: >>> @tf.function ... def f(x): ... if tf.reduce_sum(x) > 0: ... return x * x ... else: ... return -x // 2 >>> f(tf.constant(-2)) `func`'s closure may include `tf.Tensor` and `tf.Variable` objects: >>> @tf.function ... def f(): ... return x ** 2 + y >>> x = tf.constant([-2, -3]) >>> y = tf.Variable([3, -2]) >>> f() `func` may also use ops with side effects, such as `tf.print`, `tf.Variable` and others: >>> v = tf.Variable(1) >>> @tf.function ... def f(x): ... for i in tf.range(x): ... v.assign_add(i) >>> f(3) >>> v Important: Any Python side-effects (appending to a list, printing with `print`, etc.) will only happen once, when `func` is traced. To have side effects executed by your `tf.function`, they need to be written as TF ops: >>> l = [] >>> @tf.function ... def f(x): ... for i in x: ... l.append(i + 1) # Caution! 
Will only happen once when tracing >>> f(tf.constant([1, 2, 3])) >>> l [] Instead, use TensorFlow collections like `tf.TensorArray`: >>> @tf.function ... def f(x): ... ta = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True) ... for i in range(len(x)): ... ta = ta.write(i, x[i] + 1) ... return ta.stack() >>> f(tf.constant([1, 2, 3])) _`tf.function` is polymorphic_ Internally, `tf.function` can build more than one graph, to support arguments with different data types or shapes, since TensorFlow can build more efficient graphs that are specialized on shapes and dtypes. `tf.function` also treats any pure Python value as opaque objects, and builds a separate graph for each set of Python arguments that it encounters. To obtain an individual graph, use the `get_concrete_function` method of the callable created by `tf.function`. It can be called with the same arguments as `func` and returns a special `tf.Graph` object: >>> @tf.function ... def f(x): ... return x + 1 >>> isinstance(f.get_concrete_function(1).graph, tf.Graph) True Caution: Passing Python scalars or lists as arguments to `tf.function` will always build a new graph. To avoid this, pass numeric arguments as Tensors whenever possible: >>> @tf.function ... def f(x): ... return tf.abs(x) >>> f1 = f.get_concrete_function(1) >>> f2 = f.get_concrete_function(2) # Slow - builds new graph >>> f1 is f2 False >>> f1 = f.get_concrete_function(tf.constant(1)) >>> f2 = f.get_concrete_function(tf.constant(2)) # Fast - reuses f1 >>> f1 is f2 True Python numerical arguments should only be used when they take few distinct values, such as hyperparameters like the number of layers in a neural network. _Input signatures_ For Tensor arguments, `tf.function` instantiates a separate graph for every unique set of input shapes and datatypes. The example below creates two separate graphs, each specialized to a different shape: >>> @tf.function ... def f(x): ... return x + 1 >>> vector = tf.constant([1.0, 1.0]) >>> matrix = tf.constant([[3.0]]) >>> f.get_concrete_function(vector) is f.get_concrete_function(matrix) False An ""input signature"" can be optionally provided to `tf.function` to control the graphs traced. The input signature specifies the shape and type of each Tensor argument to the function using a `tf.TensorSpec` object. More general shapes can be used. This is useful to avoid creating multiple graphs when Tensors have dynamic shapes. It also restricts the shape and datatype of Tensors that can be used: >>> @tf.function( ... input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) ... def f(x): ... return x + 1 >>> vector = tf.constant([1.0, 1.0]) >>> matrix = tf.constant([[3.0]]) >>> f.get_concrete_function(vector) is f.get_concrete_function(matrix) True _Variables may only be created once_ `tf.function` only allows creating new `tf.Variable` objects when it is called for the first time: >>> class MyModule(tf.Module): ... def __init__(self): ... self.v = None ... ... @tf.function ... def __call__(self, x): ... if self.v is None: ... self.v = tf.Variable(tf.ones_like(x)) ... return self.v * x In general, it is recommended to create stateful objects like `tf.Variable` outside of `tf.function` and pass them as arguments. _Using type annotations to improve performance_ `experimental_follow_type_hints` can be used along with type annotations to improve performance by reducing the number of expensive graph retracings. For example, an argument annotated with `tf.Tensor` is converted to Tensor even when the input is a non-Tensor value. 
>>> @tf.function(experimental_follow_type_hints=True) ... def f_with_hints(x: tf.Tensor): ... print('Tracing') ... return x >>> @tf.function(experimental_follow_type_hints=False) ... def f_no_hints(x: tf.Tensor): ... print('Tracing') ... return x >>> f_no_hints(1) Tracing >>> f_no_hints(2) Tracing >>> f_with_hints(1) Tracing >>> f_with_hints(2) Args: func: the function to be compiled. If `func` is None, `tf.function` returns a decorator that can be invoked with a single argument - `func`. In other words, `tf.function(input_signature=...)(func)` is equivalent to `tf.function(func, input_signature=...)`. The former can be used as a decorator. input_signature: A possibly nested sequence of `tf.TensorSpec` objects specifying the shapes and dtypes of the Tensors that will be supplied to this function. If `None`, a separate function is instantiated for each inferred input signature. If input_signature is specified, every input to `func` must be a `Tensor`, and `func` cannot accept `**kwargs`. autograph: Whether autograph should be applied on `func` before tracing a graph. Data-dependent control flow requires `autograph=True`. For more information, see the [tf.function and AutoGraph guide]( https://www.tensorflow.org/guide/function). experimental_implements: If provided, contains a name of a ""known"" function this implements. For example ""mycompany.my_recurrent_cell"". This is stored as an attribute in the inference function, which can then be detected when processing serialized function. See [standardizing composite ops](https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md) # pylint: disable=line-too-long for details. For an example of utilizing this attribute see this [example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc) The code above automatically detects and substitutes a function that implements ""embedded_matmul"" and allows TFLite to substitute its own implementations. For instance, a tensorflow user can use this attribute to mark that their function also implements `embedded_matmul` (perhaps more efficiently!) by specifying it using this parameter: `@tf.function(experimental_implements=""embedded_matmul"")` This can either be specified as just the string name of the function or a NameAttrList corresponding to a list of key-value attributes associated with the function name. The name of the function will be in the 'name' field of the NameAttrList. experimental_autograph_options: Optional tuple of `tf.autograph.experimental.Feature` values. experimental_relax_shapes: When True, `tf.function` may generate fewer graphs that are less specialized on input shapes. experimental_compile: If True, the function is always compiled by [XLA](https://www.tensorflow.org/xla). XLA may be more efficient in some cases (e.g. TPU, XLA_GPU, dense tensor computations). experimental_follow_type_hints: When True, the function may use type annotations from `func` to optimize the tracing performance. For example, arguments annotated with `tf.Tensor` will automatically be converted to a Tensor. Returns: If `func` is not None, returns a callable that will execute the compiled function (and return zero or more `tf.Tensor` objects). If `func` is None, returns a decorator that, when invoked with a single `func` argument, returns a callable equivalent to the case above. Raises: ValueError when attempting to use experimental_compile, but XLA support is not enabled." 
3307,undecorated_function,tensorflow/tensorflow/python/eager/def_function_test.py,52,function, 3308,_HasDecoratedMethod,tensorflow/tensorflow/python/eager/def_function_test.py,56,class, 3309,DefFunctionTest,tensorflow/tensorflow/python/eager/def_function_test.py,63,class, 3310,DefFunctionCpuOnlyTest,tensorflow/tensorflow/python/eager/def_function_test_cpu_only.py,29,class,"Test that experimental_compile=True correctly throws an exception if XLA is not available. This test should only be run without `--config=cuda`, as that implicitly links in XLA JIT." 3311,DefFunctionTest,tensorflow/tensorflow/python/eager/def_function_xla_jit_test.py,39,class, 3312,DefFunctionTests,tensorflow/tensorflow/python/eager/def_function_xla_test.py,27,class, 3313,SoftDevicePlacementTest,tensorflow/tensorflow/python/eager/device_placement_test.py,36,class, 3314,HardDevicePlacementTest,tensorflow/tensorflow/python/eager/device_placement_test.py,113,class, 3315,ClusterPlacementTest,tensorflow/tensorflow/python/eager/device_placement_test.py,151,class, 3316,quick_execute,tensorflow/tensorflow/python/eager/execute.py,33,function,"Execute a TensorFlow operation. Args: op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to execute. num_outputs: The number of outputs of the operation to fetch. (Explicitly provided instead of being inferred for performance reasons). inputs: A list of inputs to the operation. Each entry should be a Tensor, or a value which can be passed to the Tensor constructor to create one. attrs: A tuple with alternating string attr names and attr values for this operation. ctx: The value of context.context(). name: Customized name for the operation. Returns: List of output Tensor objects. The list is empty if there are no outputs. Raises: An exception on error." 3317,execute_with_cancellation,tensorflow/tensorflow/python/eager/execute.py,80,function,"Execute a TensorFlow operation. Args: op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to execute. num_outputs: The number of outputs of the operation to fetch. (Explicitly provided instead of being inferred for performance reasons). inputs: A list of inputs to the operation. Each entry should be a Tensor, or a value which can be passed to the Tensor constructor to create one. attrs: A tuple with alternating string attr names and attr values for this operation. ctx: The value of context.context(). cancellation_manager: a `CancellationManager` object that can be used to cancel the operation. name: Customized name for the operation. Returns: List of output Tensor objects. The list is empty if there are no outputs. Raises: An exception on error." 3318,execute_with_callbacks,tensorflow/tensorflow/python/eager/execute.py,136,function,Monkey-patch to execute to enable execution callbacks. 3319,must_record_gradient,tensorflow/tensorflow/python/eager/execute.py,148,function,Import backprop if you want gradients recorded. 3320,record_gradient,tensorflow/tensorflow/python/eager/execute.py,153,function,Import backprop if you want gradients recorded. 3321,make_float,tensorflow/tensorflow/python/eager/execute.py,159,function, 3322,make_int,tensorflow/tensorflow/python/eager/execute.py,166,function, 3323,make_str,tensorflow/tensorflow/python/eager/execute.py,177,function, 3324,make_bool,tensorflow/tensorflow/python/eager/execute.py,184,function, 3325,make_type,tensorflow/tensorflow/python/eager/execute.py,191,function, 3326,make_shape,tensorflow/tensorflow/python/eager/execute.py,201,function,Convert v into a list.
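As a rough illustration of how generated op wrappers drive `quick_execute`, here is a hedged sketch. The attr tuple format follows the docstring above (alternating names and values, with dtypes passed as enum values); an initialized eager context and the `Abs` op's single output are assumptions about the environment, not facts from this index:

```python
import tensorflow as tf
from tensorflow.python.eager import context, execute

ctx = context.context()
ctx.ensure_initialized()

x = tf.constant([-1.0, 2.0])
# "Abs" has a single output; its only attr is the dtype T.
(y,) = execute.quick_execute(
    "Abs", num_outputs=1, inputs=[x],
    attrs=("T", x.dtype.as_datatype_enum), ctx=ctx)
print(y)  # [1. 2.]
```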
3327,make_tensor,tensorflow/tensorflow/python/eager/execute.py,223,function,Ensure v is a TensorProto. 3328,args_to_matching_eager,tensorflow/tensorflow/python/eager/execute.py,236,function,Convert sequence `l` to eager same-type Tensors. 3329,convert_to_mixed_eager_tensors,tensorflow/tensorflow/python/eager/execute.py,294,function, 3330,args_to_mixed_eager_tensors,tensorflow/tensorflow/python/eager/execute.py,300,function,Converts a list of same-length lists of values to eager tensors. 3331,Executor,tensorflow/tensorflow/python/eager/executor.py,24,class,"A class for handling eager execution. The default behavior for asynchronous execution is to serialize all ops on a single thread. Having different `Executor` objects in different threads enables executing ops asynchronously in parallel: ```python def thread_function(): my_executor = executor.new_executor(enable_async=True) context.set_executor(my_executor) a = threading.Thread(target=thread_function) a.start() b = threading.Thread(target=thread_function) b.start() ```" 3332,new_executor,tensorflow/tensorflow/python/eager/executor.py,76,function, 3333,_identity_jvp,tensorflow/tensorflow/python/eager/forwardprop.py,46,function, 3334,_read_variable_jvp,tensorflow/tensorflow/python/eager/forwardprop.py,57,function, 3335,_jvp_helper,tensorflow/tensorflow/python/eager/forwardprop.py,73,function,"Computes a Jacobian-vector product for an op. Note that this function would be wasteful if executed eagerly. It runs the backward gradient function and throws away the result just to record its operations on a GradientTape. These unused ops are pruned away when this function is traced. Args: op_name: A string, the type of operation being executed. attr_tuple: Attributes of the operation. inputs: A flat list of input Tensors to the operation. outputs: A flat list of output Tensors from the operation. tangents: A flat list of Tensors, same shape as `inputs`. Returns: A flat list of tangents corresponding to `outputs`." 3336,_jvp_helper_wrapper,tensorflow/tensorflow/python/eager/forwardprop.py,145,function,"Computes a batch of Jacobian-vector product for an op. Args: op_name: A string, the type of operation being executed. attr_tuple: Attributes of the operation. inputs: A flat list of input Tensors to the operation. outputs: A flat list of output Tensors from the operation. tangents: A flat list of Tensors, compatible with shape `[None] + input_shape`. use_batch: A bool, True to vectorize over batch of tangents of shape `[None] + input_shape`. Returns: A flat list of tangents compatible with `outputs` or `[None] + output_shape`. Raises: ValueError: if tangent shapes are not compatible with input shapes." 3337,_jvp_dispatch,tensorflow/tensorflow/python/eager/forwardprop.py,201,function,Determine which forwardprop function to call. 3338,ForwardAccumulator,tensorflow/tensorflow/python/eager/forwardprop.py,221,class,"Computes Jacobian-vector products (""JVP""s) using forward-mode autodiff. Compare to `tf.GradientTape` which computes vector-Jacobian products (""VJP""s) using reverse-mode autodiff (backprop). Reverse mode is more attractive when computing gradients of a scalar-valued function with respect to many inputs (e.g. a neural network with many parameters and a scalar loss). Forward mode works best on functions with many outputs and few inputs. Since it does not hold on to intermediate activations, it is much more memory efficient than backprop where it is applicable.
Consider a simple linear regression: >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) >>> dense = tf.keras.layers.Dense(1) >>> dense.build([None, 2]) >>> with tf.autodiff.ForwardAccumulator( ... primals=dense.kernel, ... tangents=tf.constant([[1.], [0.]])) as acc: ... loss = tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.) >>> acc.jvp(loss) The example has two variables containing parameters, `dense.kernel` (2 parameters) and `dense.bias` (1 parameter). Considering the training data `x` as a constant, this means the Jacobian matrix for the function mapping from parameters to loss has one row and three columns. With forwardprop, we specify a length-three vector in advance which multiplies the Jacobian. The `primals` constructor argument is the parameter (a `tf.Tensor` or `tf.Variable`) we're specifying a vector for, and the `tangents` argument is the ""vector"" in Jacobian-vector product. If our goal is to compute the entire Jacobian matrix, forwardprop computes one column at a time while backprop computes one row at a time. Since the Jacobian in the linear regression example has only one row, backprop requires fewer invocations: >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) >>> dense = tf.keras.layers.Dense(1) >>> dense.build([None, 2]) >>> loss_fn = lambda: tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.) >>> kernel_fprop = [] >>> with tf.autodiff.ForwardAccumulator( ... dense.kernel, tf.constant([[1.], [0.]])) as acc: ... kernel_fprop.append(acc.jvp(loss_fn())) >>> with tf.autodiff.ForwardAccumulator( ... dense.kernel, tf.constant([[0.], [1.]])) as acc: ... kernel_fprop.append(acc.jvp(loss_fn())) >>> with tf.autodiff.ForwardAccumulator(dense.bias, tf.constant([1.])) as acc: ... bias_fprop = acc.jvp(loss_fn()) >>> with tf.GradientTape() as tape: ... loss = loss_fn() >>> kernel_grad, bias_grad = tape.gradient(loss, (dense.kernel, dense.bias)) >>> np.testing.assert_allclose( ... kernel_grad, tf.stack(kernel_fprop)[:, tf.newaxis]) >>> np.testing.assert_allclose(bias_grad, bias_fprop[tf.newaxis]) Implicit in the `tape.gradient` call is a length-one vector which left-multiplies the Jacobian, a vector-Jacobian product. `ForwardAccumulator` maintains JVPs corresponding to the primal tensors it is watching, derived from the original `primals` specified in the constructor. As soon as a primal tensor is deleted, `ForwardAccumulator` deletes the corresponding JVP. `acc.jvp(x)` retrieves `acc`'s JVP corresponding to the primal tensor `x`. It does not perform any computation. `acc.jvp` calls can be repeated as long as `acc` is accessible, whether the context manager is active or not. New JVPs are only computed while the context manager is active. Note that `ForwardAccumulator`s are always applied in the order their context managers were entered, so inner accumulators will not see JVP computation from outer accumulators. Take higher-order JVPs from outer accumulators: >>> primal = tf.constant(1.1) >>> with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as outer: ... with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as inner: ... primal_out = primal ** tf.constant(3.5) >>> inner_jvp = inner.jvp(primal_out) >>> inner_jvp # 3.5 * 1.1 ** 2.5 >>> outer.jvp(inner_jvp) # 3.5 * 2.5 * 1.1 ** 1.5 Reversing the collection in the last line to instead retrieve `inner.jvp(outer.jvp(primal_out))` will not work. Strict nesting also applies to combinations of `ForwardAccumulator` and `tf.GradientTape`.
More deeply nested `GradientTape` objects will ignore the products of outer `ForwardAccumulator` objects. This allows (for example) memory-efficient forward-over-backward computation of Hessian-vector products, where the inner `GradientTape` would otherwise hold on to all intermediate JVPs: >>> v = tf.Variable([1., 2.]) >>> with tf.autodiff.ForwardAccumulator( ... v, ... # The ""vector"" in Hessian-vector product. ... tf.constant([1., 0.])) as acc: ... with tf.GradientTape() as tape: ... y = tf.reduce_sum(v ** 3.) ... backward = tape.gradient(y, v) >>> backward # gradient from backprop >>> acc.jvp(backward) # forward-over-backward Hessian-vector product " 3339,_jvp,tensorflow/tensorflow/python/eager/forwardprop_test.py,62,function,Compute the jacobian of `f` at `primals` multiplied by `tangents`. 3340,_jacfwd,tensorflow/tensorflow/python/eager/forwardprop_test.py,70,function,Compute the jacobian of `f` at `primals` using forward-mode autodiff. 3341,_jvp_batch,tensorflow/tensorflow/python/eager/forwardprop_test.py,93,function, 3342,_jvp_batch_matmul,tensorflow/tensorflow/python/eager/forwardprop_test.py,100,function,Compute the jacobian of `f` at `primals` multiplied by `tangents`. 3343,_grad,tensorflow/tensorflow/python/eager/forwardprop_test.py,113,function,Return a function which computes the gradient of `f`. 3344,_gradfwd,tensorflow/tensorflow/python/eager/forwardprop_test.py,128,function,Return a function which computes the gradient of `f` in forward mode. 3345,_hvp,tensorflow/tensorflow/python/eager/forwardprop_test.py,144,function,Compute a forward-over-back Hessian-vector product. 3346,_vectorize_parameters,tensorflow/tensorflow/python/eager/forwardprop_test.py,154,function,"Loop over `params`, providing a one-hot mask to `f` for each." 3347,_forward_over_back_hessian,tensorflow/tensorflow/python/eager/forwardprop_test.py,172,function,"Computes the full Hessian matrix for the scalar-valued f(*params). Args: f: A function taking `params` and returning a scalar. params: A possibly nested structure of tensors. use_pfor: If true, uses `tf.vectorized_map` calls instead of looping. dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes (e.g. `tf.float32`) matching the structure of `f`'s returns. Returns: A possibly nested structure of matrix slices corresponding to `params`. Each slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`) in the corresponding element of `params` and `P` is the total number of parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating along the second axis." 3348,_test_gradients,tensorflow/tensorflow/python/eager/forwardprop_test.py,194,function,"Tests forward/backward jacobians of `f`'s [0, `order`)-order gradients." 3349,ForwardpropTest,tensorflow/tensorflow/python/eager/forwardprop_test.py,223,class, 3350,_has_loop,tensorflow/tensorflow/python/eager/forwardprop_test.py,918,function, 3351,_has_cond,tensorflow/tensorflow/python/eager/forwardprop_test.py,926,function, 3352,_fprop_while,tensorflow/tensorflow/python/eager/forwardprop_test.py,935,function, 3353,_fprop_cond,tensorflow/tensorflow/python/eager/forwardprop_test.py,944,function, 3354,ControlFlowTests,tensorflow/tensorflow/python/eager/forwardprop_test.py,953,class, 3355,HessianTests,tensorflow/tensorflow/python/eager/forwardprop_test.py,984,class, 3356,JacobianTests,tensorflow/tensorflow/python/eager/forwardprop_test.py,1012,class, 3357,TangentInfo,tensorflow/tensorflow/python/eager/forwardprop_util.py,30,class,Packed forward accumulator state. 
The return value of `pack_tangents`. 3358,pack_tangents,tensorflow/tensorflow/python/eager/forwardprop_util.py,42,function,"Packs forward accumulator state into a TangentInfo tuple. Args: tensors: A flat list of Tensors to pack forward accumulator state for. Returns: A tuple of (indices, tangents): indices: A sequence of sequences of two-element tuples. Each forward accumulator is represented as a sequence of tuples with (primal_index, jvp_index). Both integers index into the concatenated `tensors + jvps` array. tangents: A flat list of Tensors. Best interpreted as a sequence to be appended to `tensors`." 3359,push_forwardprop_state,tensorflow/tensorflow/python/eager/forwardprop_util.py,61,function,"Temporarily push or pop transient state for accumulators in the active set. Allows an accumulator which is currently processing an operation to temporarily reset its state. This is useful when building forwardprop versions of functions, where an accumulator will trigger function building and then must process captured symbolic tensors while building it. Without pushing and popping, accumulators ignore operations executed as a direct result of their own jvp computations. Yields: None (used for its side effect)." 3360,_make_input_signature_hashable,tensorflow/tensorflow/python/eager/function.py,97,function,"Rewrite input signature to be hashable. We replace nested variables in the input signature with TensorSpec so that it is hashable. Args: elem: Input signature element Returns: A hashable object for the requested input signature" 3361,_type_spec_for,tensorflow/tensorflow/python/eager/function.py,160,function,"Returns a TypeSpec for `x`, or `None` if `x` doesn't have a TensorSpec." 3362,_is_type_subset,tensorflow/tensorflow/python/eager/function.py,172,function,Returns true if TypeSpec `b` is a subset of type `a` (or if `a` is None). 3363,_shape_relaxed_type_for_composite_tensor,tensorflow/tensorflow/python/eager/function.py,180,function,Returns a shape-relaxed TypeSpec for x (if composite) or x (if not). 3364,common_shape,tensorflow/tensorflow/python/eager/function.py,189,function,Find a `TensorShape` that is compatible with both `x` and `y`. 3365,is_same_structure,tensorflow/tensorflow/python/eager/function.py,214,function,"Check two structures for equality, optionally of types and of values." 3366,_parse_func_attrs,tensorflow/tensorflow/python/eager/function.py,232,function,"Convert the keyword arguments into function_def attributes. Currently only support primitive types: bool, int, float and string. Args: attributes: the dictionary of attributes. Returns: A dict of attributes where the key is the name of attribute and the value is the AttrValue proto. Raises: ValueError: If the kwargs contain an unallowlisted name or unsupported value types." 3367,_InterpolateFunctionError,tensorflow/tensorflow/python/eager/function.py,265,class,Context Manager that interpolates the exception from 'top_level_func'. 3368,add_function_callback,tensorflow/tensorflow/python/eager/function.py,309,function,"Add a callback function for Function creation. The callback function has the signature: `def function_callback(function):` wherein `function` is the just-created _EagerDefinedFunction. The callback is invoked immediately after a new `_EagerDefinedFunction` is created. The return value(s) of the callback function (if any) is ignored. Repeated registration of the same callback function is idempotent. After a callback is added, it can be removed with the `remove_function_callback()` method.
Args: function_callback: The callback to add." 3369,remove_function_callback,tensorflow/tensorflow/python/eager/function.py,330,function,"Remove an already-added function callback. See the doc string of `add_function_callback()` for more information. Args: function_callback: The callback to remove." 3370,clear_function_callbacks,tensorflow/tensorflow/python/eager/function.py,341,function,"Clear all function callbacks, if any have been registered." 3371,_forward_name,tensorflow/tensorflow/python/eager/function.py,351,function,The name of a generated forward defun named n. 3372,_backward_name,tensorflow/tensorflow/python/eager/function.py,356,function,The name of a generated backward defun named n. 3373,_inference_name,tensorflow/tensorflow/python/eager/function.py,361,function,The name of a forward-but-no-gradient defun named n. 3374,_enclosing_xla_context,tensorflow/tensorflow/python/eager/function.py,366,function,"Returns the XLAControlFlowContext, which exists inside a tpu.rewrite()." 3375,_EagerDefinedFunctionDeleter,tensorflow/tensorflow/python/eager/function.py,383,class,Unregister function from eager context. 3376,_EagerDefinedFunction,tensorflow/tensorflow/python/eager/function.py,410,class,"Callable with the interface of `framework.function._DefinedFunction`. `_EagerDefinedFunction` encapsulates a function definition and its properties, and it provides a method for calling the encapsulated function. Some Ops take functions as attributes, which have type `func`; an instance of this class may be provided as the value of these `func` attributes." 3377,_DelayedRewriteGradientFunctions,tensorflow/tensorflow/python/eager/function.py,601,class,Caches forward/backward functions with a delayed forward rewrite. 3378,_TapeGradientFunctions,tensorflow/tensorflow/python/eager/function.py,843,class,"Caches forward and backward functions compatible with eager gradients. In contrast to the delayed-rewrite approach in `_DelayedRewriteGradientFunctions` which only works with delayed execution, the forward function generated by this class has a fixed set of outputs which may be preserved by a tape in order to compute gradients later. This class is abstract; its child classes differ in how many side outputs of the forward function their backward function accepts gradients for, which determines whether higher-order tape gradients are possible." 3379,_FirstOrderTapeGradientFunctions,tensorflow/tensorflow/python/eager/function.py,1309,class,Caches tape-friendly functions for first-order gradients. 3380,_HigherOrderTapeGradientFunctions,tensorflow/tensorflow/python/eager/function.py,1351,class,Caches tape-friendly functions for higher-order gradients. 3381,_ForwardBackwardCall,tensorflow/tensorflow/python/eager/function.py,1417,class,Holds the state of a function call between execution and recording. 3382,ConcreteFunction,tensorflow/tensorflow/python/eager/function.py,1464,class,"Callable object encapsulating a function definition and its gradient. `ConcreteFunction` is a callable that encapsulates a function definition and is differentiable under `tf.GradientTape` objects." 3383,_deterministic_dict_values,tensorflow/tensorflow/python/eager/function.py,2319,function, 3384,FunctionSpec,tensorflow/tensorflow/python/eager/function.py,2323,class,Specification of how to bind arguments to a function. 3385,_as_ndarray,tensorflow/tensorflow/python/eager/function.py,2704,function,"Converts value to an ndarray, assumes _is_ndarray(value)."
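A minimal sketch of the callback hooks described above (`add_function_callback` / `remove_function_callback`); the logging body is illustrative, and the `name` attribute on the created function is an assumption:

```python
import tensorflow as tf
from tensorflow.python.eager import function as eager_function

def log_new_function(fn):
  # Invoked once per newly created _EagerDefinedFunction; return value ignored.
  print("traced:", fn.name)  # assumes the created function exposes `name`

eager_function.add_function_callback(log_new_function)

@tf.function
def f(x):
  return x + 1

f(tf.constant(1))  # triggers tracing, which fires the callback

eager_function.remove_function_callback(log_new_function)
```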
3386,_is_ndarray,tensorflow/tensorflow/python/eager/function.py,2710,function,Tests whether the given value is an ndarray (and not a TF tensor/var). 3387,_convert_numpy_inputs,tensorflow/tensorflow/python/eager/function.py,2724,function,Convert numpy array inputs to tensors. 3388,_convert_inputs_to_signature,tensorflow/tensorflow/python/eager/function.py,2750,function,Convert inputs to pass into a function with an explicit signature. 3389,FunctionCache,tensorflow/tensorflow/python/eager/function.py,2802,class,"A lightweight container for cached functions. " 3390,Function,tensorflow/tensorflow/python/eager/function.py,2836,class,"Wrapper class for the graph functions defined for a Python function. See the documentation for `defun` for more information on the semantics of defined functions. The `Function` class is thread-compatible, meaning that minimal usage of defuns (defining and calling) is thread-safe, but if users call other methods or invoke the base `python_function` themselves, external synchronization is necessary. In addition, Function is not reentrant, so recursive functions need to call the wrapped function, not the wrapper." 3391,register,tensorflow/tensorflow/python/eager/function.py,3328,function,"Register a specialization of a `Function` into the graph. This won't actually call the function with the inputs; it only puts the function definition into the graph. Registering the function with different input parameters will result in multiple versions of the function being registered in the graph. Args: func: the `Function` instance generated by a @defun *args: input arguments for the Python function. **kwargs: input keyword arguments for the Python function. Returns: a `ConcreteFunction` object specialized to inputs and execution context. Raises: ValueError: When the input function is not a defun wrapped python function." 3392,validate_signature,tensorflow/tensorflow/python/eager/function.py,3355,function, 3393,defun,tensorflow/tensorflow/python/eager/function.py,3363,function,"Compiles a Python function into a callable TensorFlow graph. `defun` (short for ""define function"") compiles a Python function composed of TensorFlow operations into a callable that executes a `tf.Graph` containing those operations. The callable produced by `defun` contains only the subgraph of TensorFlow operations that were executed when the Python function was called with a particular input signature, defined as a list of the shapes and dtypes of the Python function's Tensor-valued arguments and the values of its non-Tensor Python objects. When eager execution is enabled, the ability to create graphs from Python functions makes it possible to incrementally trade off debuggability and interactivity for performance. Functions compiled with `defun` cannot be inspected with `pdb`; however, executing a graph generated by `defun` sometimes takes less time and memory than eagerly executing the corresponding Python function, since specifying computations as graphs allows for optimizations like automatic buffer reuse and parallelization among ops. Note that executing a `defun`-compiled function incurs a small constant overhead, so eagerly executing sufficiently small Python functions might take less time than executing their corresponding `defun`-generated graphs. For a Python function to be compatible with `defun`, all of its arguments must be hashable Python objects or lists thereof. The function itself may not modify the list/map structure of its arguments. Additionally, it must return zero or more `tf.Tensor` objects.
If the Python function returns a `tf.Variable`, its compiled version will return the value of that variable as a `tf.Tensor`. Executing a graph generated by `defun` respects device annotations (i.e., all `with tf.device` directives present in a Python function will also be present in its corresponding graph), but it is not yet possible to execute the generated graphs across multiple machines. _Example Usage_ ```python import tensorflow as tf tf.compat.v1.enable_eager_execution() # A simple example. def f(x, y): return tf.reduce_mean(tf.multiply(x ** 2, 3) + y) g = tf.contrib.eager.defun(f) x = tf.constant([[2.0, 3.0]]) y = tf.constant([[3.0, -2.0]]) # `f` and `g` will return the same value, but `g` will be executed as a # TensorFlow graph. assert f(x, y).numpy() == g(x, y).numpy() # `defun` is capable of compiling Python functions that close over Python # objects, including Tensors and Variables. @tf.contrib.eager.defun def h(): return f(x, y) assert (h().numpy() == f(x, y).numpy()).all() # `defun` automatically lifts variables out of the graphs it creates, # allowing you to compile the `call` methods of `tf.keras.layers.Layer` and # `tf.keras.Model` objects. class MyModel(tf.keras.Model): def __init__(self, keep_probability=0.2): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.keep_probability = keep_probability @tf.contrib.eager.defun def call(self, inputs, training=True): x = self.dense2(self.dense1(inputs)) if training: return tf.nn.dropout(x, self.keep_probability) else: return x model = MyModel() model(x, training=True) # executes a graph, with dropout model(x, training=False) # executes a graph, without dropout # `defun`-compiled functions are differentiable. optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01) with tf.GradientTape() as tape: outputs = model(x) gradient = tape.gradient(outputs, model.trainable_variables) optimizer.apply_gradients((grad, var) for grad, var in zip(gradient, model.trainable_variables)) ``` When using `defun`, there are subtleties regarding inputs, Python control flow, and variable creation that one should be aware of. For concreteness, let `f` be a Python function that returns zero or more `tf.Tensor` objects and let `F = defun(f)`. `F` builds a graph for each unique input signature it sees, Python control flow is baked into graphs, and operations related to variable initialization are automatically lifted out of the graphs that `F` generates and placed in the eager context if executing eagerly or into an outer graph otherwise. _Input Signatures_ By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph for every unique sequence of the shapes and dtypes of Tensor arguments and the values of Python objects it is invoked with. For example, calling `F(tf.random.uniform([2]))` will execute a different graph than `F(tf.random.uniform([3]))` because the two inputs have different shapes. The first time that `F(*args, **kwargs)` is called with a particular sequence of Tensor shapes and dtypes and Python values, it constructs a graph by tracing the execution of `f(*args, **kwargs)`; this graph is bound to an input signature inferred from `(*args, **kwargs)` and cached for future reuse. NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects before being passed to `f`, and are treated as Tensors for caching.
This allows a function to be called multiple times with NumPy arrays having different values but the same shape and dtype without re-tracing each time. `tf.contrib.eager.defun` caches graphs for your convenience, letting you define TensorFlow functions without explicitly specifying their signatures. However, this policy is conservative and potentially expensive; for example, when different invocations of your function have differently-shaped Tensor inputs, this policy might generate more graph functions than necessary. To eliminate such costs, `tf.contrib.eager.defun` allows you to supply an optional `input_signature` argument specifying the shapes and dtypes of the inputs. In particular, the shapes may be partially unspecified, with `None`s in the unknown dimensions. When an input signature is provided, `tf.contrib.eager.defun` will only instantiate a single graph for the decorated Python function. The following is an example: ```python import tensorflow as tf # The first `TensorSpec` below describes the shape and dtype of `words`, # and the second describes the shape and dtype of `another_tensor`. Note that # the last dimension of the `words` `TensorSpec` is left unspecified. @tf.contrib.eager.defun(input_signature=[ tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32), tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32) ]) def my_sequence_model(words, another_tensor): ... # Note how the third dimension of the first input can vary freely. words = tf.random.uniform([50, 300, 10]) second_input = tf.random.uniform([300, 100]) my_sequence_model(words, second_input) words = tf.random.uniform([50, 300, 20]) my_sequence_model(words, second_input) # Passing an input with an incompatible shape will raise an error. words = tf.random.uniform([50, 100, 20]) my_sequence_model(words, second_input) # <---- This will raise an error. ``` Python functions that are compiled with an `input_signature` must only accept Tensors as arguments and must not take unnamed keyword arguments (**kwargs). _Tracing_ Be aware that because `F` only logs TensorFlow operations, all the other Python code that `f` executes will only shape the _construction_ of the graphs that `F` executes: the Python code won't be executed when the graphs themselves are executed, though it will be executed every time the Python function is traced (and a given Python function might be traced multiple times, once for each input signature it is invoked with). For example, whereas the Python function ```python import tensorflow as tf import numpy as np tf.compat.v1.enable_eager_execution() def add_noise(): return tf.eye(5) + np.random.randn(5, 5) ``` will return a different output every time it is invoked, the compiled function `compiled = tf.contrib.eager.defun(add_noise)` will return the same value every time it is called, since a particular random offset generated by NumPy will be inserted into the graph as a TensorFlow constant. The solution is to replace the call to `np.random.randn` with `tf.random.normal((5, 5))`. _Python Side-Effects_ A corollary of the previous discussion on tracing is the following: If a Python function `f` has Python side-effects, then executing `f` multiple times will not necessarily be semantically equivalent to executing `F = tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact that `defun` only captures the subgraph of TensorFlow operations that is constructed when `f` is called in a graph-building context.
_Python Control Flow_ The structure of many machine learning computations depends upon whether one is training or validating, and it is common to nest specialized logic under `if training:` blocks. By mapping each input signature to a unique graph, `defun` lets users transparently compile such code, as the following code snippet demonstrates: ```python import tensorflow as tf tf.compat.v1.enable_eager_execution() @tf.contrib.eager.defun def lossy_matmul(W, x, training=True): outputs = tf.matmul(W, x) if training: outputs = tf.nn.dropout(outputs, keep_prob=0.2) return outputs W = tf.random.normal((3, 5)) x = tf.random.normal((5, 1)) # Executes a graph that applies dropout. lossy_outputs = lossy_matmul(W, x, training=True) # Executes a graph that does not apply dropout. exact_outputs = lossy_matmul(W, x, training=False) ``` _TensorFlow Control Flow_ When `autograph` is `True`, data-dependent control flow is allowed as well. Control flow statements that depend on `Tensor` values are staged into corresponding TensorFlow ops. For example, the following code will work as expected: ```python @tf.contrib.eager.defun def dynamic_rnn_loop(cell, seq): state, output = cell.zero_state() for input in seq: state, output = cell(input, state) return output ``` For more information see `tf.autograph`. _Variables_ TensorFlow operations related to variable creation and initialization are automatically lifted out of the graphs generated by `defun`. In practice, this implies that variable creation and initialization only happen the first time `F` is called, and that variables are reused every time thereafter. Many TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the first time they are called and reuse them thereafter. Automatic variable lifting makes it possible to compile these APIs without extra effort, at the cost of introducing a discrepancy between the semantics of executing Python functions and their corresponding compiled functions. For example: ```python import tensorflow as tf tf.compat.v1.enable_eager_execution() def fn(): x = tf.Variable(0.0) x.assign_add(1.0) return x.read_value() # `fn` is a Python function, so x is created, initialized, and destroyed upon # every invocation assert fn().numpy() == fn().numpy() == 1.0 compiled = tf.contrib.eager.defun(fn) # Compiling `fn` with `defun` hoists all variables outside of the generated # graph, so initialization happens exactly once. assert compiled().numpy() == 1.0 assert compiled().numpy() == 2.0 ``` Finally, because each input signature is bound to a unique graph, if your Python function constructs `tf.Variable` objects, then each graph constructed for that Python function will reference a unique set of variables. To circumvent this problem, we recommend against compiling Python functions that create `tf.Variable` objects. Instead, Python functions should either lexically close over `tf.Variable` objects or accept them as arguments, preferably encapsulated in an object-oriented container. If you must create variables inside your Python function and you want each graph generated for it to reference the same set of variables, add logic to your Python function that ensures that variables are only created the first time it is called and are reused for every subsequent invocation; note that this is precisely what `tf.keras.layers.Layer` objects do, so we recommend using them to represent variable-bearing computations whenever possible. Args: func: function to be compiled.
If `func` is None, returns a decorator that can be invoked with a single argument - `func`. The end result is equivalent to providing all the arguments up front. In other words, defun(input_signature=...)(func) is equivalent to defun(func, input_signature=...). The former allows the following use case: @tf.contrib.eager.defun(input_signature=...) def foo(...): ... input_signature: A possibly nested sequence of `tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of the Tensors that will be supplied to this function. If `None`, a separate function is instantiated for each inferred input signature. If a signature is specified, every input to `func` must be a `Tensor`, and `func` cannot accept `**kwargs`. autograph: Whether `func` should be compiled before constructing the graph. See https://www.tensorflow.org/guide/autograph for more information. experimental_autograph_options: Experimental knobs (in the form of a tuple of tensorflow.autograph.Feature values) to control behavior when autograph=True. experimental_relax_shapes: When true, argument shapes may be relaxed to avoid unnecessary retracing. Returns: If `func` is not None, returns a callable that will execute the compiled function (and return zero or more `tf.Tensor` objects). If `func` is None, returns a decorator that, when invoked with a single `func` argument, returns a callable equivalent to the case above. Raises: TypeError: If `input_signature` is neither `None` nor a sequence of `tf.contrib.eager.TensorSpec` objects." 3394,defun_with_attributes,tensorflow/tensorflow/python/eager/function.py,3705,function,"Compiles a Python function into a callable TensorFlow graph. This function supports adding extra function attributes. See detailed documentation in defun(). Currently this is not exposed in the public API since we don't expect users to use attributes directly, and attributes won't work by themselves. This assumption might change in the future. Args: func: function to be compiled. input_signature: same as defun()'s input_signature. attributes: A dictionary of arguments which will be added to function def as attributes. Currently only primitive types are supported as values, and only allowlisted attribute names are allowed. An unallowlisted attribute name or unsupported value will result in a ValueError. `func_name` is also one of the allowlisted arguments; it is a python string, and sets the name for this `ConcreteFunction` in the graph. autograph: same as defun()'s autograph. experimental_autograph_options: same as defun()'s experimental_autograph_options. experimental_compile: same as defun()'s experimental_compile. experimental_relax_shapes: same as defun()'s experimental_relax_shapes. experimental_follow_type_hints: see `tf.function`. Returns: Same as the return value of defun, with attributes added to the function in graph." 3395,TfMethodTarget,tensorflow/tensorflow/python/eager/function.py,3784,class,Binding target for methods replaced by function and defun. 3396,class_method_to_instance_method,tensorflow/tensorflow/python/eager/function.py,3813,function,Constructs a new `Function` with `self` bound. 3397,_FunctionGarbageCollector,tensorflow/tensorflow/python/eager/function.py,3870,class,Cleans up cycles when a defun goes out of scope. 3398,ConcreteFunctionGarbageCollector,tensorflow/tensorflow/python/eager/function.py,3889,class,Cleans up reference cycles when a `ConcreteFunction` goes out of scope.
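The `register` helper described above only inserts the traced FunctionDef into a graph without running it. A brief sketch, under the assumption that it is invoked inside a graph context with a defun-wrapped function:

```python
import tensorflow as tf
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import ops

@eager_function.defun
def matmul(x, y):
  return tf.matmul(x, y)

with ops.Graph().as_default():
  t = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  # Adds the traced function definition to the graph; nothing is executed.
  concrete = eager_function.register(matmul, t, t)
```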
3399,_Marker,tensorflow/tensorflow/python/eager/function.py,3910,class,Markers used to pretty-print nested args in function signatures. 3400,_structure_summary,tensorflow/tensorflow/python/eager/function.py,3922,function,Displays a summary of the nesting structure of the given value. 3401,_contains_type_spec,tensorflow/tensorflow/python/eager/function.py,3935,function, 3402,ArgumentNamingTests,tensorflow/tensorflow/python/eager/function_argument_naming_test.py,38,class,Tests for recognizable export signatures from concrete functions. 3403,DefunCollectionTest,tensorflow/tensorflow/python/eager/function_defun_collection_test.py,33,class, 3404,FunctionGradientsTest,tensorflow/tensorflow/python/eager/function_gradients_test.py,52,class, 3405,total_function_cache,tensorflow/tensorflow/python/eager/function_test.py,93,function, 3406,_example_indexed_slices_with_dense_shape,tensorflow/tensorflow/python/eager/function_test.py,100,function, 3407,_example_indexed_slices_without_dense_shape,tensorflow/tensorflow/python/eager/function_test.py,106,function, 3408,_spec_for_value,tensorflow/tensorflow/python/eager/function_test.py,111,function,Returns the (nested) TypeSpec for a value. 3409,FunctionTest,tensorflow/tensorflow/python/eager/function_test.py,121,class, 3410,MultiDeviceTest,tensorflow/tensorflow/python/eager/function_test.py,4224,class, 3411,_SubscriptUseTracker,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,110,class,"Track uses of composite names, excluding certain names when subscripted." 3412,_FunctionCallsTracker,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,148,class,Tracks any function calls made with a given first argument name. 3413,_live_tensors,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,183,function,"Returns the indices of the used inputs. Note: This currently only handles direct index accesses e.g. op.inputs[1]. If the function has slicing or list comprehension on attr_name then returns _ALL. This ensures that this is correct even if inefficient. Args: f: A grad function, taking the op as first argument. attr_name: op attr to track. ""inputs"" or ""outputs"". Returns: Either one of: * set of integers representing individual indices of inputs used * the value _ALL, if indices are used but cannot be determined which * empty set, if no inputs are used" 3414,_get_num_inputs_outputs,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,264,function,"Returns (num_inputs, num_outputs). Args: op_type: String. The type of the Operation. Used to lookup the op in the registry. Returns: (num_inputs, num_outputs). For either num_inputs or num_outputs, if the value can't be statically inferred from the OpDef alone or if the OpDef lookup fails, -1 is returned." 3415,get_entries,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,293,function,"Returns the dict of entries. Each entry is of the form {op_name, {true|false, indices}} true: All values are unused. false: `indices` are the only unused indices. Note: ops for which all values are used are not printed. Args: attr_name: inputs or outputs. Returns: A dict from op_type to formatted entry in the dict." 3416,get_function,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,338,function,Generates lookup function with given name and lookup table entries. 3417,get_contents,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,363,function,Returns contents for the generated file.
3418,main,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions.py,374,function, 3419,GradientInputOutputExclusionsTest,tensorflow/tensorflow/python/eager/gradient_input_output_exclusions_test.py,29,class, 3420,graph_placeholder,tensorflow/tensorflow/python/eager/graph_only_ops.py,29,function,"Graph-only version of tf.compat.v1.placeholder(), for internal use only." 3421,GraphOnlyOpsTest,tensorflow/tensorflow/python/eager/graph_only_ops_test.py,30,class, 3422,imperative_grad,tensorflow/tensorflow/python/eager/imperative_grad.py,33,function,"Computes gradients from the imperatively defined tape on top of the stack. Works by filtering the tape, computing how many downstream usages are of each tensor and entry, and repeatedly applying backward functions until we have gradients for all sources. Args: tape: the gradient tape which stores the trace. target: either a Tensor or list of Tensors to be differentiated. sources: list of Tensors for which we want gradients output_gradients: if not None, a list of gradients provided for each target, or None if we are to use the target's computed downstream gradient. sources_raw: if not None, a list of the source python objects from which the sources were generated. Should have the same length as sources. Only needs to be populated if unconnected_gradients is 'zero'. unconnected_gradients: determines the value returned if the target and sources are unconnected. When 'none' the value returned is None whereas when 'zero' a zero tensor in the same shape as the sources is returned. Returns: the gradient wrt each of the sources. Raises: ValueError: if the arguments are invalid. RuntimeError: if something goes wrong." 3423,_as_operation,tensorflow/tensorflow/python/eager/lift_to_graph.py,36,function, 3424,_constant_inputs,tensorflow/tensorflow/python/eager/lift_to_graph.py,42,function, 3425,_copy_non_source,tensorflow/tensorflow/python/eager/lift_to_graph.py,62,function,"Copy an op directly to a given graph. Generally `op`'s inputs should already have been copied. If this is not the case, for example with v1 while_loops, then `_copy_non_source` inserts placeholders for the unavailable Tensors and returns a list of required mutations. Args: op: The op to be copied. graph: The destination graph. op_map: A dict mapping ops and tensors in the old graph to the new one. base_graph: The graph we're copying from, for any necessary functions. Returns: A tuple of (required_inputs, required_control_inputs): required_inputs: A list of `_InputMutation` tuples containing inputs to `copied_op` which must be updated once `old_graph_tensor` has been copied. required_control_inputs: A list of `_ControlMutation` tuples containing control inputs to `copied_op` which must be added once `old_graph_op` has been copied." 3426,_copy_source,tensorflow/tensorflow/python/eager/lift_to_graph.py,145,function,"Create a source in a graph based on a Tensor from a different graph. This function creates a placeholder analog of `s` in a graph with the following behavior: 1) If s is a captured Tensor or Variable and handle_captures is set to True, simply capture it in the new graph as well. 2) If s is a PlaceholderWithDefault whose default is a constant, preserve said default in the new graph. 3) When applicable, copy resource variable metadata from `s` to the newly created placeholder. Args: s: The source of interest. graph: The destination graph. op_map: A dict mapping ops and tensors in the old graph to the new one.
handle_captures: A boolean indicating whether to re-capture s in the new graph or simply create a vanilla placeholder. inverse_captures: A dict mapping s back to the Tensor or Variable that it captures. base_graph: The graph being copied from." 3427,lift_to_graph,tensorflow/tensorflow/python/eager/lift_to_graph.py,205,function,"Copies the tensor and all its inputs recursively to the outer graph. Args: tensors: The Tensors to lift. graph: The graph to lift to. sources: Optional sequence of nodes to start from. If omitted, the whole subgraph which feeds into `tensors` is lifted. disallowed_placeholders: An optional set of ops which may not appear in the lifted graph. Defaults to all placeholders. add_sources: A boolean indicating whether placeholders which are not in sources should be allowed. handle_captures: A boolean indicating whether to re-capture captured tensors in the new graph or simply create a vanilla placeholder. base_graph: The graph from which to lift ops. This will be inferred if not specified. op_map: A map containing all the existing nodes that have been lifted to the destination graph, so they won't be lifted and copied again. Returns: A mapping from ops in the current default graph to ops in `graph`. Raises: UnliftableError: If a placeholder blocks lifting." 3428,LiftToGraphTest,tensorflow/tensorflow/python/eager/lift_to_graph_test.py,32,class, 3429,Metric,tensorflow/tensorflow/python/eager/monitoring.py,104,class,The base class of metric. 3430,CounterCell,tensorflow/tensorflow/python/eager/monitoring.py,147,class,CounterCell stores each value of a Counter. 3431,Counter,tensorflow/tensorflow/python/eager/monitoring.py,173,class,"A stateful class for updating a cumulative integer metric. This class encapsulates a set of values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to increment each value." 3432,IntGaugeCell,tensorflow/tensorflow/python/eager/monitoring.py,199,class,A single integer value stored in an `IntGauge`. 3433,IntGauge,tensorflow/tensorflow/python/eager/monitoring.py,225,class,"A stateful class for updating a gauge-like integer metric. This class encapsulates a set of integer values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value." 3434,StringGaugeCell,tensorflow/tensorflow/python/eager/monitoring.py,251,class,A single string value stored in a `StringGauge`. 3435,StringGauge,tensorflow/tensorflow/python/eager/monitoring.py,280,class,"A stateful class for updating a gauge-like string metric. This class encapsulates a set of string values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value." 3436,BoolGaugeCell,tensorflow/tensorflow/python/eager/monitoring.py,306,class,A single boolean value stored in a `BoolGauge`. 3437,BoolGauge,tensorflow/tensorflow/python/eager/monitoring.py,332,class,"A stateful class for updating a gauge-like bool metric. This class encapsulates a set of boolean values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value." 3438,SamplerCell,tensorflow/tensorflow/python/eager/monitoring.py,358,class,SamplerCell stores each value of a Sampler. 3439,Buckets,tensorflow/tensorflow/python/eager/monitoring.py,393,class,Bucketing strategies for the samplers.
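A small usage sketch for the monitoring classes indexed above; the metric names here are invented for illustration (real metric names must be unique within the process):

```python
from tensorflow.python.eager import monitoring

# A cumulative counter with a single "status" label, as described above.
counter = monitoring.Counter("/example/api_calls", "Counts API calls.",
                             "status")
counter.get_cell("ok").increase_by(1)
assert counter.get_cell("ok").value() == 1

# A label-less integer gauge: set() overwrites rather than accumulates.
gauge = monitoring.IntGauge("/example/worker_count", "Current worker count.")
gauge.get_cell().set(4)
```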
3440,ExponentialBuckets,tensorflow/tensorflow/python/eager/monitoring.py,410,class,"Exponential bucketing strategy. Sets up buckets of the form: [-DBL_MAX, ..., scale * growth_factor^i, scale * growth_factor^(i + 1), ..., DBL_MAX]." 3441,Sampler,tensorflow/tensorflow/python/eager/monitoring.py,433,class,"A stateful class for updating a cumulative histogram metric. This class encapsulates a set of histograms (or a single histogram for a label-less metric) configured with a list of increasing bucket boundaries. Each histogram is identified by a tuple of labels. The class allows the user to add a sample to each histogram value." 3442,MonitoredTimer,tensorflow/tensorflow/python/eager/monitoring.py,461,class,A context manager to measure the walltime and increment a Counter cell. 3443,monitored_timer,tensorflow/tensorflow/python/eager/monitoring.py,484,function,"A function decorator for adding MonitoredTimer support. Arguments: cell: the cell associated with the time metric that will be incremented. Returns: A decorator that measures the function runtime and increments the specified counter cell." 3444,MonitoringTest,tensorflow/tensorflow/python/eager/monitoring_test.py,29,class, 3445,OpsTest,tensorflow/tensorflow/python/eager/ops_test.py,46,class, 3446,ProfilerAlreadyRunningError,tensorflow/tensorflow/python/eager/profiler.py,58,class, 3447,ProfilerNotRunningError,tensorflow/tensorflow/python/eager/profiler.py,62,class, 3448,start,tensorflow/tensorflow/python/eager/profiler.py,67,function,"Start profiling. Raises: ProfilerAlreadyRunningError: If another profiling session is running." 3449,stop,tensorflow/tensorflow/python/eager/profiler.py,90,function,"Stop current profiling session and return its result. Returns: A binary string of tensorflow.tpu.Trace. User can write the string to file for offline analysis by tensorboard. Raises: ProfilerNotRunningError: If there is no active profiling session." 3450,maybe_create_event_file,tensorflow/tensorflow/python/eager/profiler.py,118,function,"Create an empty event file if one does not already exist. This event file indicates that we have a plugins/profile/ directory in the current logdir. Args: logdir: log directory." 3451,save,tensorflow/tensorflow/python/eager/profiler.py,140,function,"Save profile result to TensorBoard logdir. Args: logdir: log directory read by TensorBoard. result: profiling result returned by stop()." 3452,start_profiler_server,tensorflow/tensorflow/python/eager/profiler.py,157,function,"Start a profiler grpc server that listens to given port. The profiler server will keep the program running even after the training finishes. Please shut down the server with CTRL-C. It can be used in both eager mode and graph mode. The service is defined in tensorflow/core/profiler/profiler_service.proto. Please use tensorflow/contrib/tpu/profiler/capture_tpu_profile to capture a traceable file following https://cloud.google.com/tpu/docs/cloud-tpu-tools#capture_trace Args: port: port profiler server listens to." 3453,Profiler,tensorflow/tensorflow/python/eager/profiler.py,176,class,"Context-manager eager profiler API. Example usage: ```python with Profiler(""/path/to/logdir""): # do some work ```" 3454,start_tracing,tensorflow/tensorflow/python/eager/profiler_client.py,26,function,"Sends grpc requests to profiler server to perform on-demand profiling. This method will block the caller thread until it receives the tracing result. Args: service_addr: Address of profiler service e.g. localhost:6009. logdir: Path of TensorBoard log directory e.g. /tmp/tb_log.
duration_ms: Duration of tracing or monitoring in ms. worker_list: The list of worker TPUs that we are about to profile in the current session. (TPU only) include_dataset_ops: Set to false to profile longer traces. num_tracing_attempts: Automatically retry N times when no trace event is collected. Raises: UnavailableError: If no trace event is collected." 3455,monitor,tensorflow/tensorflow/python/eager/profiler_client.py,54,function,"Sends grpc requests to profiler server to perform on-demand monitoring. This method will block the caller thread until it receives the monitoring result. Args: service_addr: Address of profiler service e.g. localhost:6009. duration_ms: Duration of tracing or monitoring in ms. monitoring_level: Choose a monitoring level between 1 and 2 to monitor your job. Level 2 is more verbose than level 1 and shows more metrics. display_timestamp: Set to true to display timestamp in monitoring result. Returns: A string of monitoring output." 3456,ProfilerClientTest,tensorflow/tensorflow/python/eager/profiler_client_test.py,27,class, 3457,ProfilerTest,tensorflow/tensorflow/python/eager/profiler_test.py,33,class, 3458,Tests,tensorflow/tensorflow/python/eager/pywrap_tfe_test.py,44,class, 3459,connect_to_remote_host,tensorflow/tensorflow/python/eager/remote.py,42,function,"Connects to a single machine to enable remote execution on it. Will make devices on the remote host available to use. Note that calling this more than once will work, but will invalidate any tensor handles on the old remote devices. Using the default job_name of worker, you can schedule ops to run remotely as follows: ```python # When eager execution is enabled, connect to the remote host. tf.config.experimental_connect_to_host(""exampleaddr.com:9876"") with ops.device(""job:worker/replica:0/task:1/device:CPU:0""): # The following tensors should be resident on the remote device, and the op # will also execute remotely. x1 = array_ops.ones([2, 2]) x2 = array_ops.ones([2, 2]) y = math_ops.matmul(x1, x2) ``` Args: remote_host: a single remote server address, or a list of them, in host:port format. job_name: The job name under which the new server will be accessible. Raises: ValueError: if remote_host is None." 3460,connect_to_cluster,tensorflow/tensorflow/python/eager/remote.py,81,function,"Connects to the given cluster. Will make devices on the cluster available to use. Note that calling this more than once will work, but will invalidate any tensor handles on the old remote devices. If the given local job name is not present in the cluster specification, it will be automatically added, using an unused port on the localhost. Device filters can be specified to isolate groups of remote tasks to avoid undesired accesses between workers. Workers accessing resources or launching ops / functions on filtered remote devices will result in errors (unknown devices). For any remote task, if no device filter is present, all cluster devices will be visible; if any device filter is specified, it can only see devices matching at least one filter. Devices on the task itself are always visible. Device filters can be partially specified.
For example, for a cluster set up for parameter server training, the following device filters might be specified: ```python cdf = tf.config.experimental.ClusterDeviceFilters() # For any worker, only the devices on PS nodes and itself are visible for i in range(num_workers): cdf.set_device_filters('worker', i, ['/job:ps']) # Similarly for any ps, only the devices on workers and itself are visible for i in range(num_ps): cdf.set_device_filters('ps', i, ['/job:worker']) tf.config.experimental_connect_to_cluster(cluster_def, cluster_device_filters=cdf) ``` Args: cluster_spec_or_resolver: A `ClusterSpec` or `ClusterResolver` describing the cluster. job_name: The name of the local job. task_index: The local task index. protocol: The communication protocol, such as `""grpc""`. If unspecified, will use the default from `python/platform/remote_utils.py`. make_master_device_default: If True and a cluster resolver is passed, will automatically enter the master task device scope, which indicates the master becomes the default device to run ops. It won't do anything if a cluster spec is passed. Will throw an error if the caller is currently already in some device scope. cluster_device_filters: an instance of `tf.train.experimental.ClusterDeviceFilters` that specify device filters to the remote tasks in the cluster." 3461,_strip_prefix,tensorflow/tensorflow/python/eager/remote.py,222,function, 3462,run_benchmark,tensorflow/tensorflow/python/eager/remote_benchmarks_test.py,45,function, 3463,Foo,tensorflow/tensorflow/python/eager/remote_benchmarks_test.py,62,class, 3464,RemoteWorkerMicroBenchmarks,tensorflow/tensorflow/python/eager/remote_benchmarks_test.py,78,class, 3465,RemoteCloudTPUTest,tensorflow/tensorflow/python/eager/remote_cloud_tpu_test.py,56,class,Test that we can connect to a real Cloud TPU. 3466,get_server_def,tensorflow/tensorflow/python/eager/remote_cluster_test.py,46,function,Returns a server def with a single job + multiple tasks. 3467,DynamicClusterTest,tensorflow/tensorflow/python/eager/remote_cluster_test.py,66,class, 3468,get_server_def,tensorflow/tensorflow/python/eager/remote_execution_test.py,46,function,Returns a server def with a single job + multiple tasks. 3469,RemoteExecutionTest,tensorflow/tensorflow/python/eager/remote_execution_test.py,66,class, 3470,RemoteExecutionWithoutLazyRemoteInputsCopyTest,tensorflow/tensorflow/python/eager/remote_execution_test.py,236,class, 3471,SingleWorkerTest,tensorflow/tensorflow/python/eager/remote_test.py,49,class, 3472,RemoteAsyncTest,tensorflow/tensorflow/python/eager/remote_test.py,196,class, 3473,MultiWorkersTest,tensorflow/tensorflow/python/eager/remote_test.py,284,class, 3474,MultiJobsTest,tensorflow/tensorflow/python/eager/remote_test.py,480,class, 3475,_strip_prefix,tensorflow/tensorflow/python/eager/remote_test.py,617,function, 3476,Tape,tensorflow/tensorflow/python/eager/tape.py,35,class,Represents a gradient propagation trace. 3477,push_new_tape,tensorflow/tensorflow/python/eager/tape.py,47,function,Pushes a new tape onto the tape stack. 3478,push_tape,tensorflow/tensorflow/python/eager/tape.py,53,function,Pushes an existing tape onto the tape stack. 3479,watch,tensorflow/tensorflow/python/eager/tape.py,58,function,Marks this tensor to be watched by the given tape. 3480,VariableWatcher,tensorflow/tensorflow/python/eager/tape.py,63,class,"A scope that tracks all trainable variable accesses within it. This explicitly ignores variables that are not marked as trainable.
Sample usage: var = tf.Variable(0.0) with VariableWatcher() as variable_watcher: var.assign_add(1.0) assert variable_watcher.watched_variables == [var]" 3481,watch_variable,tensorflow/tensorflow/python/eager/tape.py,95,function,Marks this variable to be watched by the given tape. 3482,variable_accessed,tensorflow/tensorflow/python/eager/tape.py,108,function,"Notifies all tapes in the stack that a variable has been accessed. Args: variable: variable to be watched." 3483,variables_accessed,tensorflow/tensorflow/python/eager/tape.py,125,function,"Notifies all tapes in the stack that variables have been accessed. Only trainable variables are marked as accessed. Args: variables: iterable of variables to mark as accessed." 3484,pop_tape,tensorflow/tensorflow/python/eager/tape.py,149,function,Pops the given tape from the stack. 3485,stop_recording,tensorflow/tensorflow/python/eager/tape.py,155,function,Stop all gradient recording (backprop and forwardprop). 3486,should_record_backprop,tensorflow/tensorflow/python/eager/tape.py,167,function,"Returns true if any tape in the stack watches any of these tensors. Only takes GradientTapes into account, not forward accumulators. Args: tensors: Tensors to check, typically inputs to an operation. Returns: Boolean, whether any tape watches any of `tensors`." 3487,record_operation,tensorflow/tensorflow/python/eager/tape.py,181,function,Records the operation on all tapes in the stack. 3488,record_operation_backprop_only,tensorflow/tensorflow/python/eager/tape.py,189,function,Records the operation on all backward tapes in the stack. 3489,record_operation_forwardprop_only,tensorflow/tensorflow/python/eager/tape.py,197,function,"Records the operation on all forward accumulators in the stack. Args: op_type: a string for the operation type, used in the backprop code output_tensors: a list of Python Tensor objects output by the operation input_tensors: a list of input Tensors to the recorded operation backward_function: the function to be called that, given the gradients of the output tensors, produces the gradients of the input tensors. This function is automatically transposed to produce output gradients given input gradients. forwardprop_output_indices: indicates any output_tensors which contain JVPs. Typically these will have come from TFE_Py_PackForwardGradients. May be None or an empty sequence if there are no JVP outputs from the operation." 3490,delete_trace,tensorflow/tensorflow/python/eager/tape.py,219,function,Deletes traces for this Tensor from all tapes in the stack. 3491,could_possibly_record,tensorflow/tensorflow/python/eager/tape.py,224,function,Returns True if any tape is active. 3492,two_outputs,tensorflow/tensorflow/python/eager/tape_test.py,39,function, 3493,gradient_is_constant,tensorflow/tensorflow/python/eager/tape_test.py,55,function, 3494,TapeTest,tensorflow/tensorflow/python/eager/tape_test.py,64,class, 3495,VariableWatcherTest,tensorflow/tensorflow/python/eager/tape_test.py,171,class, 3496,_create_tensor,tensorflow/tensorflow/python/eager/tensor_test.py,44,function, 3497,TFETensorTest,tensorflow/tensorflow/python/eager/tensor_test.py,57,class, 3498,TFETensorUtilTest,tensorflow/tensorflow/python/eager/tensor_test.py,435,class, 3499,main,tensorflow/tensorflow/python/eager/test.py,27,function, 3500,VariableHolder,tensorflow/tensorflow/python/eager/wrap_function.py,46,class,Holds variables for a python function.
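The `tape` entries above (`watch`, `stop_recording`, `pop_tape`) are the internal machinery behind the public `tf.GradientTape` API. A minimal sketch of the same watch/stop-recording semantics through that public surface (the internal `tape` module itself is not a supported API):

```python
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as t:
    t.watch(x)              # mark a non-variable tensor as watched
    y = x * x
    with t.stop_recording():
        z = y * y           # computed, but not recorded on the tape
print(t.gradient(y, x))     # 6.0; z contributes nothing to the trace
```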
3501,_get_element_from_tensor_info,tensorflow/tensorflow/python/eager/wrap_function.py,98,function,Simplified copy of the deprecated `get_tensor_from_tensor_info`. 3502,_lift_single_variable,tensorflow/tensorflow/python/eager/wrap_function.py,123,function,Lifts `old_variable` out of the `FuncGraph` `graph`. 3503,_lift_unlifted_variables,tensorflow/tensorflow/python/eager/wrap_function.py,146,function,"Finds resource variables and lifts them into the outer context. When we import a GraphDef inside a wrap_function, no Python graph building code runs. This means we get VarHandleOps which create variable resources, but no corresponding Python objects. Leaving them like this works but gives the user no way to interact with or modify the variables outside the graph. This method searches for variables and lifts them out as regular variable objects when possible, indicating to the FuncGraph that they are captures. Args: graph: The FuncGraph to lift variables from. variable_holder: A VariableHolder to record the lifted variables in." 3504,WrappedFunction,tensorflow/tensorflow/python/eager/wrap_function.py,220,class,Wraps a tf V1 piece of code in a function. 3505,_filter_returned_ops,tensorflow/tensorflow/python/eager/wrap_function.py,382,function,"Filters out any ops returned by the function. Args: fn: a function Returns: A tuple of ( Wrapped function that returns `None` in place of any ops, dict that maps the index in the flat output structure to the returned op )" 3506,WrappedGraph,tensorflow/tensorflow/python/eager/wrap_function.py,409,class,"Class for wrapping multiple TF 1.X functions in a single graph. Maintains a dictionary mapping names to wrapped functions. See `tf.compat.v1.wrap_function` to learn more about wrapping V1 functions. Functions wrapped using this class have access to variables and collections created in other wrapped functions, using the standard TF 1.X API ( `tf.compat.v1.get_variable` or `tf.compat.v1.get_default_graph().get_collection(...)`) Outside a function, variables and collections may be accessed using the `variables` and `graph` properties. Example: ``` def add_v1(x): with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE): v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32) return v + x def increment_var_v1(x): with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE): v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32) return v.assign_add(x) g = WrappedGraph() add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)]) increment_var = g.wrap_function(increment_var_v1, [tf.TensorSpec([], tf.int32)]) assert len(g.variables) == 1 assert g.variables[0].numpy() == 0 increment_var(tf.constant(5)) assert g.variables[0].numpy() == 5 ```" 3507,wrap_function,tensorflow/tensorflow/python/eager/wrap_function.py,560,function,"Wraps the TF 1.x function fn into a graph function. The python function `fn` will be called once with symbolic arguments specified in the `signature`, traced, and turned into a graph function. Any variables created by `fn` will be owned by the object returned by `wrap_function`. The resulting graph function can be called with tensors which match the signature.
```python def f(x, do_add): v = tf.Variable(5.0) if do_add: op = v.assign_add(x) else: op = v.assign_sub(x) with tf.control_dependencies([op]): return v.read_value() f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True]) assert float(f_add(1.0)) == 6.0 assert float(f_add(1.0)) == 7.0 # Can call tf.compat.v1.wrap_function again to get a new trace, a new set # of variables, and possibly different non-template arguments. f_sub = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False]) assert float(f_sub(1.0)) == 4.0 assert float(f_sub(1.0)) == 3.0 ``` Both `tf.compat.v1.wrap_function` and `tf.function` create a callable TensorFlow graph. But while `tf.function` runs all stateful operations (e.g. `tf.print`) and sequences operations to provide the same semantics as eager execution, `wrap_function` is closer to the behavior of `session.run` in TensorFlow 1.x. It will not run any operations unless they are required to compute the function's outputs, either through a data dependency or a control dependency. Nor will it sequence operations. Unlike `tf.function`, `wrap_function` will only trace the Python function once. As with placeholders in TF 1.x, shapes and dtypes must be provided to `wrap_function`'s `signature` argument. Since it is only traced once, variables and state may be created inside the function and owned by the function wrapper object. Args: fn: python function to be wrapped signature: the placeholder and python arguments to be passed to the wrapped function name: Optional. The name of the function. Returns: the wrapped graph function." 3508,function_from_graph_def,tensorflow/tensorflow/python/eager/wrap_function.py,633,function,"Creates a ConcreteFunction from a GraphDef. Args: graph_def: A GraphDef to make a function out of. inputs: A Tensor name or nested structure of names in `graph_def` which should be inputs to the function. outputs: A Tensor name or nested structure of names in `graph_def` which should be outputs of the function. Returns: A ConcreteFunction." 3509,WrapFunctionTest,tensorflow/tensorflow/python/eager/wrap_function_test.py,45,class, 3510,WrappedGraphTest,tensorflow/tensorflow/python/eager/wrap_function_test.py,398,class, 3511,_forward_over_back_hvp,tensorflow/tensorflow/python/eager/benchmarks/resnet50/hvp_test.py,33,function, 3512,_back_over_forward_hvp,tensorflow/tensorflow/python/eager/benchmarks/resnet50/hvp_test.py,44,function, 3513,_tf_gradients_forward_over_back_hvp,tensorflow/tensorflow/python/eager/benchmarks/resnet50/hvp_test.py,55,function, 3514,_back_over_back_hvp,tensorflow/tensorflow/python/eager/benchmarks/resnet50/hvp_test.py,67,function, 3515,HVPTest,tensorflow/tensorflow/python/eager/benchmarks/resnet50/hvp_test.py,78,class, 3516,HVPBenchmarks,tensorflow/tensorflow/python/eager/benchmarks/resnet50/hvp_test.py,108,class, 3517,_IdentityBlock,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50.py,34,class,"_IdentityBlock is the block that has no conv layer at shortcut. Args: kernel_size: the kernel size of middle conv layer at main path filters: list of integers, the filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names data_format: data_format for the input ('channels_first' or 'channels_last')." 3518,_ConvBlock,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50.py,89,class,"_ConvBlock is the block that has a conv layer at shortcut.
Args: kernel_size: the kernel size of middle conv layer at main path filters: list of integers, the filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names data_format: data_format for the input ('channels_first' or 'channels_last'). strides: strides for the convolution. Note that from stage 3, the first conv layer at the main path uses strides=(2,2), and the shortcut should have strides=(2,2) as well." 3519,ResNet50,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50.py,168,class,"Instantiates the ResNet50 architecture. Args: data_format: format for the image. Either 'channels_first' or 'channels_last'. 'channels_first' is typically faster on GPUs while 'channels_last' is typically faster on CPUs. See https://www.tensorflow.org/performance/performance_guide#data_formats name: Prefix applied to names of variables created in the model. trainable: Is the model trainable? If true, performs the backward pass and optimization after the call() method. include_top: whether to include the fully-connected layer at the top of the network. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `avg` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. block3_strides: whether to add a stride of 2 to block3 to make it compatible with tf.slim ResNet implementation. average_pooling: whether to do average pooling of block4 features before global pooling. classes: optional number of classes to classify images into, only to be specified if `include_top` is True. Raises: ValueError: in case of invalid argument for data_format." 3520,data_format,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_graph_test.py,29,function, 3521,image_shape,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_graph_test.py,33,function, 3522,random_batch,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_graph_test.py,39,function, 3523,ResNet50GraphTest,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_graph_test.py,49,class, 3524,ResNet50Benchmarks,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_graph_test.py,69,class, 3525,compute_gradients,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py,37,function, 3526,apply_gradients,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py,54,function, 3527,_events_from_file,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py,58,function,"Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.compat.v1.Event protos in the event file." 3528,events_from_logdir,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py,76,function,"Returns all events in the single eventfile in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.compat.v1.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file."
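As a usage sketch for the benchmark `ResNet50` above: the constructor argument follows the docstring, but the import path assumes a TensorFlow source checkout (the benchmark package is not part of the public API), and the `training` flag is the usual Keras convention rather than something the docstring spells out:

```python
import tensorflow as tf
from tensorflow.python.eager.benchmarks.resnet50 import resnet50

# 'channels_last' is typically faster on CPUs, 'channels_first' on GPUs.
model = resnet50.ResNet50(data_format='channels_last')
images = tf.random.uniform([2, 224, 224, 3])  # synthetic NHWC batch
logits = model(images, training=False)        # (2, 1000) with the default `classes`
```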
3529,ResNet50Test,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py,94,class, 3530,MockIterator,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py,260,class, 3531,ResNet50Benchmarks,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py,269,class, 3532,device_and_data_format,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test_util.py,26,function, 3533,random_batch,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test_util.py,32,function,Create synthetic resnet50 images and labels for testing. 3534,report,tensorflow/tensorflow/python/eager/benchmarks/resnet50/resnet50_test_util.py,50,function, 3535,MemoryTest,tensorflow/tensorflow/python/eager/memory_tests/memory_test.py,40,class, 3536,_instance_count_by_class,tensorflow/tensorflow/python/eager/memory_tests/memory_test_util.py,36,function, 3537,assert_no_leak,tensorflow/tensorflow/python/eager/memory_tests/memory_test_util.py,48,function,Assert memory usage doesn't increase beyond given threshold for f. 3538,memory_profiler_is_available,tensorflow/tensorflow/python/eager/memory_tests/memory_test_util.py,78,function, 3539,RemoteWorkerMemoryTest,tensorflow/tensorflow/python/eager/memory_tests/remote_memory_test.py,31,class, 3540,_internal_input_layer,tensorflow/tensorflow/python/feature_column/feature_column.py,171,function,See input_layer. `scope` is a name or variable scope to use. 3541,input_layer,tensorflow/tensorflow/python/feature_column/feature_column.py,234,function,"Returns a dense `Tensor` as input layer based on given `feature_columns`. Generally a single example in training data is described with FeatureColumns. At the first layer of the model, this column-oriented data should be converted to a single `Tensor`. Example: ```python price = numeric_column('price') keywords_embedded = embedding_column( categorical_column_with_hash_bucket(""keywords"", 10K), dimension=16) columns = [price, keywords_embedded, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) for units in [128, 64, 32]: dense_tensor = tf.compat.v1.layers.dense(dense_tensor, units, tf.nn.relu) prediction = tf.compat.v1.layers.dense(dense_tensor, 1) ``` Args: features: A mapping from key to tensors. `_FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depending on the corresponding `_FeatureColumn`. feature_columns: An iterable containing the FeatureColumns to use as inputs to your model. All items should be instances of classes derived from `_DenseColumn` such as `numeric_column`, `embedding_column`, `bucketized_column`, `indicator_column`. If you have categorical features, you can wrap them with an `embedding_column` or `indicator_column`. weight_collections: A list of collection names to which the Variable will be added. Note that variables will also be added to collections `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). cols_to_vars: If not `None`, must be a dictionary that will be filled with a mapping from `_FeatureColumn` to list of `Variable`s.
For example, after the call, we might have cols_to_vars = {_EmbeddingColumn( categorical_column=_HashedCategoricalColumn( key='sparse_feature', hash_bucket_size=5, dtype=tf.string), dimension=10): [], 'bias': [], _NumericColumn( key='numeric_feature2', shape=(2,)): []} If a column creates no variables, its value will be an empty list. Note that cols_to_vars will also contain a string key 'bias' that maps to a list of Variables. Returns: A `Tensor` which represents predictions/logits of a linear model. Its shape is (batch_size, units) and its dtype is `float32`. Raises: ValueError: if an item in `feature_columns` is neither a `_DenseColumn` nor `_CategoricalColumn`." 3544,_add_to_collections,tensorflow/tensorflow/python/feature_column/feature_column.py,506,function,"Adds a var to the list of weight_collections provided. Handles the case for partitioned and non-partitioned variables. Args: var: A variable or Partitioned Variable. weight_collections: List of collections to add variable to." 3545,_FCLinearWrapper,tensorflow/tensorflow/python/feature_column/feature_column.py,528,class,"Wraps a _FeatureColumn in a layer for use in a linear model. See `linear_model` above." 3546,_BiasLayer,tensorflow/tensorflow/python/feature_column/feature_column.py,579,class,"A layer for the bias term." 3547,_get_expanded_variable_list,tensorflow/tensorflow/python/feature_column/feature_column.py,606,function, 3548,_strip_leading_slashes,tensorflow/tensorflow/python/feature_column/feature_column.py,614,function, 3549,_LinearModel,tensorflow/tensorflow/python/feature_column/feature_column.py,618,class,"Creates a linear model using feature columns. See `linear_model` for details." 3550,_transform_features,tensorflow/tensorflow/python/feature_column/feature_column.py,716,function,"Returns transformed features based on the feature columns passed in. Note that you will most likely not need to use this function. Please check `input_layer` and `linear_model` to see whether they will satisfy your use case or not. Example: ```python # Define features and transformations crosses_a_x_b = crossed_column( columns=[""sparse_feature_a"", ""sparse_feature_b""], hash_bucket_size=10000) price_buckets = bucketized_column( source_column=numeric_column(""price""), boundaries=[...]) columns = [crosses_a_x_b, price_buckets] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) transformed = transform_features(features=features, feature_columns=columns) assertCountEqual(columns, transformed.keys()) ``` Args: features: A mapping from key to tensors. `_FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depending on the corresponding `_FeatureColumn`. feature_columns: An iterable containing all the `_FeatureColumn`s. Returns: A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values." 3551,make_parse_example_spec,tensorflow/tensorflow/python/feature_column/feature_column.py,761,function,"Creates parsing spec dictionary from input feature_columns. The returned dictionary can be used as arg 'features' in `tf.io.parse_example`. Typical usage example: ```python # Define features and transformations feature_a = categorical_column_with_vocabulary_file(...) feature_b = numeric_column(...) feature_c_bucketized = bucketized_column(numeric_column(""feature_c""), ...) feature_a_x_feature_c = crossed_column( columns=[""feature_a"", feature_c_bucketized], ...)
feature_columns = set( [feature_b, feature_c_bucketized, feature_a_x_feature_c]) features = tf.io.parse_example( serialized=serialized_examples, features=make_parse_example_spec(feature_columns)) ``` For the above example, make_parse_example_spec would return the dict: ```python { ""feature_a"": parsing_ops.VarLenFeature(tf.string), ""feature_b"": parsing_ops.FixedLenFeature([1], dtype=tf.float32), ""feature_c"": parsing_ops.FixedLenFeature([1], dtype=tf.float32) } ``` Args: feature_columns: An iterable containing all feature columns. All items should be instances of classes derived from `_FeatureColumn`. Returns: A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature` value. Raises: ValueError: If any of the given `feature_columns` is not a `_FeatureColumn` instance." 3552,_embedding_column,tensorflow/tensorflow/python/feature_column/feature_column.py,822,function,"`_DenseColumn` that converts from sparse, categorical input. Use this when your inputs are sparse, but you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a `_CategoricalColumn` created by any of the `categorical_column_*` functions. Here is an example of using `embedding_column` with `DNNClassifier`: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) label_column = ... def input_fn(): features = tf.io.parse_example( ..., features=make_parse_example_spec(columns + [label_column])) labels = features.pop(label_column.name) return features, labels estimator.train(input_fn=input_fn, steps=100) ``` Here is an example using `embedding_column` with model_fn: ```python def model_fn(features, ...): video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] dense_tensor = input_layer(features, columns) # Form DNN layers, calculate loss, and return EstimatorSpec. ... ``` Args: categorical_column: A `_CategoricalColumn` created by a `categorical_column_with_*` function. This column produces the sparse IDs that are inputs to the embedding lookup. dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. Each of these can be thought of as an example-level normalization on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, embedding values are l2-normalized to this value. trainable: Whether or not the embedding is trainable. Default is True. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse.
safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true; consider turning it off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: `_DenseColumn` that converts from sparse input. Raises: ValueError: if `dimension` not > 0. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. RuntimeError: If eager execution is enabled." 3553,_numeric_column,tensorflow/tensorflow/python/feature_column/feature_column.py,946,function,"Represents real valued or numerical features. Example: ```python price = numeric_column('price') columns = [price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) # or bucketized_price = bucketized_column(price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. shape: An iterable of integers that specifies the shape of the `Tensor`. An integer can be given, which means a single-dimension `Tensor` with the given width. The `Tensor` representing the column will have the shape of [batch_size] + `shape`. default_value: A single value compatible with `dtype` or an iterable of values compatible with `dtype` which the column takes on during `tf.Example` parsing if data is missing. A default value of `None` will cause `tf.io.parse_example` to fail if an example does not contain this column. If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the `default_value` should be equal to the given `shape`. dtype: defines the type of values. Default value is `tf.float32`. Must be a non-quantized, real integer or floating point type. normalizer_fn: If not `None`, a function that can be used to normalize the value of the tensor after `default_value` is applied for parsing. Normalizer function takes the input `Tensor` as its argument, and returns the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that even though the most common use case of this function is normalization, it can be used for any kind of TensorFlow transformations. Returns: A `_NumericColumn`. Raises: TypeError: if any dimension in shape is not an int ValueError: if any dimension in shape is not a positive integer TypeError: if `default_value` is an iterable but not compatible with `shape` TypeError: if `default_value` is not compatible with `dtype`. ValueError: if `dtype` is not convertible to `tf.float32`." 3554,_bucketized_column,tensorflow/tensorflow/python/feature_column/feature_column.py,1022,function,"Represents discretized dense input. Buckets include the left boundary, and exclude the right boundary. Namely, `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are ```python boundaries = [0, 10, 100] input tensor = [[-5, 10000] [150, 10] [5, 100]] ``` then the output will be ```python output = [[0, 3] [3, 2] [1, 3]] ``` Example: ```python price = numeric_column('price') bucketized_price = bucketized_column(price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) # or columns = [bucketized_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` A `bucketized_column` can also be crossed with another categorical column using `crossed_column`: ```python price = numeric_column('price') # bucketized_column converts numerical feature to a categorical one. bucketized_price = bucketized_column(price, boundaries=[...]) # 'keywords' is a string feature. price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K) columns = [price_x_keywords, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Args: source_column: A one-dimensional dense column which is generated with `numeric_column`. boundaries: A sorted list or tuple of floats specifying the boundaries. Returns: A `_BucketizedColumn`. Raises: ValueError: If `source_column` is not a numeric column, or if it is not one-dimensional. ValueError: If `boundaries` is not a sorted list or tuple." 3555,_categorical_column_with_hash_bucket,tensorflow/tensorflow/python/feature_column/feature_column.py,1105,function,"Represents sparse feature where ids are set by hashing. Use this when your sparse features are in string or integer format, and you want to distribute your inputs into a finite number of buckets by hashing. output_id = Hash(input_feature_string) % bucket_size for string type input. For int type input, the value is converted to its string representation first and then hashed by the same formula. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example: ```python keywords = categorical_column_with_hash_bucket(""keywords"", 10K) columns = [keywords, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) # or keywords_embedded = embedding_column(keywords, 16) columns = [keywords_embedded, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A `_HashedCategoricalColumn`. Raises: ValueError: `hash_bucket_size` is not greater than 1. ValueError: `dtype` is neither string nor integer." 3556,_categorical_column_with_vocabulary_file,tensorflow/tensorflow/python/feature_column/feature_column.py,1163,function,"A `_CategoricalColumn` with a vocabulary file. Use this when your inputs are in string or integer format, and you have a vocabulary file that maps each value to an integer ID. By default, out-of-vocabulary values are ignored. 
Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state abbreviation. All inputs with values in that file are assigned an ID 0-49, corresponding to their line numbers. All other values are hashed and assigned an ID 50-54. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=50, num_oov_buckets=5) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Example with `default_value`: File '/us/states.txt' contains 51 lines - the first line is 'XX', and the other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX' in input, and other values missing from the file, will be assigned ID 0. All others are assigned the corresponding line number 1-50. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=51, default_value=0) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(states, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_file: The vocabulary file name. vocabulary_size: Number of elements in the vocabulary. This must be no greater than the length of `vocabulary_file`; if less, later values are ignored. If None, it is set to the length of `vocabulary_file`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. dtype: The type of features. Only string and integer types are supported. Returns: A `_CategoricalColumn` with a vocabulary file. Raises: ValueError: `vocabulary_file` is missing or cannot be opened. ValueError: `vocabulary_size` is missing or < 1. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: `dtype` is neither string nor integer." 3557,_categorical_column_with_vocabulary_list,tensorflow/tensorflow/python/feature_column/feature_column.py,1282,function,"A `_CategoricalColumn` with in-memory vocabulary. Use this when your inputs are in string or integer format, and you have an in-memory vocabulary mapping each value to an integer ID. By default, out-of-vocabulary values are ignored.
Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: In the following example, each input in `vocabulary_list` is assigned an ID 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other inputs are hashed and assigned an ID 4-5. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Example with `default_value`: In the following example, each input in `vocabulary_list` is assigned an ID 0-4 corresponding to its index (e.g., input 'B' produces output 3). All other inputs are assigned `default_value` 0. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(colors, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_list: An ordered iterable defining the vocabulary. Each feature is mapped to the index of its value (if present) in `vocabulary_list`. Must be castable to `dtype`. dtype: The type of features. Only string and integer types are supported. If `None`, it will be inferred from `vocabulary_list`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. Returns: A `_CategoricalColumn` with in-memory vocabulary. Raises: ValueError: if `vocabulary_list` is empty, or contains duplicate keys. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: if `dtype` is not integer or string." 3558,_categorical_column_with_identity,tensorflow/tensorflow/python/feature_column/feature_column.py,1395,function,"A `_CategoricalColumn` that returns identity values. Use this when your inputs are integers in the range `[0, num_buckets)`, and you want to use the input value itself as the categorical ID. Values outside this range will result in `default_value` if specified, otherwise it will fail. Typically, this is used for contiguous ranges of integer indexes, but it doesn't have to be. This might be inefficient, however, if many IDs are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. In the following examples, each input in the range `[0, 1000000)` is assigned its own value as the ID. All other inputs are assigned `default_value` 0. Note that a literal 0 in inputs will result in the same default ID. Linear model: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [video_id, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Embedding for a DNN model: ```python columns = [embedding_column(video_id, 9),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. num_buckets: Range of inputs and outputs is `[0, num_buckets)`. default_value: If set, values outside of range `[0, num_buckets)` will be replaced with this value. If not set, values >= num_buckets will cause a failure while values < 0 will be dropped. Returns: A `_CategoricalColumn` that returns identity values. Raises: ValueError: if `num_buckets` is less than one. ValueError: if `default_value` is not in range `[0, num_buckets)`." 3559,_indicator_column,tensorflow/tensorflow/python/feature_column/feature_column.py,1462,function,"Represents multi-hot representation of given categorical column. - For DNN model, `indicator_column` can be used to wrap any `categorical_column_*` (e.g., to feed to DNN). Consider using `embedding_column` if the number of buckets/unique values is large. - For Wide (aka linear) model, `indicator_column` is the internal representation for categorical column when passing categorical column directly (as any element in feature_columns) to `linear_model`. See `linear_model` for details. ```python name = indicator_column(categorical_column_with_vocabulary_list( 'name', ['bob', 'george', 'wanda'])) columns = [name, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) dense_tensor == [[1, 0, 0]] # If ""name"" bytes_list is [""bob""] dense_tensor == [[1, 0, 1]] # If ""name"" bytes_list is [""bob"", ""wanda""] dense_tensor == [[2, 0, 0]] # If ""name"" bytes_list is [""bob"", ""bob""] ``` Args: categorical_column: A `_CategoricalColumn` which is created by `categorical_column_with_*` or `crossed_column` functions. Returns: An `_IndicatorColumn`." 3560,_weighted_categorical_column,tensorflow/tensorflow/python/feature_column/feature_column.py,1496,function,"Applies weight values to a `_CategoricalColumn`. Use this when each of your sparse inputs has both an ID and a value. For example, if you're representing text documents as a collection of word frequencies, you can provide 2 parallel sparse input features ('terms' and 'frequencies' below).
Example: Input `tf.Example` objects: ```proto [ features { feature { key: ""terms"" value {bytes_list {value: ""very"" value: ""model""}} } feature { key: ""frequencies"" value {float_list {value: 0.3 value: 0.1}} } }, features { feature { key: ""terms"" value {bytes_list {value: ""when"" value: ""course"" value: ""human""}} } feature { key: ""frequencies"" value {float_list {value: 0.4 value: 0.1 value: 0.2}} } } ] ``` ```python categorical_column = categorical_column_with_hash_bucket( column_name='terms', hash_bucket_size=1000) weighted_column = weighted_categorical_column( categorical_column=categorical_column, weight_feature_key='frequencies') columns = [weighted_column, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` This assumes the input dictionary contains a `SparseTensor` for key 'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have the same indices and dense shape. Args: categorical_column: A `_CategoricalColumn` created by `categorical_column_with_*` functions. weight_feature_key: String key for weight values. dtype: Type of weights, such as `tf.float32`. Only float and integer weights are supported. Returns: A `_CategoricalColumn` composed of two sparse features: one represents id, the other represents weight (value) of the id feature in that example. Raises: ValueError: if `dtype` is not convertible to float." 3561,_crossed_column,tensorflow/tensorflow/python/feature_column/feature_column.py,1571,function,"Returns a column for performing crosses of categorical features. Crossed features will be hashed according to `hash_bucket_size`. Conceptually, the transformation can be thought of as: Hash(cartesian product of features) % `hash_bucket_size` For example, if the input features are: * SparseTensor referred by first key: ```python shape = [2, 2] { [0, 0]: ""a"" [1, 0]: ""b"" [1, 1]: ""c"" } ``` * SparseTensor referred by second key: ```python shape = [2, 1] { [0, 0]: ""d"" [1, 0]: ""e"" } ``` then crossed feature will look like: ```python shape = [2, 2] { [0, 0]: Hash64(""d"", Hash64(""a"")) % hash_bucket_size [1, 0]: Hash64(""e"", Hash64(""b"")) % hash_bucket_size [1, 1]: Hash64(""e"", Hash64(""c"")) % hash_bucket_size } ``` Here is an example to create a linear model with crosses of string features: ```python keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K) columns = [keywords_x_doc_terms, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` You could also use vocabulary lookup before crossing: ```python keywords = categorical_column_with_vocabulary_file( 'keywords', '/path/to/vocabulary/file', vocabulary_size=1K) keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K) columns = [keywords_x_doc_terms, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` If an input feature is of numeric type, you can use `categorical_column_with_identity`, or `bucketized_column`, as in the example: ```python # vertical_id is an integer categorical feature. vertical_id = categorical_column_with_identity('vertical_id', 10K) price = numeric_column('price') # bucketized_column converts numerical feature to a categorical one. 
bucketized_price = bucketized_column(price, boundaries=[...]) vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K) columns = [vertical_id_x_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` To use a crossed column in a DNN model, you need to wrap it in an embedding column, as in this example: ```python vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K) vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10) dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...]) ``` Args: keys: An iterable identifying the features to be crossed. Each element can be either: * string: Will use the corresponding feature which must be of string type. * `_CategoricalColumn`: Will use the transformed tensor produced by this column. Does not support hashed categorical column. hash_bucket_size: An int > 1. The number of buckets. hash_key: Specify the hash_key that will be used by the `FingerprintCat64` function to combine the fingerprints of the crossed features in SparseCrossOp (optional). Returns: A `_CrossedColumn`. Raises: ValueError: If `len(keys) < 2`. ValueError: If any of the keys is neither a string nor `_CategoricalColumn`. ValueError: If any of the keys is `_HashedCategoricalColumn`. ValueError: If `hash_bucket_size < 1`." 3562,_EmbeddingColumnLayer,tensorflow/tensorflow/python/feature_column/feature_column.py,1699,class,A layer that stores all the state required for an embedding column. 3563,_FeatureColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,1754,class,"Represents a feature column abstraction. WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. To distinguish the concept of a feature family and a specific binary feature within a family, we refer to a feature family like ""country"" as a feature column. Following is an example feature in a `tf.Example` format: {key: ""country"", value: [ ""US"" ]} In this example the value of feature is ""US"" and ""country"" refers to the column of the feature. This class is abstract. Users should not create instances of it." 3564,_DenseColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,1896,class,"Represents a column which can be represented as `Tensor`. WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. Some examples of this type are: numeric_column, embedding_column, indicator_column." 3565,_create_weighted_sum,tensorflow/tensorflow/python/feature_column/feature_column.py,1937,function,Creates a weighted sum for a dense/categorical column for linear_model. 3566,_create_dense_column_weighted_sum,tensorflow/tensorflow/python/feature_column/feature_column.py,1964,function,Create a weighted sum of a dense column for linear_model. 3567,_CategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,1990,class,"Represents a categorical feature. WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. A categorical feature is typically handled with a `tf.sparse.SparseTensor` of IDs." 3568,_create_categorical_column_weighted_sum,tensorflow/tensorflow/python/feature_column/feature_column.py,2036,function,"Create a weighted sum of a categorical column for linear_model.
Note to maintainers: as an implementation detail, the weighted sum is implemented via embedding_lookup_sparse for efficiency. Mathematically, they are the same. To be specific, conceptually, a categorical column can be treated as a multi-hot vector. Say: ```python x = [0 0 1] # categorical column input w = [a b c] # weights ``` The weighted sum is `c` in this case, which is the same as `w[2]`. Another example is ```python x = [0 1 1] # categorical column input w = [a b c] # weights ``` The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`. For both cases, we can implement the weighted sum via embedding_lookup with sparse_combiner = ""sum""." 3569,_SequenceDenseColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2100,class,Represents dense sequence data. 3570,_LazyBuilder,tensorflow/tensorflow/python/feature_column/feature_column.py,2113,class,"Handles caching of transformations while building the model. `_FeatureColumn` specifies how to digest an input column to the network. Some feature columns require data transformations. This class caches those transformations. Some features may be used in more than one place. For example, one can use a bucketized feature by itself and a cross with it. In that case we should create only one bucketization op instead of creating ops for each feature column separately. To handle re-use of transformed columns, `_LazyBuilder` caches all previously transformed columns. Example: We're trying to use the following `_FeatureColumn`s: ```python bucketized_age = fc.bucketized_column(fc.numeric_column(""age""), ...) keywords = fc.categorical_column_with_hash_bucket(""keywords"", ...) age_X_keywords = fc.crossed_column([bucketized_age, ""keywords""]) ... = linear_model(features, [bucketized_age, keywords, age_X_keywords]) ``` If we transform each column independently, then we'll get duplication of bucketization (one for the cross, one for bucketization itself). The `_LazyBuilder` eliminates this duplication." 3571,_shape_offsets,tensorflow/tensorflow/python/feature_column/feature_column.py,2249,function,Returns moving offset for each dimension given shape. 3572,_to_sparse_input_and_drop_ignore_values,tensorflow/tensorflow/python/feature_column/feature_column.py,2262,function,"Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells. If `input_tensor` is already a `SparseTensor`, just return it. Args: input_tensor: A string or integer `Tensor`. ignore_value: Entries in `dense_tensor` equal to this value will be absent from the resulting `SparseTensor`. If `None`, default value of `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`). Returns: A `SparseTensor` with the same shape as `input_tensor`. Raises: ValueError: when `input_tensor`'s rank is `None`." 3573,_normalize_feature_columns,tensorflow/tensorflow/python/feature_column/feature_column.py,2306,function,"Normalizes the `feature_columns` input. This method converts the `feature_columns` to list type as best as it can. In addition, it verifies the type and other properties of `feature_columns`, as required by downstream libraries. Args: feature_columns: The raw feature columns, usually passed by users. Returns: The normalized feature column list. Raises: ValueError: for any invalid inputs, such as empty, duplicated names, etc." 3574,_NumericColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2351,class,See `numeric_column`. 3575,_BucketizedColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2406,class,See `bucketized_column`.
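The maintainer note in `_create_categorical_column_weighted_sum` above can be checked directly: `embedding_lookup_sparse` with a 'sum' combiner reproduces the multi-hot weighted sum. A small sketch with hypothetical weights:

```python
import tensorflow as tf

# One example whose multi-hot input is x = [0, 1, 1], i.e. sparse IDs {1, 2}.
sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1]],
                         values=tf.constant([1, 2], dtype=tf.int64),
                         dense_shape=[1, 2])
w = tf.constant([[10.0], [20.0], [30.0]])  # weights a, b, c

# combiner='sum' yields w[1] + w[2] = b + c = [[50.0]].
out = tf.nn.embedding_lookup_sparse(w, sp_ids, sp_weights=None, combiner='sum')
```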
3576,_EmbeddingColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2476,class,See `embedding_column`. 3577,_get_graph_for_variable,tensorflow/tensorflow/python/feature_column/feature_column.py,2604,function, 3578,_SharedEmbeddingColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2611,class,See `embedding_column`. 3579,_check_shape,tensorflow/tensorflow/python/feature_column/feature_column.py,2753,function,"Returns shape if it's valid, raises error otherwise." 3580,_HashedCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2769,class,see `categorical_column_with_hash_bucket`. 3581,_VocabularyFileCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2818,class,See `categorical_column_with_vocabulary_file`. 3582,_VocabularyListCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2871,class,See `categorical_column_with_vocabulary_list`. 3583,_IdentityCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2922,class,See `categorical_column_with_identity`. 3584,_WeightedCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,2975,class,See `weighted_categorical_column`. 3585,_CrossedColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,3025,class,See `crossed_column`. 3586,_collect_leaf_level_keys,tensorflow/tensorflow/python/feature_column/feature_column.py,3081,function,"Collects base keys by expanding all nested crosses. Args: cross: A `_CrossedColumn`. Returns: A list of strings or `_CategoricalColumn` instances." 3587,_IndicatorColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,3099,class,"Represents a one-hot column for use in deep networks. Args: categorical_column: A `_CategoricalColumn` which is created by `categorical_column_with_*` function." 3588,_verify_static_batch_size_equality,tensorflow/tensorflow/python/feature_column/feature_column.py,3227,function,"Validates that the first dim (batch size) of all tensors are equal or None. Args: tensors: list of tensors to check. columns: list of feature columns matching tensors. Will be used for error messaging. Raises: ValueError: if one of the tensors has a variant batch size" 3589,_SequenceCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column.py,3253,class,Represents sequences of categorical data. 
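The private classes above (`_CrossedColumn`, `_IndicatorColumn`, and friends) implement the public feature-column constructors documented earlier. As a runnable counterpart to the `crossed_column` entry, a hedged sketch using the TF1 feature-column API; the feature values here are hypothetical:

```python
import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()  # the v1 linear_model builds graph-mode variables

price = tf1.feature_column.numeric_column('price')
# bucketized_column turns the numeric feature into a categorical one.
bucketized_price = tf1.feature_column.bucketized_column(
    price, boundaries=[0., 10., 100.])
price_x_keywords = tf1.feature_column.crossed_column(
    [bucketized_price, 'keywords'], hash_bucket_size=1000)

features = {
    'price': tf1.constant([[5.0], [150.0]]),
    'keywords': tf1.constant([['cheap'], ['luxury']]),
}
prediction = tf1.feature_column.linear_model(features, [price_x_keywords])
```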
3590,_initialized_session,tensorflow/tensorflow/python/feature_column/feature_column_test.py,58,function, 3591,LazyColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,65,class, 3592,NumericColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,188,class, 3593,BucketizedColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,387,class, 3594,HashedCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,668,class, 3595,CrossedColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,911,class, 3596,get_linear_model_bias,tensorflow/tensorflow/python/feature_column/feature_column_test.py,1297,function, 3597,get_linear_model_column_var,tensorflow/tensorflow/python/feature_column/feature_column_test.py,1302,function, 3598,get_keras_linear_model_predictions,tensorflow/tensorflow/python/feature_column/feature_column_test.py,1307,function, 3599,LinearModelTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,1327,class, 3600,_LinearModelTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,2006,class, 3601,InputLayerTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,2627,class, 3602,FunctionalInputLayerTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,2727,class, 3603,MakeParseExampleSpecTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,3264,class, 3604,_assert_sparse_tensor_value,tensorflow/tensorflow/python/feature_column/feature_column_test.py,3344,function, 3605,VocabularyFileCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,3356,class, 3606,VocabularyListCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,3830,class, 3607,IdentityCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,4239,class, 3608,TransformFeaturesTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,4541,class, 3609,IndicatorColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,4602,class, 3610,EmbeddingColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,4816,class, 3611,SharedEmbeddingColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,5561,class, 3612,WeightedCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_test.py,6309,class, 3613,StateManager,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,176,class,"Manages the state associated with FeatureColumns. Some `FeatureColumn`s create variables or resources to assist their computation. The `StateManager` is responsible for creating and storing these objects since `FeatureColumn`s are supposed to be stateless configuration only." 3614,_StateManagerImpl,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,272,class,Manages the state of DenseFeatures and LinearLayer. 3615,_StateManagerImplV2,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,351,class,Manages the state of DenseFeatures. 3616,_transform_features_v2,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,381,function,"Returns transformed features based on the feature columns passed in. Note that you will most likely not need to use this function. Please check `input_layer` and `linear_model` to see whether they will satisfy your use case or not.
Example: ```python # Define features and transformations crosses_a_x_b = crossed_column( columns=[""sparse_feature_a"", ""sparse_feature_b""], hash_bucket_size=10000) price_buckets = bucketized_column( source_column=numeric_column(""price""), boundaries=[...]) columns = [crosses_a_x_b, price_buckets] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) transformed = transform_features(features=features, feature_columns=columns) assertCountEqual(columns, transformed.keys()) ``` Args: features: A mapping from key to tensors. `FeatureColumn`s look up via these keys. For example `numeric_column('price')` will look at the 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor`, depending on the corresponding `FeatureColumn`. feature_columns: An iterable containing all the `FeatureColumn`s. state_manager: A StateManager object that holds the FeatureColumn state. Returns: A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values." 3617,make_parse_example_spec_v2,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,429,function,"Creates parsing spec dictionary from input feature_columns. The returned dictionary can be used as the arg 'features' in `tf.io.parse_example`. Typical usage example: ```python # Define features and transformations feature_a = tf.feature_column.categorical_column_with_vocabulary_file(...) feature_b = tf.feature_column.numeric_column(...) feature_c_bucketized = tf.feature_column.bucketized_column( tf.feature_column.numeric_column(""feature_c""), ...) feature_a_x_feature_c = tf.feature_column.crossed_column( columns=[""feature_a"", feature_c_bucketized], ...) feature_columns = set( [feature_b, feature_c_bucketized, feature_a_x_feature_c]) features = tf.io.parse_example( serialized=serialized_examples, features=tf.feature_column.make_parse_example_spec(feature_columns)) ``` For the above example, make_parse_example_spec would return the dict: ```python { ""feature_a"": parsing_ops.VarLenFeature(tf.string), ""feature_b"": parsing_ops.FixedLenFeature([1], dtype=tf.float32), ""feature_c"": parsing_ops.FixedLenFeature([1], dtype=tf.float32) } ``` Args: feature_columns: An iterable containing all feature columns. All items should be instances of classes derived from `FeatureColumn`. Returns: A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature` value. Raises: ValueError: If any of the given `feature_columns` is not a `FeatureColumn` instance." 3618,embedding_column,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,491,function,"`DenseColumn` that converts from sparse, categorical input. Use this when your inputs are sparse, but you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a `CategoricalColumn` created by any of the `categorical_column_*` functions. Here is an example of using `embedding_column` with `DNNClassifier`: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) label_column = ... 
def input_fn(): features = tf.io.parse_example( ..., features=make_parse_example_spec(columns + [label_column])) labels = features.pop(label_column.name) return features, labels estimator.train(input_fn=input_fn, steps=100) ``` Here is an example using `embedding_column` with model_fn: ```python def model_fn(features, ...): video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] dense_tensor = input_layer(features, columns) # Form DNN layers, calculate loss, and return EstimatorSpec. ... ``` Args: categorical_column: A `CategoricalColumn` created by a `categorical_column_with_*` function. This column produces the sparse IDs that are inputs to the embedding lookup. dimension: An integer specifying the dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. Each of these can be thought of as an example-level normalization on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, embedding values are l2-normalized to this value. trainable: Whether or not the embedding is trainable. Default is True. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true; consider turning it off if the above checks are not needed. Note that having empty rows will not trigger an error, though the output may be 0 or omitted. Returns: `DenseColumn` that converts from sparse input. Raises: ValueError: if `dimension` is not > 0. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. RuntimeError: If eager execution is enabled." 3619,shared_embedding_columns,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,605,function,"List of dense columns that convert from sparse, categorical input. This is similar to `embedding_column`, except that it produces a list of embedding columns that share the same embedding weights. Use this when your inputs are sparse and of the same type (e.g. watched and impression video IDs that share the same vocabulary), and you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a list of categorical columns created by any of the `categorical_column_*` functions. They must all be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. 
Here is an example embedding of two features for a DNNClassifier model: ```python watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) label_column = ... def input_fn(): features = tf.io.parse_example( ..., features=make_parse_example_spec(columns + [label_column])) labels = features.pop(label_column.name) return features, labels estimator.train(input_fn=input_fn, steps=100) ``` Here is an example using `shared_embedding_columns` with model_fn: ```python def model_fn(features, ...): watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) dense_tensor = input_layer(features, columns) # Form DNN layers, calculate loss, and return EstimatorSpec. ... ``` Args: categorical_columns: List of categorical columns created by a `categorical_column_with_*` function. These columns produce the sparse IDs that are inputs to the embedding lookup. All columns must be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. dimension: An integer specifying the dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. Each of these can be thought of as an example-level normalization on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. shared_embedding_collection_name: Optional name of the collection where shared embedding weights are added. If not given, a reasonable name will be chosen based on the names of `categorical_columns`. This is also used in `variable_scope` when creating shared embedding weights. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. trainable: Whether or not the embedding is trainable. Default is True. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true; consider turning it off if the above checks are not needed. 
Note that having empty rows will not trigger an error, though the output may be 0 or omitted. Returns: A list of dense columns that convert from sparse input. The order of results follows the ordering of `categorical_columns`. Raises: ValueError: if `dimension` is not > 0. ValueError: if any of the given `categorical_columns` is of a different type or has different arguments than the others. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. RuntimeError: if eager execution is enabled." 3620,shared_embedding_columns_v2,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,789,function,"List of dense columns that convert from sparse, categorical input. This is similar to `embedding_column`, except that it produces a list of embedding columns that share the same embedding weights. Use this when your inputs are sparse and of the same type (e.g. watched and impression video IDs that share the same vocabulary), and you want to convert them to a dense representation (e.g., to feed to a DNN). Inputs must be a list of categorical columns created by any of the `categorical_column_*` functions. They must all be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. Here is an example embedding of two features for a DNNClassifier model: ```python watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) label_column = ... def input_fn(): features = tf.io.parse_example( ..., features=make_parse_example_spec(columns + [label_column])) labels = features.pop(label_column.name) return features, labels estimator.train(input_fn=input_fn, steps=100) ``` Here is an example using `shared_embedding_columns` with model_fn: ```python def model_fn(features, ...): watched_video_id = categorical_column_with_vocabulary_file( 'watched_video_id', video_vocabulary_file, video_vocabulary_size) impression_video_id = categorical_column_with_vocabulary_file( 'impression_video_id', video_vocabulary_file, video_vocabulary_size) columns = shared_embedding_columns( [watched_video_id, impression_video_id], dimension=10) dense_tensor = input_layer(features, columns) # Form DNN layers, calculate loss, and return EstimatorSpec. ... ``` Args: categorical_columns: List of categorical columns created by a `categorical_column_with_*` function. These columns produce the sparse IDs that are inputs to the embedding lookup. All columns must be of the same type and have the same arguments except `key`. E.g. they can be categorical_column_with_vocabulary_file with the same vocabulary_file. Some or all columns could also be weighted_categorical_column. dimension: An integer specifying the dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words columns. 
Each of these can be thought of as an example-level normalization on the column. For more information, see `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. shared_embedding_collection_name: Optional collective name of these columns. If not given, a reasonable name will be chosen based on the names of `categorical_columns`. ckpt_to_load_from: String representing checkpoint name/pattern from which to restore column weights. Required if `tensor_name_in_ckpt` is not `None`. tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which to restore the column weights. Required if `ckpt_to_load_from` is not `None`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. trainable: Whether or not the embedding is trainable. Default is True. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true; consider turning it off if the above checks are not needed. Note that having empty rows will not trigger an error, though the output may be 0 or omitted. Returns: A list of dense columns that convert from sparse input. The order of results follows the ordering of `categorical_columns`. Raises: ValueError: if `dimension` is not > 0. ValueError: if any of the given `categorical_columns` is of a different type or has different arguments than the others. ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. ValueError: if `initializer` is specified and is not callable. RuntimeError: if eager execution is enabled." 3621,numeric_column,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,964,function,"Represents real-valued or numerical features. Example: ```python price = numeric_column('price') columns = [price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) # or bucketized_price = bucketized_column(price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. shape: An iterable of integers specifying the shape of the `Tensor`. An integer can be given which means a single dimension `Tensor` with given width. The `Tensor` representing the column will have the shape of [batch_size] + `shape`. default_value: A single value compatible with `dtype` or an iterable of values compatible with `dtype` which the column takes on during `tf.Example` parsing if data is missing. A default value of `None` will cause `tf.io.parse_example` to fail if an example does not contain this column. If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the `default_value` should be equal to the given `shape`. dtype: defines the type of values. Default value is `tf.float32`. 
Must be a non-quantized, real integer or floating point type. normalizer_fn: If not `None`, a function that can be used to normalize the value of the tensor after `default_value` is applied for parsing. Normalizer function takes the input `Tensor` as its argument, and returns the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that even though the most common use case of this function is normalization, it can be used for any kind of Tensorflow transformations. Returns: A `NumericColumn`. Raises: TypeError: if any dimension in shape is not an int ValueError: if any dimension in shape is not a positive integer TypeError: if `default_value` is an iterable but not compatible with `shape` TypeError: if `default_value` is not compatible with `dtype`. ValueError: if `dtype` is not convertible to `tf.float32`." 3622,bucketized_column,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1041,function,"Represents discretized dense input bucketed by `boundaries`. Buckets include the left boundary, and exclude the right boundary. Namely, `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`. For example, if the inputs are ```python boundaries = [0, 10, 100] input tensor = [[-5, 10000] [150, 10] [5, 100]] ``` then the output will be ```python output = [[0, 3] [3, 2] [1, 3]] ``` Example: ```python price = tf.feature_column.numeric_column('price') bucketized_price = tf.feature_column.bucketized_column( price, boundaries=[...]) columns = [bucketized_price, ...] features = tf.io.parse_example( ..., features=tf.feature_column.make_parse_example_spec(columns)) dense_tensor = tf.keras.layers.DenseFeatures(columns)(features) ``` A `bucketized_column` can also be crossed with another categorical column using `crossed_column`: ```python price = tf.feature_column.numeric_column('price') # bucketized_column converts numerical feature to a categorical one. bucketized_price = tf.feature_column.bucketized_column( price, boundaries=[...]) # 'keywords' is a string feature. price_x_keywords = tf.feature_column.crossed_column( [bucketized_price, 'keywords'], 50K) columns = [price_x_keywords, ...] features = tf.io.parse_example( ..., features=tf.feature_column.make_parse_example_spec(columns)) dense_tensor = tf.keras.layers.DenseFeatures(columns)(features) linear_model = tf.keras.experimental.LinearModel(units=...)(dense_tensor) ``` Args: source_column: A one-dimensional dense column which is generated with `numeric_column`. boundaries: A sorted list or tuple of floats specifying the boundaries. Returns: A `BucketizedColumn`. Raises: ValueError: If `source_column` is not a numeric column, or if it is not one-dimensional. ValueError: If `boundaries` is not a sorted list or tuple." 3623,categorical_column_with_hash_bucket,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1127,function,"Represents sparse feature where ids are set by hashing. Use this when your sparse features are in string or integer format, and you want to distribute your inputs into a finite number of buckets by hashing. output_id = Hash(input_feature_string) % bucket_size for string type input. For int type input, the value is converted to its string representation first and then hashed by the same formula. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. 
Example: ```python keywords = categorical_column_with_hash_bucket(""keywords"", 10K) columns = [keywords, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) # or keywords_embedded = embedding_column(keywords, 16) columns = [keywords_embedded, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A `HashedCategoricalColumn`. Raises: ValueError: `hash_bucket_size` is not greater than 1. ValueError: `dtype` is neither string nor integer." 3624,categorical_column_with_vocabulary_file,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1186,function,"A `CategoricalColumn` with a vocabulary file. Use this when your inputs are in string or integer format, and you have a vocabulary file that maps each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state abbreviation. All inputs with values in that file are assigned an ID 0-49, corresponding to its line number. All other values are hashed and assigned an ID 50-54. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=50, num_oov_buckets=5) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Example with `default_value`: File '/us/states.txt' contains 51 lines - the first line is 'XX', and the other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX' in input, and other values missing from the file, will be assigned ID 0. All others are assigned the corresponding line number 1-50. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=51, default_value=0) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(states, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of `vocabulary_file`, if less than length, later values are ignored. If None, it is set to the length of `vocabulary_file`. 
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. dtype: The type of features. Only string and integer types are supported. Returns: A `CategoricalColumn` with a vocabulary file. Raises: ValueError: `vocabulary_file` is missing or cannot be opened. ValueError: `vocabulary_size` is missing or < 1. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: `dtype` is neither string nor integer." 3625,categorical_column_with_vocabulary_file_v2,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1277,function,"A `CategoricalColumn` with a vocabulary file. Use this when your inputs are in string or integer format, and you have a vocabulary file that maps each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: File `'/us/states.txt'` contains 50 lines, each with a 2-character U.S. state abbreviation. All inputs with values in that file are assigned an ID 0-49, corresponding to its line number. All other values are hashed and assigned an ID 50-54. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=50, num_oov_buckets=5) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` Example with `default_value`: File `'/us/states.txt'` contains 51 lines - the first line is `'XX'`, and the other 50 each have a 2-character U.S. state abbreviation. Both a literal `'XX'` in input, and other values missing from the file, will be assigned ID 0. All others are assigned the corresponding line number 1-50. ```python states = categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=51, default_value=0) columns = [states, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(states, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of `vocabulary_file`, if less than length, later values are ignored. If None, it is set to the length of `vocabulary_file`. dtype: The type of features. Only string and integer types are supported. 
default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. Returns: A `CategoricalColumn` with a vocabulary file. Raises: ValueError: `vocabulary_file` is missing or cannot be opened. ValueError: `vocabulary_size` is missing or < 1. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: `dtype` is neither string nor integer." 3626,categorical_column_with_vocabulary_list,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1397,function,"A `CategoricalColumn` with in-memory vocabulary. Use this when your inputs are in string or integer format, and you have an in-memory vocabulary mapping each value to an integer ID. By default, out-of-vocabulary values are ignored. Use either (but not both) of `num_oov_buckets` and `default_value` to specify how to include out-of-vocabulary values. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. Example with `num_oov_buckets`: In the following example, each input in `vocabulary_list` is assigned an ID 0-3 corresponding to its index (e.g., input 'B' produces output 2). All other inputs are hashed and assigned an ID 4-5. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Example with `default_value`: In the following example, each input in `vocabulary_list` is assigned an ID 0-4 corresponding to its index (e.g., input 'B' produces output 3). All other inputs are assigned `default_value` 0. ```python colors = categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0) columns = [colors, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` And to make an embedding with either: ```python columns = [embedding_column(colors, 3),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. vocabulary_list: An ordered iterable defining the vocabulary. Each feature is mapped to the index of its value (if present) in `vocabulary_list`. Must be castable to `dtype`. dtype: The type of features. Only string and integer types are supported. If `None`, it will be inferred from `vocabulary_list`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. 
All out-of-vocabulary inputs will be assigned IDs in the range `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. Returns: A `CategoricalColumn` with in-memory vocabulary. Raises: ValueError: if `vocabulary_list` is empty, or contains duplicate keys. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: if `dtype` is not integer or string." 3627,categorical_column_with_identity,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1514,function,"A `CategoricalColumn` that returns identity values. Use this when your inputs are integers in the range `[0, num_buckets)`, and you want to use the input value itself as the categorical ID. Values outside this range will result in `default_value` if specified, otherwise it will fail. Typically, this is used for contiguous ranges of integer indexes, but it doesn't have to be. This might be inefficient, however, if many IDs are unused. Consider `categorical_column_with_hash_bucket` in that case. For input dictionary `features`, `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int and `''` for string, which will be dropped by this feature column. In the following examples, each input in the range `[0, 1000000)` is assigned the same value. All other inputs are assigned `default_value` 0. Note that a literal 0 in inputs will result in the same default ID. Linear model: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [video_id, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` Embedding for a DNN model: ```python columns = [embedding_column(video_id, 9),...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) ``` Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature `Tensor` objects, and feature columns. num_buckets: Range of inputs and outputs is `[0, num_buckets)`. default_value: If set, values outside of range `[0, num_buckets)` will be replaced with this value. If not set, values >= num_buckets will cause a failure while values < 0 will be dropped. Returns: A `CategoricalColumn` that returns identity values. Raises: ValueError: if `num_buckets` is less than one. ValueError: if `default_value` is not in range `[0, num_buckets)`." 3628,indicator_column,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1582,function,"Represents a multi-hot representation of a given categorical column. - For a DNN model, `indicator_column` can be used to wrap any `categorical_column_*` (e.g., to feed to a DNN). Consider using `embedding_column` if the number of buckets/unique values is large. - For a wide (aka linear) model, `indicator_column` is the internal representation for a categorical column when passing a categorical column directly (as any element in feature_columns) to `linear_model`. See `linear_model` for details. ```python name = indicator_column(categorical_column_with_vocabulary_list( 'name', ['bob', 'george', 'wanda'])) columns = [name, ...] 
features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) dense_tensor = input_layer(features, columns) dense_tensor == [[1, 0, 0]] # If ""name"" bytes_list is [""bob""] dense_tensor == [[1, 0, 1]] # If ""name"" bytes_list is [""bob"", ""wanda""] dense_tensor == [[2, 0, 0]] # If ""name"" bytes_list is [""bob"", ""bob""] ``` Args: categorical_column: A `CategoricalColumn` which is created by `categorical_column_with_*` or `crossed_column` functions. Returns: An `IndicatorColumn`. Raises: ValueError: If `categorical_column` is not CategoricalColumn type." 3629,weighted_categorical_column,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1625,function,"Applies weight values to a `CategoricalColumn`. Use this when each of your sparse inputs has both an ID and a value. For example, if you're representing text documents as a collection of word frequencies, you can provide 2 parallel sparse input features ('terms' and 'frequencies' below). Example: Input `tf.Example` objects: ```proto [ features { feature { key: ""terms"" value {bytes_list {value: ""very"" value: ""model""}} } feature { key: ""frequencies"" value {float_list {value: 0.3 value: 0.1}} } }, features { feature { key: ""terms"" value {bytes_list {value: ""when"" value: ""course"" value: ""human""}} } feature { key: ""frequencies"" value {float_list {value: 0.4 value: 0.1 value: 0.2}} } } ] ``` ```python categorical_column = categorical_column_with_hash_bucket( column_name='terms', hash_bucket_size=1000) weighted_column = weighted_categorical_column( categorical_column=categorical_column, weight_feature_key='frequencies') columns = [weighted_column, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction, _, _ = linear_model(features, columns) ``` This assumes the input dictionary contains a `SparseTensor` for key 'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have the same indices and dense shape. Args: categorical_column: A `CategoricalColumn` created by `categorical_column_with_*` functions. weight_feature_key: String key for weight values. dtype: Type of weights, such as `tf.float32`. Only float and integer weights are supported. Returns: A `CategoricalColumn` composed of two sparse features: one represents id, the other represents weight (value) of the id feature in that example. Raises: ValueError: if `dtype` is not convertible to float." 3630,crossed_column,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1701,function,"Returns a column for performing crosses of categorical features. Crossed features will be hashed according to `hash_bucket_size`. Conceptually, the transformation can be thought of as: Hash(cartesian product of features) % `hash_bucket_size` For example, if the input features are: * SparseTensor referred by first key: ```python shape = [2, 2] { [0, 0]: ""a"" [1, 0]: ""b"" [1, 1]: ""c"" } ``` * SparseTensor referred by second key: ```python shape = [2, 1] { [0, 0]: ""d"" [1, 0]: ""e"" } ``` then crossed feature will look like: ```python shape = [2, 2] { [0, 0]: Hash64(""d"", Hash64(""a"")) % hash_bucket_size [1, 0]: Hash64(""e"", Hash64(""b"")) % hash_bucket_size [1, 1]: Hash64(""e"", Hash64(""c"")) % hash_bucket_size } ``` Here is an example to create a linear model with crosses of string features: ```python keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K) columns = [keywords_x_doc_terms, ...] 
features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` You could also use vocabulary lookup before crossing: ```python keywords = categorical_column_with_vocabulary_file( 'keywords', '/path/to/vocabulary/file', vocabulary_size=1K) keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K) columns = [keywords_x_doc_terms, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` If an input feature is of numeric type, you can use `categorical_column_with_identity`, or `bucketized_column`, as in the example: ```python # vertical_id is an integer categorical feature. vertical_id = categorical_column_with_identity('vertical_id', 10K) price = numeric_column('price') # bucketized_column converts numerical feature to a categorical one. bucketized_price = bucketized_column(price, boundaries=[...]) vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K) columns = [vertical_id_x_price, ...] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) linear_prediction = linear_model(features, columns) ``` To use a crossed column in a DNN model, you need to wrap it in an embedding column, as in this example: ```python vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K) vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10) dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...]) ``` Args: keys: An iterable identifying the features to be crossed. Each element can be either: * string: Will use the corresponding feature which must be of string type. * `CategoricalColumn`: Will use the transformed tensor produced by this column. Does not support hashed categorical column. hash_bucket_size: An int > 1. The number of buckets. hash_key: Specify the hash_key that will be used by the `FingerprintCat64` function to combine the crosses' fingerprints on SparseCrossOp (optional). Returns: A `CrossedColumn`. Raises: ValueError: If `len(keys) < 2`. ValueError: If any of the keys is neither a string nor `CategoricalColumn`. ValueError: If any of the keys is `HashedCategoricalColumn`. ValueError: If `hash_bucket_size < 1`." 3631,FeatureColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,1829,class,"Represents a feature column abstraction. WARNING: Do not subclass this class unless you know what you are doing: the API is subject to future changes. To distinguish between the concept of a feature family and a specific binary feature within a family, we refer to a feature family like ""country"" as a feature column. For example, we can have a feature in a `tf.Example` format: {key: ""country"", value: [ ""US"" ]} In this example the value of the feature is ""US"" and ""country"" refers to the column of the feature. This class is an abstract class. Users should not create instances of this." 3632,DenseColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2082,class,"Represents a column which can be represented as `Tensor`. Some examples of this type are: numeric_column, embedding_column, indicator_column." 3633,is_feature_column_v2,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2119,function,Returns True if all feature columns are V2. 3634,_create_weighted_sum,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2129,function,Creates a weighted sum for a dense/categorical column for linear_model. 
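`_create_weighted_sum` above is the dispatch point that turns each column's output into logits for `linear_model`; for a dense column this reduces to a single matrix multiply against a weight variable. A minimal NumPy sketch of that dense case follows; the shapes, names, and use of NumPy instead of TF ops are assumptions for illustration only.

```python
# A minimal NumPy sketch of the dense weighted sum that
# `_create_weighted_sum` (above) performs for linear_model.
import numpy as np

def dense_column_weighted_sum(dense_tensor, weight_var):
  """[batch, num_elements] x [num_elements, units] -> [batch, units]."""
  batch = dense_tensor.shape[0]
  flat = dense_tensor.reshape(batch, -1)  # Flatten per-example features.
  return flat @ weight_var

features = np.array([[1.0, 2.0], [3.0, 4.0]])  # e.g. a numeric column of shape (2,)
weights = np.array([[0.5], [0.25]])            # one output unit
print(dense_column_weighted_sum(features, weights))  # [[1.0], [2.5]]
```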
3635,_create_dense_column_weighted_sum,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2147,function,Creates a weighted sum of a dense column for linear_model. 3636,CategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2157,class,"Represents a categorical feature. A categorical feature is typically handled with a `tf.sparse.SparseTensor` of IDs." 3637,_create_categorical_column_weighted_sum,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2195,function,"Creates a weighted sum of a categorical column for linear_model. Note to maintainers: as an implementation detail, the weighted sum is implemented via embedding_lookup_sparse for efficiency. Mathematically, they are the same. To be specific, conceptually a categorical column can be treated as a multi-hot vector. Say: ```python x = [0 0 1] # categorical column input w = [a b c] # weights ``` The weighted sum is `c` in this case, which is the same as `w[2]`. Another example is ```python x = [0 1 1] # categorical column input w = [a b c] # weights ``` The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`. For both cases, we can implement the weighted sum via embedding_lookup with sparse_combiner = ""sum""." 3638,SequenceDenseColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2243,class,Represents dense sequence data. 3639,FeatureTransformationCache,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2262,class,"Handles caching of transformations while building the model. `FeatureColumn` specifies how to digest an input column to the network. Some feature columns require data transformations. This class caches those transformations. Some features may be used in more than one place. For example, one can use a bucketized feature by itself and in a cross with it. In that case we should create only one bucketization op instead of creating ops for each feature column separately. To handle re-use of transformed columns, `FeatureTransformationCache` caches all previously transformed columns. Example: We're trying to use the following `FeatureColumn`s: ```python bucketized_age = fc.bucketized_column(fc.numeric_column(""age""), ...) keywords = fc.categorical_column_with_hash_bucket(""keywords"", ...) age_X_keywords = fc.crossed_column([bucketized_age, ""keywords""]) ... = linear_model(features, [bucketized_age, keywords, age_X_keywords]) ``` If we transform each column independently, then we'll get duplication of bucketization (one for the cross, one for the bucketization itself). The `FeatureTransformationCache` eliminates this duplication." 3640,_to_sparse_input_and_drop_ignore_values,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2412,function,"Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells. If `input_tensor` is already a `SparseTensor`, just return it. Args: input_tensor: A string or integer `Tensor`. ignore_value: Entries in `dense_tensor` equal to this value will be absent from the resulting `SparseTensor`. If `None`, default value of `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`). Returns: A `SparseTensor` with the same shape as `input_tensor`. Raises: ValueError: when `input_tensor`'s rank is `None`." 3641,_normalize_feature_columns,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2456,function,"Normalizes the `feature_columns` input. This method converts the `feature_columns` to list type as best it can. 
In addition, verifies the type and other parts of feature_columns, required by downstream libraries. Args: feature_columns: The raw feature columns, usually passed by users. Returns: The normalized feature column list. Raises: ValueError: for any invalid inputs, such as empty, duplicated names, etc." 3642,NumericColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2501,class,See `numeric_column`. 3643,BucketizedColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2628,class,See `bucketized_column`. 3644,EmbeddingColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,2783,class,See `embedding_column`. 3645,_raise_shared_embedding_column_error,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3055,function, 3646,SharedEmbeddingColumnCreator,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3061,class, 3647,SharedEmbeddingColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3113,class,See `embedding_column`. 3648,_check_shape,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3250,function,"Returns shape if it's valid, raises error otherwise." 3649,HashedCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3266,class,See `categorical_column_with_hash_bucket`. 3650,VocabularyFileCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3374,class,See `categorical_column_with_vocabulary_file`. 3651,VocabularyListCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3493,class,See `categorical_column_with_vocabulary_list`. 3652,IdentityCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3612,class,See `categorical_column_with_identity`. 3653,WeightedCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3721,class,See `weighted_categorical_column`. 3654,CrossedColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3845,class,See `crossed_column`. 3655,_collect_leaf_level_keys,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,3988,function,"Collects base keys by expanding all nested crosses. Args: cross: A `CrossedColumn`. Returns: A list of strings or `CategoricalColumn` instances." 3656,_prune_invalid_ids,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4006,function,Prunes invalid IDs (< 0) from the input ids and weights. 3657,_prune_invalid_weights,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4019,function,Prunes invalid weights (< 0) from the input ids and weights. 3658,IndicatorColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4028,class,"Represents a one-hot column for use in deep networks. Args: categorical_column: A `CategoricalColumn` which is created by a `categorical_column_with_*` function." 3659,_verify_static_batch_size_equality,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4254,function,"Verifies equality between static batch sizes. Args: tensors: iterable of input tensors. columns: Corresponding feature columns. Raises: ValueError: in case of mismatched batch sizes." 3660,SequenceCategoricalColumn,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4281,class,Represents sequences of categorical data. 3661,_check_config_keys,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4400,function,Checks that a config has all expected_keys. 
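The `_collect_leaf_level_keys` entries above describe expanding nested crosses into their base keys; a small pure-Python sketch of that recursion follows. `SimpleCross` is a hypothetical stand-in for `CrossedColumn`, used only so the example is self-contained.

```python
# A pure-Python sketch of the nested-cross expansion described for
# `_collect_leaf_level_keys` above. `SimpleCross` is a stand-in for
# `CrossedColumn`, assumed here only for illustration.
import collections

SimpleCross = collections.namedtuple('SimpleCross', ['keys'])

def collect_leaf_level_keys(cross):
  """Returns base keys (strings or non-cross columns), depth first."""
  leaf_level_keys = []
  for key in cross.keys:
    if isinstance(key, SimpleCross):
      leaf_level_keys.extend(collect_leaf_level_keys(key))  # Recurse into nested cross.
    else:
      leaf_level_keys.append(key)
  return leaf_level_keys

inner = SimpleCross(keys=('a', 'b'))
outer = SimpleCross(keys=(inner, 'c'))
print(collect_leaf_level_keys(outer))  # ['a', 'b', 'c']
```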
3662,_standardize_and_copy_config,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4407,function,"Returns a shallow copy of config with lists turned to tuples. Keras serialization uses nest to listify everything. This causes problems with the NumericColumn shape, which becomes unhashable. We could try to solve this on the Keras side, but that would require lots of tracking to avoid changing existing behavior. Instead, we ensure here that we revive correctly. Args: config: dict that will be used to revive a Feature Column Returns: Shallow copy of config with lists turned to tuples." 3663,_sanitize_column_name_for_variable_scope,tensorflow/tensorflow/python/feature_column/feature_column_v2.py,4430,function,Sanitizes user-provided feature names for use as variable scopes. 3664,_initialized_session,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,52,function, 3665,get_linear_model_bias,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,59,function, 3666,get_linear_model_column_var,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,64,function, 3667,BaseFeatureColumnForTests,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,69,class,"A base FeatureColumn useful to avoid boiler-plate in tests. Provides dummy implementations for abstract methods that raise ValueError in order to avoid re-defining all abstract methods for each test sub-class." 3668,SortableFeatureColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,88,class, 3669,LazyColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,112,class, 3670,NumericColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,260,class, 3671,BucketizedColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,477,class, 3672,HashedCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,780,class, 3673,CrossedColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,1001,class, 3674,OldLinearModelTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,1407,class, 3675,InputLayerTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,2292,class, 3676,FunctionalInputLayerTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,2393,class, 3677,MakeParseExampleSpecTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,2871,class, 3678,_assert_sparse_tensor_value,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,2983,function, 3679,VocabularyFileCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,2996,class, 3680,VocabularyListCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,3508,class, 3681,IdentityCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,3928,class, 3682,TransformFeaturesTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,4248,class, 3683,IndicatorColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,4318,class, 3684,_TestStateManager,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,4601,class, 3685,EmbeddingColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,4640,class, 3686,SharedEmbeddingColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,5467,class, 
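The `_standardize_and_copy_config` entry above explains that Keras serialization listifies everything, which makes a tuple-valued field such as a NumericColumn `shape` unhashable on revival; the documented fix is a shallow copy with lists turned back into tuples. A minimal sketch of that normalization, with the body assumed from the description rather than copied from the source:

```python
# A minimal sketch of the list-to-tuple normalization described for
# `_standardize_and_copy_config` above; the exact body is an assumption
# based on that description.
def standardize_and_copy_config(config):
  """Returns a shallow copy of `config` with list values turned to tuples."""
  kwargs = config.copy()
  for k, v in kwargs.items():
    if isinstance(v, list):
      kwargs[k] = tuple(v)  # e.g. shape [1] -> (1,), hashable again
  return kwargs

revived = standardize_and_copy_config({'key': 'price', 'shape': [1]})
print(revived['shape'])  # (1,)
```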
3687,WeightedCategoricalColumnTest,tensorflow/tensorflow/python/feature_column/feature_column_v2_test.py,6001,class, 3688,concatenate_context_input,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,41,function,"Replicates `context_input` across all timesteps of `sequence_input`. Expands dimension 1 of `context_input` then tiles it `sequence_length` times. This value is appended to `sequence_input` on dimension 2 and the result is returned. Args: context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`. sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, d0]`. Returns: A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, d0 + d1]`. Raises: ValueError: If `sequence_input` does not have rank 3 or `context_input` does not have rank 2." 3689,sequence_categorical_column_with_identity,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,91,function,"Returns a feature column that represents sequences of integers. Pass this to `embedding_column` or `indicator_column` to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: ```python watches = sequence_categorical_column_with_identity( 'watches', num_buckets=1000) watches_embedding = embedding_column(watches, dimension=10) columns = [watches_embedding] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) sequence_feature_layer = SequenceFeatures(columns) sequence_input, sequence_length = sequence_feature_layer(features) sequence_length_mask = tf.sequence_mask(sequence_length) rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size) rnn_layer = tf.keras.layers.RNN(rnn_cell) outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask) ``` Args: key: A unique string identifying the input feature. num_buckets: Range of inputs. Namely, inputs are expected to be in the range `[0, num_buckets)`. default_value: If `None`, this column's graph operations will fail for out-of-range inputs. Otherwise, this value must be in the range `[0, num_buckets)`, and will replace out-of-range inputs. Returns: A `SequenceCategoricalColumn`. Raises: ValueError: if `num_buckets` is less than one. ValueError: if `default_value` is not in range `[0, num_buckets)`." 3690,sequence_categorical_column_with_hash_bucket,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,140,function,"A sequence of categorical terms where ids are set by hashing. Pass this to `embedding_column` or `indicator_column` to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: ```python tokens = sequence_categorical_column_with_hash_bucket( 'tokens', hash_bucket_size=1000) tokens_embedding = embedding_column(tokens, dimension=10) columns = [tokens_embedding] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) sequence_feature_layer = SequenceFeatures(columns) sequence_input, sequence_length = sequence_feature_layer(features) sequence_length_mask = tf.sequence_mask(sequence_length) rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size) rnn_layer = tf.keras.layers.RNN(rnn_cell) outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask) ``` Args: key: A unique string identifying the input feature. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A `SequenceCategoricalColumn`. 
Raises: ValueError: `hash_bucket_size` is not greater than 1. ValueError: `dtype` is neither string nor integer." 3691,sequence_categorical_column_with_vocabulary_file,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,186,function,"A sequence of categorical terms where ids use a vocabulary file. Pass this to `embedding_column` or `indicator_column` to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: ```python states = sequence_categorical_column_with_vocabulary_file( key='states', vocabulary_file='/us/states.txt', vocabulary_size=50, num_oov_buckets=5) states_embedding = embedding_column(states, dimension=10) columns = [states_embedding] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) sequence_feature_layer = SequenceFeatures(columns) sequence_input, sequence_length = sequence_feature_layer(features) sequence_length_mask = tf.sequence_mask(sequence_length) rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size) rnn_layer = tf.keras.layers.RNN(rnn_cell) outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask) ``` Args: key: A unique string identifying the input feature. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of `vocabulary_file`, if less than length, later values are ignored. If None, it is set to the length of `vocabulary_file`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` can not be specified with `default_value`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This can not be specified with a positive `num_oov_buckets`. dtype: The type of features. Only string and integer types are supported. Returns: A `SequenceCategoricalColumn`. Raises: ValueError: `vocabulary_file` is missing or cannot be opened. ValueError: `vocabulary_size` is missing or < 1. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: `dtype` is neither string nor integer." 3692,sequence_categorical_column_with_vocabulary_list,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,251,function,"A sequence of categorical terms where ids use an in-memory list. Pass this to `embedding_column` or `indicator_column` to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: ```python colors = sequence_categorical_column_with_vocabulary_list( key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2) colors_embedding = embedding_column(colors, dimension=3) columns = [colors_embedding] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) sequence_feature_layer = SequenceFeatures(columns) sequence_input, sequence_length = sequence_feature_layer(features) sequence_length_mask = tf.sequence_mask(sequence_length) rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size) rnn_layer = tf.keras.layers.RNN(rnn_cell) outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask) ``` Args: key: A unique string identifying the input feature. vocabulary_list: An ordered iterable defining the vocabulary. 
Each feature is mapped to the index of its value (if present) in `vocabulary_list`. Must be castable to `dtype`. dtype: The type of features. Only string and integer types are supported. If `None`, it will be inferred from `vocabulary_list`. default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to `-1`. This cannot be specified with a positive `num_oov_buckets`. num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a hash of the input value. A positive `num_oov_buckets` cannot be specified with `default_value`. Returns: A `SequenceCategoricalColumn`. Raises: ValueError: if `vocabulary_list` is empty, or contains duplicate keys. ValueError: `num_oov_buckets` is a negative integer. ValueError: `num_oov_buckets` and `default_value` are both specified. ValueError: if `dtype` is not integer or string." 3693,sequence_numeric_column,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,313,function,"Returns a feature column that represents sequences of numeric data. Example: ```python temperature = sequence_numeric_column('temperature') columns = [temperature] features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) sequence_feature_layer = SequenceFeatures(columns) sequence_input, sequence_length = sequence_feature_layer(features) sequence_length_mask = tf.sequence_mask(sequence_length) rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size) rnn_layer = tf.keras.layers.RNN(rnn_cell) outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask) ``` Args: key: A unique string identifying the input features. shape: The shape of the input data per sequence id. E.g. if `shape=(2,)`, each example must contain `2 * sequence_length` values. default_value: A single value compatible with `dtype` that is used for padding the sparse data into a dense `Tensor`. dtype: The type of values. normalizer_fn: If not `None`, a function that can be used to normalize the value of the tensor after `default_value` is applied for parsing. Normalizer function takes the input `Tensor` as its argument, and returns the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please note that even though the most common use case of this function is normalization, it can be used for any kind of TensorFlow transformation. Returns: A `SequenceNumericColumn`. Raises: TypeError: if any dimension in shape is not an int. ValueError: if any dimension in shape is not a positive integer. ValueError: if `dtype` is not convertible to `tf.float32`." 3694,_assert_all_equal_and_return,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,375,function,Asserts that all tensors are equal and returns the first one. 3695,SequenceNumericColumn,tensorflow/tensorflow/python/feature_column/sequence_feature_column.py,387,class,Represents sequences of numeric data. 3696,SequenceExampleParsingTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_integration_test.py,34,class, 3697,_make_sequence_example,tensorflow/tensorflow/python/feature_column/sequence_feature_column_integration_test.py,200,function, 3698,_initialized_session,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,43,function, 3699,ConcatenateContextInputTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,51,class,Tests the utility fn concatenate_context_input.
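The sequence feature column entries above (3689-3693) all show the same `SequenceFeatures` pattern but leave the inputs abstract. The sketch below makes it concrete under stated assumptions: the feature names, vocabulary, values, and RNN width are invented for illustration, the layer is assumed to be exposed as `tf.keras.experimental.SequenceFeatures` in TF 2.x, and feeding `SparseTensor`s directly stands in for the `tf.io.parse_example` step shown in the docstrings.

```python
import tensorflow as tf

# Columns, following sequence_categorical_column_with_vocabulary_list and
# sequence_numeric_column above (names and values are made up).
colors = tf.feature_column.sequence_categorical_column_with_vocabulary_list(
    key='colors', vocabulary_list=('R', 'G', 'B', 'Y'), num_oov_buckets=2)
colors_embedding = tf.feature_column.embedding_column(colors, dimension=3)
temperature = tf.feature_column.sequence_numeric_column('temperature')

# Sequence columns consume SparseTensors; tf.sparse.from_dense drops the
# default elements ('' and 0.0), so both features have lengths [2, 1].
features = {
    'colors': tf.sparse.from_dense([['R', 'G'], ['B', '']]),
    'temperature': tf.sparse.from_dense([[21.0, 22.5], [19.0, 0.0]]),
}
sequence_feature_layer = tf.keras.experimental.SequenceFeatures(
    [colors_embedding, temperature])
sequence_input, sequence_length = sequence_feature_layer(features)

# Mask padded timesteps exactly as in the docstring examples.
rnn_layer = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4))
outputs = rnn_layer(sequence_input, mask=tf.sequence_mask(sequence_length))
```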
3700,_assert_sparse_tensor_value,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,113,function, 3701,_assert_sparse_tensor_indices_shape,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,121,function, 3702,_get_sequence_dense_tensor,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,129,function, 3703,_get_sequence_dense_tensor_state,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,134,function, 3704,_get_sparse_tensors,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,143,function, 3705,SequenceCategoricalColumnWithIdentityTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,149,class, 3706,SequenceCategoricalColumnWithHashBucketTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,221,class, 3707,SequenceCategoricalColumnWithVocabularyFileTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,260,class, 3708,SequenceCategoricalColumnWithVocabularyListTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,342,class, 3709,SequenceEmbeddingColumnTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,382,class, 3710,SequenceSharedEmbeddingColumnTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,517,class, 3711,SequenceIndicatorColumnTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,684,class, 3712,SequenceNumericColumnTest,tensorflow/tensorflow/python/feature_column/sequence_feature_column_test.py,798,class, 3713,serialize_feature_column,tensorflow/tensorflow/python/feature_column/serialization.py,41,function,"Serializes a FeatureColumn or a raw string key. This method should only be used to serialize parent FeatureColumns when implementing FeatureColumn.get_config(), else serialize_feature_columns() is preferable. This serialization also keeps information of the FeatureColumn class, so deserialization is possible without knowing the class type. For example: a = numeric_column('price') a.get_config() gives: { 'key': 'price', 'shape': (1,), 'default_value': None, 'dtype': 'float32', 'normalizer_fn': None } While serialize_feature_column(a) gives: { 'class_name': 'NumericColumn', 'config': { 'key': 'price', 'shape': (1,), 'default_value': None, 'dtype': 'float32', 'normalizer_fn': None } } Args: fc: A FeatureColumn or raw feature key string. Returns: Keras serialization for FeatureColumns; string keys are left unaffected. Raises: ValueError if called with input that is not a string or FeatureColumn." 3714,deserialize_feature_column,tensorflow/tensorflow/python/feature_column/serialization.py,89,function,"Deserializes a `config` generated with `serialize_feature_column`. This method should only be used to deserialize parent FeatureColumns when implementing FeatureColumn.from_config(), else deserialize_feature_columns() is preferable. Returns a FeatureColumn for this config. TODO(b/118939620): Simplify code if Keras utils support object deduping. Args: config: A Dict with the serialization of feature columns acquired by `serialize_feature_column`, or a string representing a raw column. custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). columns_by_name: A Dict[String, FeatureColumn] of existing columns in order to avoid duplication.
Raises: ValueError if `config` has invalid format (e.g. expected keys missing, or refers to unknown classes). Returns: A FeatureColumn corresponding to the input `config`." 3715,serialize_feature_columns,tensorflow/tensorflow/python/feature_column/serialization.py,146,function,"Serializes a list of FeatureColumns. Returns a list of Keras-style config dicts that represent the input FeatureColumns and can be used with `deserialize_feature_columns` for reconstructing the original columns. Args: feature_columns: A list of FeatureColumns. Returns: Keras serialization for the list of FeatureColumns. Raises: ValueError if called with input that is not a list of FeatureColumns." 3716,deserialize_feature_columns,tensorflow/tensorflow/python/feature_column/serialization.py,165,function,"Deserializes a list of FeatureColumn configs. Returns a list of FeatureColumns given a list of config dicts acquired by `serialize_feature_columns`. Args: configs: A list of Dicts with the serialization of feature columns acquired by `serialize_feature_columns`. custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). Returns: FeatureColumn objects corresponding to the input configs. Raises: ValueError if called with input that is not a list of FeatureColumns." 3717,_column_name_with_class_name,tensorflow/tensorflow/python/feature_column/serialization.py,190,function,"Returns a unique name for the feature column used during deduping. Without this, two FeatureColumns that have the same name and where one wraps the other, such as an IndicatorColumn wrapping a SequenceCategoricalColumn, will fail to deserialize because they will have the same name in columns_by_name, causing the wrong column to be returned. Args: fc: A FeatureColumn. Returns: A unique name as a string." 3718,_serialize_keras_object,tensorflow/tensorflow/python/feature_column/serialization.py,207,function,Serialize a Keras object into a JSON-compatible representation. 3719,_deserialize_keras_object,tensorflow/tensorflow/python/feature_column/serialization.py,238,function,Turns the serialized form of a Keras object back into an actual object. 3720,_class_and_config_for_serialized_keras_object,tensorflow/tensorflow/python/feature_column/serialization.py,290,function,Returns the class name and config for a serialized keras object. 3721,_get_registered_object,tensorflow/tensorflow/python/feature_column/serialization.py,333,function, 3722,FeatureColumnSerializationTest,tensorflow/tensorflow/python/feature_column/serialization_test.py,27,class,"Tests for serialization, deserialization helpers." 3723,sequence_length_from_sparse_tensor,tensorflow/tensorflow/python/feature_column/utils.py,30,function,Returns a [batch_size] Tensor with per-example sequence length. 3724,assert_string_or_int,tensorflow/tensorflow/python/feature_column/utils.py,55,function, 3725,assert_key_is_string,tensorflow/tensorflow/python/feature_column/utils.py,61,function, 3726,check_default_value,tensorflow/tensorflow/python/feature_column/utils.py,68,function,"Returns default value as tuple if it's valid, otherwise raises errors. This function verifies that `default_value` is compatible with both `shape` and `dtype`. If it is not compatible, it raises an error. If it is compatible, it casts default_value to a tuple and returns it. `key` is used only for error messages. Args: shape: An iterable of integers specifying the shape of the `Tensor`.
default_value: If a single value is provided, the same value will be applied as the default value for every item. If an iterable of values is provided, the shape of the `default_value` should be equal to the given `shape`. dtype: defines the type of values. Default value is `tf.float32`. Must be a non-quantized, real integer or floating point type. key: Column name, used only for error messages. Returns: A tuple which will be used as default value. Raises: TypeError: if `default_value` is an iterable but not compatible with `shape`. TypeError: if `default_value` is not compatible with `dtype`. ValueError: if `dtype` is not convertible to `tf.float32`." 3727,_create_tuple,tensorflow/tensorflow/python/feature_column/utils.py,127,function,Returns a tuple with the given shape filled with value. 3728,_as_tuple,tensorflow/tensorflow/python/feature_column/utils.py,134,function, 3729,_is_shape_and_default_value_compatible,tensorflow/tensorflow/python/feature_column/utils.py,140,function,Verifies compatibility of shape and default_value. 3730,op_is_stateful,tensorflow/tensorflow/python/framework/auto_control_deps.py,126,function, 3731,ResourceType,tensorflow/tensorflow/python/framework/auto_control_deps.py,132,class, 3732,collective_manager_ids_from_op,tensorflow/tensorflow/python/framework/auto_control_deps.py,137,function,"Returns CollectiveManager ID from the op if one exists, else None. CollectiveManager adds collective and no_op operations tagged with an ID, unique to the manager object. This function extracts that ID, or None if the node was not generated by a CollectiveManager. Args: op: `Operation` to get the collective manager ID from. Returns: List of CollectiveManager IDs used by the op." 3733,AutomaticControlDependencies,tensorflow/tensorflow/python/framework/auto_control_deps.py,163,class,"Context manager to automatically add control dependencies. Code under this context manager will act as if a sensible set of control dependencies were present. More specifically: 1. All stateful ops in the scope will execute (with the exception of ops in ASYNC_STATEFUL_OPS and LEGACY_RANDOM_OPS) 2. Stateful ops which modify the same resource will execute in program order Note: creating variables in an automatic control dependencies context is not supported (the value of the variables will never change as they will keep getting reinitialized). NOT THREAD SAFE" 3734,register_acd_resource_resolver,tensorflow/tensorflow/python/framework/auto_control_deps.py,483,function,"Register a function for resolving resources touched by an op. `f` is called for every Operation added in the ACD context with the op's original resource reads and writes. `f` is expected to update the sets of resource reads and writes in-place and return True if it updated either of the sets, False otherwise. Example: @register_acd_resource_resolver def ResolveIdentity(op, resource_reads, resource_writes): # op: The `Operation` being processed by ACD currently. # resource_reads: An `ObjectIdentitySet` of read-only resources. # resource_writes: An `ObjectIdentitySet` of read-write resources.
if not (resource_reads or resource_writes): return False def update(resource_inputs): to_add = [] to_remove = [] for t in resource_inputs: if t.op.type == ""Identity"": to_remove.append(t) to_add.append(t.op.inputs[0]) if not to_add and not to_remove: return False for t in to_remove: resource_inputs.discard(t) resource_inputs.update(to_add) return True updated_reads = update(resource_reads) updated_writes = update(resource_writes) return updated_reads or updated_writes Args: f: Python function with signature (Operation, ObjectIdentitySet, ObjectIdentitySet) -> bool Returns: The function `f` after adding it to the registry." 3735,_get_resource_inputs,tensorflow/tensorflow/python/framework/auto_control_deps.py,525,function,Returns an iterable of resources touched by this `op`. 3736,automatic_control_dependencies,tensorflow/tensorflow/python/framework/auto_control_deps.py,550,function,"Wraps f to automatically insert control dependencies. The inserted dependencies ensure that: 1. All stateful ops in f run when the result of f runs 2. Updates to the same resources happen in order. Args: f: the function to be wrapped. Returns: The wrapped function." 3737,AutomaticControlDependenciesTest,tensorflow/tensorflow/python/framework/auto_control_deps_test.py,42,class, 3738,register_read_only_resource_op,tensorflow/tensorflow/python/framework/auto_control_deps_utils.py,31,function,Declares that `op_type` does not update its touched resource. 3739,get_read_only_resource_input_indices_graph,tensorflow/tensorflow/python/framework/auto_control_deps_utils.py,36,function,Returns sorted list of read-only resource indices in func_graph.inputs. 3740,_get_read_only_resource_input_indices_op,tensorflow/tensorflow/python/framework/auto_control_deps_utils.py,63,function,Returns sorted list of read-only resource indices in op.inputs. 3741,get_read_write_resource_inputs,tensorflow/tensorflow/python/framework/auto_control_deps_utils.py,89,function,"Returns a tuple of resource reads, writes in op.inputs. Args: op: Operation Returns: A 2-tuple of ObjectIdentitySets, the first entry containing read-only resource handles and the second containing read-write resource handles in `op.inputs`." 3742,_op_writes_to_resource,tensorflow/tensorflow/python/framework/auto_control_deps_utils.py,128,function,"Returns whether op writes to resource handle. Args: handle: Resource handle. Must be an input of `op`. op: Operation. Returns: Returns False if op is a read-only op registered using `register_read_only_resource_op` or if `handle` is an input at one of the indices in the `READ_ONLY_RESOURCE_INPUTS_ATTR` attr of the op, True otherwise. Raises: ValueError: if `handle` is not an input of `op`." 3743,_input_index,tensorflow/tensorflow/python/framework/auto_control_deps_utils.py,155,function,"Returns the index of `handle` in `op.inputs`. Args: op: Operation. handle: Resource handle. Returns: Index in `op.inputs` receiving the resource `handle`. Raises: ValueError: If handle and its replicated input are both not found in `op.inputs`." 3744,ScopedTFStatus,tensorflow/tensorflow/python/framework/c_api_util.py,29,class,Wrapper around TF_Status that handles deletion. 3745,ScopedTFGraph,tensorflow/tensorflow/python/framework/c_api_util.py,44,class,Wrapper around TF_Graph that handles deletion. 3746,ScopedTFImportGraphDefOptions,tensorflow/tensorflow/python/framework/c_api_util.py,61,class,Wrapper around TF_ImportGraphDefOptions that handles deletion.
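The resolver example flattened into the `register_acd_resource_resolver` docstring (entry 3734 above) is easier to check un-flattened. Below is a hedged, runnable rendering of the same Identity-resolving pattern; the module path is TensorFlow-internal and may move between versions, and the function name is illustrative. Note the two explicit `update` calls: a bare `or` between them would short-circuit and leave the write set unprocessed.

```python
from tensorflow.python.framework import auto_control_deps


@auto_control_deps.register_acd_resource_resolver
def resolve_identity(op, resource_reads, resource_writes):
  """Replaces Identity-wrapped resource tensors with their originals."""
  del op  # This resolver only inspects the resource sets.
  if not resource_reads and not resource_writes:
    return False

  def update(resource_inputs):
    # Swap each Identity output for the tensor feeding the Identity op.
    to_remove = [t for t in resource_inputs if t.op.type == "Identity"]
    to_add = [t.op.inputs[0] for t in to_remove]
    for t in to_remove:
      resource_inputs.discard(t)
    resource_inputs.update(to_add)
    return bool(to_remove)

  # Evaluate both updates before combining, so neither set is skipped.
  updated_reads = update(resource_reads)
  updated_writes = update(resource_writes)
  return updated_reads or updated_writes
```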
3747,ScopedTFImportGraphDefResults,tensorflow/tensorflow/python/framework/c_api_util.py,76,class,Wrapper around TF_ImportGraphDefResults that handles deletion. 3748,ScopedTFFunction,tensorflow/tensorflow/python/framework/c_api_util.py,91,class,Wrapper around TF_Function that handles deletion. 3749,ScopedTFBuffer,tensorflow/tensorflow/python/framework/c_api_util.py,110,class,An internal class to help manage the TF_Buffer lifetime. 3750,ApiDefMap,tensorflow/tensorflow/python/framework/c_api_util.py,122,class,"Wrapper around TF_ApiDefMap that handles querying and deletion. The OpDef protos are also stored in this class so that they can be queried by op name." 3751,tf_buffer,tensorflow/tensorflow/python/framework/c_api_util.py,172,function,"Context manager that creates and deletes TF_Buffer. Example usage: with tf_buffer() as buf: # get serialized graph def into buf ... proto_data = c_api.TF_GetBuffer(buf) graph_def.ParseFromString(compat.as_bytes(proto_data)) # buf has been deleted with tf_buffer(some_string) as buf: c_api.TF_SomeFunction(buf) # buf has been deleted Args: data: An optional `bytes`, `str`, or `unicode` object. If not None, the yielded buffer will contain this data. Yields: Created TF_Buffer" 3752,tf_output,tensorflow/tensorflow/python/framework/c_api_util.py,204,function,"Returns a wrapped TF_Output with specified operation and index. Args: c_op: wrapped TF_Operation index: integer Returns: Wrapped TF_Output" 3753,tf_operations,tensorflow/tensorflow/python/framework/c_api_util.py,220,function,"Generator that yields every TF_Operation in `graph`. Args: graph: Graph Yields: wrapped TF_Operation" 3754,new_tf_operations,tensorflow/tensorflow/python/framework/c_api_util.py,238,function,"Generator that yields newly-added TF_Operations in `graph`. Specifically, yields TF_Operations that don't have associated Operations in `graph`. This is useful for processing nodes added by the C API. Args: graph: Graph Yields: wrapped TF_Operation" 3755,ApiDefMapTest,tensorflow/tensorflow/python/framework/c_api_util_test.py,26,class, 3756,EagerGraphCombination,tensorflow/tensorflow/python/framework/combinations.py,33,class,"Run the test in Graph or Eager mode. The optional `mode` parameter controls the test's execution mode. Its accepted values are the literals ""graph"" and ""eager""." 3757,TFVersionCombination,tensorflow/tensorflow/python/framework/combinations.py,56,class,"Control the execution of the test in TF1.x and TF2. If TF2 is enabled, a TF1-only test is skipped, and vice versa. Test targets continuously run in TF2 thanks to the tensorflow.v2 TAP target. A test can be run in TF2 with bazel by passing --test_env=TF2_BEHAVIOR=1." 3758,_broadcast_shape_helper,tensorflow/tensorflow/python/framework/common_shapes.py,25,function,"Helper function for is_broadcast_compatible and broadcast_shape. Args: shape_x: A `TensorShape` shape_y: A `TensorShape` Returns: Returns None if the shapes are not broadcast compatible, a list of the broadcast dimensions otherwise." 3759,is_broadcast_compatible,tensorflow/tensorflow/python/framework/common_shapes.py,73,function,"Returns True if `shape_x` and `shape_y` are broadcast compatible. Args: shape_x: A `TensorShape` shape_y: A `TensorShape` Returns: True if a shape exists that both `shape_x` and `shape_y` can be broadcasted to. False otherwise." 3760,broadcast_shape,tensorflow/tensorflow/python/framework/common_shapes.py,89,function,"Returns the broadcasted shape between `shape_x` and `shape_y`.
Args: shape_x: A `TensorShape` shape_y: A `TensorShape` Returns: A `TensorShape` representing the broadcasted shape. Raises: ValueError: If the two shapes cannot be broadcasted." 3761,CommonShapesTest,tensorflow/tensorflow/python/framework/common_shapes_test.py,29,class, 3762,CompositeTensor,tensorflow/tensorflow/python/framework/composite_tensor.py,31,class,"Abstract base class for Tensor-like objects that are composed from Tensors. Each `CompositeTensor` can be decomposed into a structured collection of component `tf.Tensor`s, and reconstructed from those components. The `tensorflow.python.util.nest` module has support for treating composite tensors as structure, which makes it easy to flatten and reconstruct composite tensors (or larger structures that contain composite tensors). E.g.: ```python ct = ... # Create a composite tensor. flat_list_of_tensors = nest.flatten(ct, expand_composites=True) transformed_list_of_tensors = ... # do something with the flat tensors. result = nest.pack_sequence_as(ct, transformed_list_of_tensors, expand_composites=True) ```" 3763,replace_composites_with_components,tensorflow/tensorflow/python/framework/composite_tensor.py,94,function,"Recursively replaces CompositeTensors with their components. Args: structure: A `nest`-compatible structure, possibly containing composite tensors. Returns: A copy of `structure`, where each composite tensor has been replaced by its components. The result will contain no composite tensors. Note that `nest.flatten(replace_composites_with_components(structure))` returns the same value as `nest.flatten(structure)`." 3764,CTSpec,tensorflow/tensorflow/python/framework/composite_tensor_test.py,40,class,"A generic CompositeTensor TypeSpec, used for constructing tests." 3765,CT,tensorflow/tensorflow/python/framework/composite_tensor_test.py,60,class,"A generic CompositeTensor, used for constructing tests." 3766,CTSpec2,tensorflow/tensorflow/python/framework/composite_tensor_test.py,87,class, 3767,CT2,tensorflow/tensorflow/python/framework/composite_tensor_test.py,91,class, 3768,CompositeTensorTest,tensorflow/tensorflow/python/framework/composite_tensor_test.py,96,class, 3769,is_composite_or_composite_value,tensorflow/tensorflow/python/framework/composite_tensor_utils.py,31,function,Returns true if 'tensor' is a CompositeTensor or a CT Value object. 3770,get_shape,tensorflow/tensorflow/python/framework/composite_tensor_utils.py,41,function,Returns the shape of the passed composite tensor. 3771,_append_sparse_tensor_value,tensorflow/tensorflow/python/framework/composite_tensor_utils.py,50,function,Append sparse tensor value objects. 3772,_append_ragged_tensor_value,tensorflow/tensorflow/python/framework/composite_tensor_utils.py,94,function,Append ragged tensor value objects. 3773,append_composite_tensor,tensorflow/tensorflow/python/framework/composite_tensor_utils.py,113,function,"Helper function to append composite tensors to each other in the 0 axis. In order to support batching within a fit/evaluate/predict call, we need to be able to aggregate within a CompositeTensor. Unfortunately, the CT API currently does not make this easy, especially in V1 mode, where we're working with CompositeTensor Value objects that have no connection with the CompositeTensors that created them. Arguments: target: CompositeTensor or CompositeTensor value object that will be appended to. to_append: CompositeTensor or CompositeTensor value object to append to 'target'. Returns: A CompositeTensor or CompositeTensor value object.
Raises: RuntimeError: if concatenation is not possible." 3774,CompositeTensorTest,tensorflow/tensorflow/python/framework/composite_tensor_utils_test.py,32,class, 3775,tensor_float_32_execution_allowed,tensorflow/tensorflow/python/framework/config.py,28,function,"Get if TensorFloat-32 operations are enabled on supported hardware. Returns: True if TensorFloat-32 execution is enabled and False otherwise." 3776,allow_tensor_float_32_execution,tensorflow/tensorflow/python/framework/config.py,38,function,"Allow use of TensorFloat-32 with float32 ops on supported hardware. TensorFloat-32 is a math mode introduced with the NVIDIA Ampere architecture. TensorFloat-32 kernels take float32 inputs and produce float32 outputs. Internally, the inputs are cast to a custom representation with 10-bit mantissa (similar to float16) and 8-bit exponent (similar to float32) and are executed using TensorCores with float32 accumulation. For more information, see https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/. TensorFloat-32 execution is disabled by default, but this may change in a future version. Args: allowed: whether to allow TensorFloat-32 execution" 3777,get_intra_op_parallelism_threads,tensorflow/tensorflow/python/framework/config.py,58,function,"Get number of threads used within an individual op for parallelism. Certain operations like matrix multiplication and reductions can utilize parallel threads for speed ups. A value of 0 means the system picks an appropriate number. Returns: Number of parallel threads" 3778,set_intra_op_parallelism_threads,tensorflow/tensorflow/python/framework/config.py,72,function,"Set number of threads used within an individual op for parallelism. Certain operations like matrix multiplication and reductions can utilize parallel threads for speed ups. A value of 0 means the system picks an appropriate number. Args: num_threads: Number of parallel threads" 3779,get_inter_op_parallelism_threads,tensorflow/tensorflow/python/framework/config.py,86,function,"Get number of threads used for parallelism between independent operations. Determines the number of threads used by independent non-blocking operations. 0 means the system picks an appropriate number. Returns: Number of parallel threads" 3780,set_inter_op_parallelism_threads,tensorflow/tensorflow/python/framework/config.py,99,function,"Set number of threads used for parallelism between independent operations. Determines the number of threads used by independent non-blocking operations. 0 means the system picks an appropriate number. Args: num_threads: Number of parallel threads" 3781,get_optimizer_jit,tensorflow/tensorflow/python/framework/config.py,112,function,"Get if JIT compilation is enabled. Note that optimizations are only applied to code that is compiled into a graph. In eager mode, which is the TF2 API default, that means only code that is defined under a tf.function decorator. Returns: If JIT compilation is enabled." 3782,set_optimizer_jit,tensorflow/tensorflow/python/framework/config.py,126,function,"Set if JIT compilation is enabled. Note that optimizations are only applied to code that is compiled into a graph. In eager mode, which is the TF2 API default, that means only code that is defined under a tf.function decorator. Args: enabled: Whether to enable JIT compilation." 3783,get_optimizer_experimental_options,tensorflow/tensorflow/python/framework/config.py,140,function,"Get experimental optimizer options. Refer to tf.config.optimizer.set_experimental_options for a list of current options. 
Note that optimizations are only applied in graph mode (within tf.function). In addition, as these are experimental options, the list is subject to change. Returns: Dictionary of configured experimental optimizer options" 3784,set_optimizer_experimental_options,tensorflow/tensorflow/python/framework/config.py,156,function,"Set experimental optimizer options. Note that optimizations are only applied in graph mode (within tf.function). In addition, as these are experimental options, the list is subject to change. Args: options: Dictionary of experimental optimizer options to configure. Valid keys: - layout_optimizer: Optimize tensor layouts, e.g. try to use the NCHW layout on GPUs, which is faster. - constant_folding: Fold constants. Statically infer the value of tensors when possible, and materialize the result using constants. - shape_optimization: Simplify computations made on shapes. - remapping: Remap subgraphs onto more efficient implementations. - arithmetic_optimization: Simplify arithmetic ops with common sub-expression elimination and arithmetic simplification. - dependency_optimization: Control dependency optimizations. Remove redundant control dependencies, which may enable other optimizations. This optimizer is also essential for pruning Identity and NoOp nodes. - loop_optimization: Loop optimizations. - function_optimization: Function optimizations and inlining. - debug_stripper: Strips debug-related nodes from the graph. - disable_model_pruning: Disable removal of unnecessary ops from the graph. - scoped_allocator_optimization: Try to allocate some independent Op outputs contiguously in order to merge or eliminate downstream Ops. - pin_to_host_optimization: Force small ops onto the CPU. - implementation_selector: Enable the swap of kernel implementations based on the device placement. - auto_mixed_precision: Change certain float32 ops to float16 on Volta GPUs and above. Without the use of loss scaling, this can cause numerical underflow (see `keras.mixed_precision.experimental.LossScaleOptimizer`). - disable_meta_optimizer: Disable the entire meta optimizer. - min_graph_nodes: The minimum number of nodes in a graph for the optimizer to run. For smaller graphs, optimization is skipped." 3785,get_soft_device_placement,tensorflow/tensorflow/python/framework/config.py,198,function,"Get if soft device placement is enabled. If enabled, an op will be placed on CPU if any of the following are true: 1. there's no GPU implementation for the op 2. no GPU devices are known or registered 3. need to co-locate with reftype input(s) which are from CPU Returns: If soft placement is enabled." 3786,set_soft_device_placement,tensorflow/tensorflow/python/framework/config.py,213,function,"Set if soft device placement is enabled. If enabled, an op will be placed on CPU if any of the following are true: 1. there's no GPU implementation for the op 2. no GPU devices are known or registered 3. need to co-locate with reftype input(s) which are from CPU Args: enabled: Whether to enable soft placement." 3787,get_device_policy,tensorflow/tensorflow/python/framework/config.py,228,function,"Gets the current device policy. The device policy controls how operations requiring inputs on a specific device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1). This function only gets the device policy for the current thread. Any subsequently started thread will again use the default policy.
Returns: Current thread device policy" 3788,set_device_policy,tensorflow/tensorflow/python/framework/config.py,254,function,"Sets the current thread device policy. The device policy controls how operations requiring inputs on a specific device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1). When using the default, an appropriate policy will be picked automatically. The default policy may change over time. This function only sets the device policy for the current thread. Any subsequently started thread will again use the default policy. Args: device_policy: A device policy. Valid values: - None: Switch to a system default. - 'warn': Copies the tensors which are not on the right device and logs a warning. - 'explicit': Raises an error if the placement is not as required. - 'silent': Silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. - 'silent_for_int32': silently copies `int32` tensors, raising errors on the other ones. Raises: ValueError: If an invalid `device_policy` is passed." 3789,get_synchronous_execution,tensorflow/tensorflow/python/framework/config.py,297,function,"Gets whether operations are executed synchronously or asynchronously. TensorFlow can execute operations synchronously or asynchronously. If asynchronous execution is enabled, operations may return ""non-ready"" handles. Returns: Current thread execution mode" 3790,set_synchronous_execution,tensorflow/tensorflow/python/framework/config.py,310,function,"Specifies whether operations are executed synchronously or asynchronously. TensorFlow can execute operations synchronously or asynchronously. If asynchronous execution is enabled, operations may return ""non-ready"" handles. When `enable` is set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Args: enable: Whether operations should be dispatched synchronously. Valid values: - None: sets the system default. - True: executes each operation synchronously. - False: executes each operation asynchronously." 3791,list_physical_devices,tensorflow/tensorflow/python/framework/config.py,338,function,"Return a list of physical devices visible to the host runtime. Physical devices are hardware devices present on the host machine. By default all discovered CPU and GPU devices are considered visible. This API allows querying the physical hardware resources prior to runtime initialization. Thus, giving an opportunity to call any additional configuration APIs. This is in contrast to `tf.config.list_logical_devices`, which triggers runtime initialization in order to list the configured devices. The following example lists the number of visible GPUs on the host. >>> physical_devices = tf.config.list_physical_devices('GPU') >>> print(""Num GPUs:"", len(physical_devices)) Num GPUs: ... However, the number of GPUs available to the runtime may change during runtime initialization due to marking certain devices as not visible or configuring multiple logical devices. Args: device_type: (optional string) Only include devices matching this device type. For example ""CPU"" or ""GPU"". Returns: List of discovered `tf.config.PhysicalDevice` objects" 3792,list_logical_devices,tensorflow/tensorflow/python/framework/config.py,373,function,"Return a list of logical devices created by runtime. Logical devices may correspond to physical devices or remote devices in the cluster. 
Operations and tensors may be placed on these devices by using the `name` of the `tf.config.LogicalDevice`. Calling `tf.config.list_logical_devices` triggers the runtime to configure any `tf.config.PhysicalDevice` visible to the runtime, thereby preventing further configuration. To avoid runtime initialization, call `tf.config.list_physical_devices` instead. For example: >>> logical_devices = tf.config.list_logical_devices('GPU') >>> if len(logical_devices) > 0: ... # Allocate on GPU:0 ... with tf.device(logical_devices[0].name): ... one = tf.constant(1) ... # Allocate on GPU:1 ... with tf.device(logical_devices[1].name): ... two = tf.constant(2) Args: device_type: (optional string) Only include devices matching this device type. For example ""CPU"" or ""GPU"". Returns: List of initialized `LogicalDevice`s" 3793,get_visible_devices,tensorflow/tensorflow/python/framework/config.py,410,function,"Get the list of visible physical devices. Returns the list of `PhysicalDevice`s currently marked as visible to the runtime. A visible device will have at least one `LogicalDevice` associated with it once the runtime is initialized. The following example verifies all visible GPUs have been disabled: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... # Disable all GPUS ... tf.config.set_visible_devices([], 'GPU') ... visible_devices = tf.config.get_visible_devices() ... for device in visible_devices: ... assert device.device_type != 'GPU' ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device_type: (optional string) Only include devices matching this device type. For example ""CPU"" or ""GPU"". Returns: List of visible `PhysicalDevice`s" 3794,set_visible_devices,tensorflow/tensorflow/python/framework/config.py,444,function,"Set the list of visible devices. Specifies which `PhysicalDevice` objects are visible to the runtime. TensorFlow will only allocate memory and place operations on visible physical devices, as otherwise no `LogicalDevice` will be created on them. By default all discovered devices are marked as visible. The following example demonstrates disabling the first GPU on the machine. >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... # Disable first GPU ... tf.config.set_visible_devices(physical_devices[1:], 'GPU') ... logical_devices = tf.config.list_logical_devices('GPU') ... # Logical device was not created for first GPU ... assert len(logical_devices) == len(physical_devices) - 1 ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: devices: List of `PhysicalDevice`s to make visible device_type: (optional) Only configure devices matching this device type. For example ""CPU"" or ""GPU"". Other devices will be left unaltered. Raises: ValueError: If argument validation fails. RuntimeError: Runtime is already initialized." 3795,get_memory_growth,tensorflow/tensorflow/python/framework/config.py,478,function,"Get if memory growth is enabled for a `PhysicalDevice`. If memory growth is enabled for a `PhysicalDevice`, the runtime initialization will not allocate all memory on the device. For example: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... tf.config.experimental.set_memory_growth(physical_devices[0], True) ... assert tf.config.experimental.get_memory_growth(physical_devices[0]) ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... 
pass Args: device: `PhysicalDevice` to query Returns: A boolean indicating the memory growth setting for the `PhysicalDevice`. Raises: ValueError: Invalid `PhysicalDevice` specified." 3796,set_memory_growth,tensorflow/tensorflow/python/framework/config.py,507,function,"Set if memory growth should be enabled for a `PhysicalDevice`. If memory growth is enabled for a `PhysicalDevice`, the runtime initialization will not allocate all memory on the device. Memory growth cannot be configured on a `PhysicalDevice` with virtual devices configured. For example: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... tf.config.experimental.set_memory_growth(physical_devices[0], True) ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device: `PhysicalDevice` to configure enable: (Boolean) Whether to enable or disable memory growth Raises: ValueError: Invalid `PhysicalDevice` specified. RuntimeError: Runtime is already initialized." 3797,get_device_details,tensorflow/tensorflow/python/framework/config.py,535,function,"Returns details about a physical device. This API takes in a `tf.config.PhysicalDevice` returned by `tf.config.list_physical_devices`. It returns a dict with string keys containing various details about the device. Each key is only supported by a subset of devices, so you should not assume the returned dict will have any particular key. >>> gpu_devices = tf.config.list_physical_devices('GPU') >>> if gpu_devices: ... details = tf.config.experimental.get_device_details(gpu_devices[0]) ... details.get('device_name', 'Unknown GPU') Currently, details are only returned for GPUs. This function returns an empty dict if passed a non-GPU device. The returned dict may have the following keys: * `'device_name'`: A human-readable name of the device as a string, e.g. ""Titan V"". Unlike `tf.config.PhysicalDevice.name`, this will be the same for multiple devices if each device is the same model. Currently only available for GPUs. * `'compute_capability'`: The [compute capability](https://developer.nvidia.com/cuda-gpus) of the device as a tuple of two ints, in the form `(major_version, minor_version)`. Only available for NVIDIA GPUs. Note: This is similar to `tf.sysconfig.get_build_info` in that both functions can return information relating to GPUs. However, this function returns run-time information about a specific device (such as a GPU's compute capability), while `tf.sysconfig.get_build_info` returns compile-time information about how TensorFlow was built (such as what version of CUDA TensorFlow was built for). Args: device: A `tf.config.PhysicalDevice` returned by `tf.config.list_physical_devices` or `tf.config.get_visible_devices`. Returns: A dict with string keys." 3798,get_logical_device_configuration,tensorflow/tensorflow/python/framework/config.py,583,function,"Get the virtual device configuration for a `tf.config.PhysicalDevice`. Returns the list of `tf.config.LogicalDeviceConfiguration` objects previously configured by a call to `tf.config.set_logical_device_configuration`. For example: >>> physical_devices = tf.config.list_physical_devices('CPU') >>> assert len(physical_devices) == 1, ""No CPUs found"" >>> configs = tf.config.get_logical_device_configuration( ... physical_devices[0]) >>> try: ... assert configs is None ... tf.config.set_logical_device_configuration( ... physical_devices[0], ... [tf.config.LogicalDeviceConfiguration(), ... tf.config.LogicalDeviceConfiguration()]) ...
configs = tf.config.get_logical_device_configuration( ... physical_devices[0]) ... assert len(configs) == 2 ... except: ... # Cannot modify virtual devices once initialized. ... pass Args: device: `PhysicalDevice` to query Returns: List of `tf.config.LogicalDeviceConfiguration` objects or `None` if no virtual device configuration has been set for this physical device." 3799,set_logical_device_configuration,tensorflow/tensorflow/python/framework/config.py,624,function,"Set the logical device configuration for a `tf.config.PhysicalDevice`. A visible `tf.config.PhysicalDevice` will by default have a single `tf.config.LogicalDevice` associated with it once the runtime is initialized. Specifying a list of `tf.config.LogicalDeviceConfiguration` objects allows multiple devices to be created on the same `tf.config.PhysicalDevice`. The following example splits the CPU into 2 logical devices: >>> physical_devices = tf.config.list_physical_devices('CPU') >>> assert len(physical_devices) == 1, ""No CPUs found"" >>> # Specify 2 virtual CPUs. Note currently memory limit is not supported. >>> try: ... tf.config.set_logical_device_configuration( ... physical_devices[0], ... [tf.config.LogicalDeviceConfiguration(), ... tf.config.LogicalDeviceConfiguration()]) ... logical_devices = tf.config.list_logical_devices('CPU') ... assert len(logical_devices) == 2 ... ... tf.config.set_logical_device_configuration( ... physical_devices[0], ... [tf.config.LogicalDeviceConfiguration(), ... tf.config.LogicalDeviceConfiguration(), ... tf.config.LogicalDeviceConfiguration(), ... tf.config.LogicalDeviceConfiguration()]) ... except: ... # Cannot modify logical devices once initialized. ... pass The following example splits the GPU into 2 logical devices with 100 MB each: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... tf.config.set_logical_device_configuration( ... physical_devices[0], ... [tf.config.LogicalDeviceConfiguration(memory_limit=100), ... tf.config.LogicalDeviceConfiguration(memory_limit=100)]) ... ... logical_devices = tf.config.list_logical_devices('GPU') ... assert len(logical_devices) == len(physical_devices) + 1 ... ... tf.config.set_logical_device_configuration( ... physical_devices[0], ... [tf.config.LogicalDeviceConfiguration(memory_limit=10), ... tf.config.LogicalDeviceConfiguration(memory_limit=10)]) ... except: ... # Invalid device or cannot modify logical devices once initialized. ... pass Args: device: The `PhysicalDevice` to configure. logical_devices: (optional) List of `tf.config.LogicalDeviceConfiguration` objects to allocate for the specified `PhysicalDevice`. If None, the default configuration will be used. Raises: ValueError: If argument validation fails. RuntimeError: Runtime is already initialized." 3800,enable_mlir_bridge,tensorflow/tensorflow/python/framework/config.py,689,function,"Enables experimental MLIR-Based TensorFlow Compiler Bridge. DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT. NOTE: MLIR-Based TensorFlow Compiler is under active development and has missing features; please refrain from using it. This API exists for development and testing only. TensorFlow Compiler Bridge (TF Bridge) is responsible for translating parts of the TensorFlow graph into a form that can be accepted as an input by a backend compiler such as XLA." 3801,enable_mlir_graph_optimization,tensorflow/tensorflow/python/framework/config.py,706,function,"Enables experimental MLIR-Based TensorFlow Compiler Optimizations. DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT.
NOTE: MLIR-Based TensorFlow Compiler is under active development and has missing features; please refrain from using it. This API exists for development and testing only. TensorFlow Compiler Optimizations are responsible for general graph-level optimizations that, in the current stack, are mostly done by Grappler graph optimizers." 3802,disable_mlir_bridge,tensorflow/tensorflow/python/framework/config.py,723,function,Disables experimental MLIR-Based TensorFlow Compiler Bridge. 3803,disable_mlir_graph_optimization,tensorflow/tensorflow/python/framework/config.py,729,function,Disables experimental MLIR-Based TensorFlow Compiler Optimizations. 3804,reset_eager,tensorflow/tensorflow/python/framework/config_test.py,42,function, 3805,ConfigTest,tensorflow/tensorflow/python/framework/config_test.py,55,class, 3806,DeviceTest,tensorflow/tensorflow/python/framework/config_test.py,369,class, 3807,TensorFloat32Test,tensorflow/tensorflow/python/framework/config_test.py,759,class, 3808,_eager_reshape,tensorflow/tensorflow/python/framework/constant_op.py,39,function,Eager-only version of Reshape op; requires tensor is an eager Tensor. 3809,_eager_fill,tensorflow/tensorflow/python/framework/constant_op.py,51,function,Eager-only version of Fill op; requires value is an eager Tensor. 3810,_eager_identity,tensorflow/tensorflow/python/framework/constant_op.py,62,function,Eager-only version of Identity op; requires tensor is an eager Tensor. 3811,convert_to_eager_tensor,tensorflow/tensorflow/python/framework/constant_op.py,70,function,"Converts the given `value` to an `EagerTensor`. Note that this function could return cached copies of created constants for performance reasons. Args: value: value to convert to EagerTensor. ctx: value of context.context(). dtype: optional desired dtype of the converted EagerTensor. Returns: EagerTensor created from value. Raises: TypeError: if `dtype` is not compatible with the type of `value`." 3812,constant_v1,tensorflow/tensorflow/python/framework/constant_op.py,102,function,"Creates a constant tensor. The resulting tensor is populated with values of type `dtype`, as specified by arguments `value` and (optionally) `shape` (see examples below). The argument `value` can be a constant value, or a list of values of type `dtype`. If `value` is a list, then the length of the list must be less than or equal to the number of elements implied by the `shape` argument (if specified). In the case where the list length is less than the number of elements specified by `shape`, the last element in the list will be used to fill the remaining entries. The argument `shape` is optional. If present, it specifies the dimensions of the resulting tensor. If not present, the shape of `value` is used. If the argument `dtype` is not specified, then the type is inferred from the type of `value`. For example: ```python # Constant 1-D Tensor populated with value list. tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7] # Constant 2-D tensor populated with scalar value -1. tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.] [-1. -1. -1.]] ``` `tf.constant` differs from `tf.fill` in a few ways: * `tf.constant` supports arbitrary constants, not just uniform scalar Tensors like `tf.fill`. * `tf.constant` creates a `Const` node in the computation graph with the exact value at graph construction time. On the other hand, `tf.fill` creates an Op in the graph that is expanded at runtime.
* Because `tf.constant` only embeds constant values in the graph, it does not support dynamic shapes based on other runtime Tensors, whereas `tf.fill` does. Args: value: A constant value (or list) of output type `dtype`. dtype: The type of the elements of the resulting tensor. shape: Optional dimensions of resulting tensor. name: Optional name for the tensor. verify_shape: Boolean that enables verification of the shape of values. Returns: A Constant Tensor. Raises: TypeError: if shape is incorrectly specified or unsupported." 3813,constant,tensorflow/tensorflow/python/framework/constant_op.py,167,function,"Creates a constant tensor from a tensor-like object. Note: All eager `tf.Tensor` values are immutable (in contrast to `tf.Variable`). There is nothing especially _constant_ about the value returned from `tf.constant`. This function is not fundamentally different from `tf.convert_to_tensor`. The name `tf.constant` comes from the `value` being embedded in a `Const` node in the `tf.Graph`. `tf.constant` is useful for asserting that the value can be embedded that way. If the argument `dtype` is not specified, then the type is inferred from the type of `value`. >>> # Constant 1-D Tensor from a python list. >>> tf.constant([1, 2, 3, 4, 5, 6]) >>> # Or a numpy array >>> a = np.array([[1, 2, 3], [4, 5, 6]]) >>> tf.constant(a) If `dtype` is specified the resulting tensor values are cast to the requested `dtype`. >>> tf.constant([1, 2, 3, 4, 5, 6], dtype=tf.float64) If `shape` is set, the `value` is reshaped to match. Scalars are expanded to fill the `shape`: >>> tf.constant(0, shape=(2, 3)) >>> tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) `tf.constant` has no effect if an eager Tensor is passed as the `value`; it even transmits gradients: >>> v = tf.Variable([0.0]) >>> with tf.GradientTape() as g: ... loss = tf.constant(v + v) >>> g.gradient(loss, v).numpy() array([2.], dtype=float32) But, since `tf.constant` embeds the value in the `tf.Graph`, this fails for symbolic tensors: >>> with tf.compat.v1.Graph().as_default(): ... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32) ... t = tf.constant(i) Traceback (most recent call last): ... TypeError: ... `tf.constant` will _always_ create CPU (host) tensors. In order to create tensors on other devices, use `tf.identity`. (If the `value` is an eager Tensor, however, the tensor will be returned unmodified as mentioned above.) Related Ops: * `tf.convert_to_tensor` is similar but: * It has no `shape` argument. * Symbolic tensors are allowed to pass through. >>> with tf.compat.v1.Graph().as_default(): ... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32) ... t = tf.convert_to_tensor(i) * `tf.fill`: differs in a few ways: * `tf.constant` supports arbitrary constants, not just uniform scalar Tensors like `tf.fill`. * `tf.fill` creates an Op in the graph that is expanded at runtime, so it can efficiently represent large tensors. * Since `tf.fill` does not embed the value, it can produce dynamically sized outputs. Args: value: A constant value (or list) of output type `dtype`. dtype: The type of the elements of the resulting tensor. shape: Optional dimensions of resulting tensor. name: Optional name for the tensor. Returns: A Constant Tensor. Raises: TypeError: if shape is incorrectly specified or unsupported. ValueError: if called on a symbolic tensor." 3814,_constant_impl,tensorflow/tensorflow/python/framework/constant_op.py,268,function,Implementation of constant.
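Entries 3812 and 3813 both contrast `tf.constant` with `tf.fill`; a minimal sketch of that distinction, with invented names:

```python
import tensorflow as tf

# tf.constant embeds the (possibly non-uniform) values at graph
# construction time; shape=[2, 3] reshapes the flat value list.
c = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])

# tf.fill is expanded at runtime, so its dimensions may come from
# another tensor; tf.constant cannot express this.
@tf.function
def dynamic_ones(n):
  return tf.fill([n, 3], 1.0)

print(c.numpy())                     # [[1 2 3] [4 5 6]]
print(dynamic_ones(tf.constant(2)))  # shape (2, 3), all ones
```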
3815,_constant_eager_impl,tensorflow/tensorflow/python/framework/constant_op.py,299,function,Implementation of eager constant. 3816,is_constant,tensorflow/tensorflow/python/framework/constant_op.py,328,function, 3817,_constant_tensor_conversion_function,tensorflow/tensorflow/python/framework/constant_op.py,336,function, 3818,_tensor_shape_tensor_conversion_function,tensorflow/tensorflow/python/framework/constant_op.py,348,function,Function to convert TensorShape to Tensor. 3819,_dimension_tensor_conversion_function,tensorflow/tensorflow/python/framework/constant_op.py,381,function,Function to convert Dimension to Tensor. 3820,ConstantOpTest,tensorflow/tensorflow/python/framework/constant_op_test.py,29,class, 3821,_TensorData,tensorflow/tensorflow/python/framework/convert_to_constants.py,53,class,Data about a tensor that was converted to a constant. 3822,_EndPoint,tensorflow/tensorflow/python/framework/convert_to_constants.py,63,class,An endpoint in a graph. 3823,_Edge,tensorflow/tensorflow/python/framework/convert_to_constants.py,71,class,A directed graph edge. 3824,_Convertible,tensorflow/tensorflow/python/framework/convert_to_constants.py,79,class,An entity that can have variables converted to constants. 3825,_Function,tensorflow/tensorflow/python/framework/convert_to_constants.py,140,class,"A library function Convertible. Edges into functions are edges from node _inputs_ into function _inputs_: Functions get their input from their callers, not from node outputs, and the callers in turn get those values as inputs." 3826,_Node,tensorflow/tensorflow/python/framework/convert_to_constants.py,206,class,A Convertible NodeDef. 3827,_Intermediate,tensorflow/tensorflow/python/framework/convert_to_constants.py,350,class,Specialization of _Node to intermediate ops. 3828,_Merge,tensorflow/tensorflow/python/framework/convert_to_constants.py,363,class,Specialization of _Node to Merge ops. 3829,_VarHandle,tensorflow/tensorflow/python/framework/convert_to_constants.py,375,class,Specialization of _Node to VarHandleOp. 3830,_ResourceGather,tensorflow/tensorflow/python/framework/convert_to_constants.py,395,class,Specialization of _Node to ResourceGather. 3831,_ResourceGatherNd,tensorflow/tensorflow/python/framework/convert_to_constants.py,428,class,Specialization of _Node to ResourceGatherNd. 3832,_ReadVariable,tensorflow/tensorflow/python/framework/convert_to_constants.py,443,class,Specialization of _Node to ReadVariableOp. 3833,_FunctionCaller,tensorflow/tensorflow/python/framework/convert_to_constants.py,471,class,A base class for Convertibles that reference functions. 3834,_If,tensorflow/tensorflow/python/framework/convert_to_constants.py,561,class,Specialization of _Node to If-like operations. 3835,_Case,tensorflow/tensorflow/python/framework/convert_to_constants.py,574,class,Specialization of _Node to Case-like operations. 3836,_PartitionedCall,tensorflow/tensorflow/python/framework/convert_to_constants.py,587,class,Specialization of _Node to PartitionedCall-like operations. 3837,_While,tensorflow/tensorflow/python/framework/convert_to_constants.py,600,class,Specialization of _Node to While-like operations. 3838,_GraphDef,tensorflow/tensorflow/python/framework/convert_to_constants.py,631,class,A convertible GraphDef. 3839,_ConverterData,tensorflow/tensorflow/python/framework/convert_to_constants.py,705,class,"Container for constant conversion supporting data. The data includes the graph being converted, and the pre-converted tensors. 
This class will be specialized for ConcreteFunction and Session-based conversions, as the means to obtain that data is different for each case." 3840,_FunctionConverterData,tensorflow/tensorflow/python/framework/convert_to_constants.py,774,class,Container for ConcreteFunction-based conversion data. 3841,_SessionConverterData,tensorflow/tensorflow/python/framework/convert_to_constants.py,847,class,Container for Session-based conversion data. 3842,disable_lower_using_switch_merge,tensorflow/tensorflow/python/framework/convert_to_constants.py,881,function,"Set '_lower_using_switch_merge' attributes to False. Sets the attribute to False in the NodeDefs in the main graph and the NodeDefs in each function's graph. Args: graph_def: GraphDef proto. Returns: GraphDef" 3843,_run_inline_graph_optimization,tensorflow/tensorflow/python/framework/convert_to_constants.py,910,function,"Apply function inline optimization to the graph. Returns the GraphDef after Grappler's function inlining optimization is applied. This optimization does not work on models with control flow. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if the function has stateful ops not properly connected to control outputs). Returns: GraphDef" 3844,_construct_concrete_function,tensorflow/tensorflow/python/framework/convert_to_constants.py,975,function,"Constructs a concrete function from the `output_graph_def`. Args: func: ConcreteFunction output_graph_def: GraphDef proto. converted_input_indices: Set of integers of input indices that were converted to constants. Returns: ConcreteFunction." 3845,_replace_variables_by_constants,tensorflow/tensorflow/python/framework/convert_to_constants.py,1012,function,"Replaces variables by constants on a given graph. Given a _ConverterData instance with converted variables in its tensor_data field, create a new graph where the respective variables are replaced with the converted constants. Args: converter_data: A pre-populated _ConverterData instance. Returns: The converted graph." 3846,convert_variables_to_constants_v2,tensorflow/tensorflow/python/framework/convert_to_constants.py,1042,function,"Replaces all the variables in a graph with constants of the same values. TensorFlow 2.0 function for converting all Variable ops into Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. This function runs Grappler's function inlining optimization in order to return a single subgraph. The current implementation only works for graphs that do not contain any control flow or embedding related ops. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if the function has stateful ops not properly connected to control outputs). (default False) Returns: ConcreteFunction containing a simplified version of the original." 3847,convert_variables_to_constants_v2_as_graph,tensorflow/tensorflow/python/framework/convert_to_constants.py,1080,function,"Replaces all the variables in a graph with constants of the same values.
This function works the same as convert_variables_to_constants_v2, but it returns the intermediate `GraphDef` as well. This `GraphDef` contains all the debug information after all the transformations in the frozen phase. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops not properly connected to control outputs). Returns: ConcreteFunction containing a simplified version of the original, and also the intermediate GraphDef containing the node debug information for the transformations in the frozen phase." 3848,convert_variables_to_constants_from_session_graph,tensorflow/tensorflow/python/framework/convert_to_constants.py,1115,function,"Replaces all the variables in a graph with constants of the same values. This function works similarly to convert_variables_to_constants_v2, but it retrieves the constant values from a Session instead of from a ConcreteFunction. This is useful when converting graphs generated from TensorFlow V1, where ConcreteFunctions are not available. This also differs from graph_util.convert_variables_to_constants in that it supports resource variables when V2 control flow constructions are present. Args: session: Active TensorFlow session containing the variables. graph_def: A GraphDef to convert. output_node_names: List of name strings for the result nodes of the graph. variable_names_allowlist: The set of variable names to convert (by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants. Returns: An optimized GraphDef." 3849,_GraphMerger,tensorflow/tensorflow/python/framework/convert_to_constants_test.py,64,class,GraphDef merging methods for testing purposes. 3850,VariablesToConstantsTest,tensorflow/tensorflow/python/framework/convert_to_constants_test.py,145,class, 3851,ConvertVariablesToConstantsSessionTest,tensorflow/tensorflow/python/framework/convert_to_constants_test.py,524,class, 3852,check_valid,tensorflow/tensorflow/python/framework/device.py,32,function,"Check that a device spec is valid. Args: spec: a string. Raises: An exception if the spec is invalid." 3853,is_device_spec,tensorflow/tensorflow/python/framework/device.py,45,function,Abstract away the fact that DeviceSpecV2 is the base class. 3854,canonical_name,tensorflow/tensorflow/python/framework/device.py,50,function,Returns a canonical name for the given `DeviceSpec` or device name. 3855,merge_device,tensorflow/tensorflow/python/framework/device.py,67,function,"Returns a device function that merges device specifications. This can be used to merge partial specifications of devices. The innermost setting for a device field takes precedence. For example: with tf.device(merge_device(""/device:GPU:0"")): # Nodes created here have device ""/device:GPU:0"" with tf.device(merge_device(""/job:worker"")): # Nodes created here have device ""/job:worker/device:GPU:0"" with tf.device(merge_device(""/device:CPU:0"")): # Nodes created here have device ""/job:worker/device:CPU:0"" with tf.device(merge_device(""/job:ps"")): # Nodes created here have device ""/job:ps/device:CPU:0"" Args: spec: A `DeviceSpec` or a device spec string (partially) describing the device that should be used for all nodes created in the scope of the returned device function's with block.
Returns: A MergeDevice object with the above-described behavior. Raises: ValueError: if the spec was not valid." 3856,MergeDevice,tensorflow/tensorflow/python/framework/device.py,107,class,"Wraps a device specification (DeviceSpec or str) with merge functionality. When called, this class will merge a node_def with its own spec. It also exposes a `shortcut_string_merge` method which can significantly improve performance of device placement." 3857,_as_str_or_none,tensorflow/tensorflow/python/framework/device_spec.py,34,function, 3858,_as_int_or_none,tensorflow/tensorflow/python/framework/device_spec.py,38,function, 3859,_as_device_str_or_none,tensorflow/tensorflow/python/framework/device_spec.py,42,function, 3860,DeviceSpecV2,tensorflow/tensorflow/python/framework/device_spec.py,51,class,"Represents a (possibly partial) specification for a TensorFlow device. `DeviceSpec`s are used throughout TensorFlow to describe where state is stored and computations occur. Using `DeviceSpec` allows you to parse device spec strings to verify their validity, merge them or compose them programmatically. Example: ```python # Place the operations on device ""GPU:0"" in the ""ps"" job. device_spec = DeviceSpec(job=""ps"", device_type=""GPU"", device_index=0) with tf.device(device_spec.to_string()): # Both my_var and squared_var will be placed on /job:ps/device:GPU:0. my_var = tf.Variable(..., name=""my_variable"") squared_var = tf.square(my_var) ``` With eager execution disabled (by default in TensorFlow 1.x and by calling disable_eager_execution() in TensorFlow 2.x), the following syntax can be used: ```python tf.compat.v1.disable_eager_execution() # Same as previous device_spec = DeviceSpec(job=""ps"", device_type=""GPU"", device_index=0) # No need of .to_string() method. with tf.device(device_spec): my_var = tf.Variable(..., name=""my_variable"") squared_var = tf.square(my_var) ``` If a `DeviceSpec` is partially specified, it will be merged with other `DeviceSpec`s according to the scope in which it is defined. `DeviceSpec` components defined in inner scopes take precedence over those defined in outer scopes. ```python gpu0_spec = DeviceSpec(job=""ps"", device_type=""GPU"", device_index=0) with tf.device(DeviceSpec(job=""train"").to_string()): with tf.device(gpu0_spec.to_string()): # Nodes created here will be assigned to /job:ps/device:GPU:0. with tf.device(DeviceSpec(device_type=""GPU"", device_index=1).to_string()): # Nodes created here will be assigned to /job:train/device:GPU:1. ``` A `DeviceSpec` consists of 5 components -- each of which is optionally specified: * Job: The job name. * Replica: The replica index. * Task: The task index. * Device type: The device type string (e.g. ""CPU"" or ""GPU""). * Device index: The device index." 3861,DeviceSpecV1,tensorflow/tensorflow/python/framework/device_spec.py,397,class, 3862,DeviceSpecTest,tensorflow/tensorflow/python/framework/device_spec_test.py,32,class, 3863,DeviceTest,tensorflow/tensorflow/python/framework/device_test.py,36,class, 3864,DType,tensorflow/tensorflow/python/framework/dtypes.py,37,class,"Represents the type of the elements in a `Tensor`. The following `DType` objects are defined: * `tf.float16`: 16-bit half-precision floating-point. * `tf.float32`: 32-bit single-precision floating-point. * `tf.float64`: 64-bit double-precision floating-point. * `tf.bfloat16`: 16-bit truncated floating-point. * `tf.complex64`: 64-bit single-precision complex. * `tf.complex128`: 128-bit double-precision complex. * `tf.int8`: 8-bit signed integer. 
* `tf.uint8`: 8-bit unsigned integer. * `tf.uint16`: 16-bit unsigned integer. * `tf.uint32`: 32-bit unsigned integer. * `tf.uint64`: 64-bit unsigned integer. * `tf.int16`: 16-bit signed integer. * `tf.int32`: 32-bit signed integer. * `tf.int64`: 64-bit signed integer. * `tf.bool`: Boolean. * `tf.string`: String. * `tf.qint8`: Quantized 8-bit signed integer. * `tf.quint8`: Quantized 8-bit unsigned integer. * `tf.qint16`: Quantized 16-bit signed integer. * `tf.quint16`: Quantized 16-bit unsigned integer. * `tf.qint32`: Quantized 32-bit signed integer. * `tf.resource`: Handle to a mutable resource. * `tf.variant`: Values of arbitrary types. The `tf.as_dtype()` function converts numpy types and string type names to a `DType` object." 3865,as_dtype,tensorflow/tensorflow/python/framework/dtypes.py,607,function,"Converts the given `type_value` to a `DType`. Note: `DType` values are interned. When passed a new `DType` object, `as_dtype` always returns the interned value. Args: type_value: A value that can be converted to a `tf.DType` object. This may currently be a `tf.DType` object, a [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto), a string type name, or a `numpy.dtype`. Returns: A `DType` corresponding to `type_value`. Raises: TypeError: If `type_value` cannot be converted to a `DType`." 3866,_is_numeric_dtype_enum,tensorflow/tensorflow/python/framework/dtypes_test.py,30,function, 3867,TypesTest,tensorflow/tensorflow/python/framework/dtypes_test.py,39,class, 3868,parse_message,tensorflow/tensorflow/python/framework/error_interpolation.py,69,function,"Parses the message. Splits the message into separators and tags. Tags are named tuples representing the string {{type name}} and they are separated by separators. For example, in ""123{{node Foo}}456{{node Bar}}789"", there are two tags and three separators. The separators are the numeric characters. Args: message: String to parse Returns: (list of separator strings, list of _ParseTags). For example, if message is ""123{{node Foo}}456"" then this function returns ([""123"", ""456""], [_ParseTag(""node"", ""Foo"")])" 3869,_compute_device_summary_from_list,tensorflow/tensorflow/python/framework/error_interpolation.py,101,function,"Return a summary of an op's device function stack. Args: name: The name of the op. device_assignment_list: The op._device_assignments list. prefix: An optional string prefix used before each line of the multi-line string returned by this function. Returns: A multi-line string similar to: Device assignments active during op 'foo' creation: with tf.device(/cpu:0): with tf.device(some_func): The first line will have no padding to its left by default. Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation." 3870,_compute_device_assignment_summary_from_op,tensorflow/tensorflow/python/framework/error_interpolation.py,143,function, 3871,_compute_colocation_summary_from_dict,tensorflow/tensorflow/python/framework/error_interpolation.py,150,function,"Return a summary of an op's colocation stack. Args: name: The op name. colocation_dict: The op._colocation_dict. prefix: An optional string prefix used before each line of the multi-line string returned by this function. Returns: A multi-line string similar to: Node-device colocations active during op creation: with tf.compat.v1.colocate_with(test_node_1): with tf.compat.v1.colocate_with(test_node_2): The first line will have no padding to its left by default.
Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation." 3872,_compute_colocation_summary_from_op,tensorflow/tensorflow/python/framework/error_interpolation.py,192,function,"Fetch colocation file, line, and nesting and return a summary string." 3873,_is_framework_filename,tensorflow/tensorflow/python/framework/error_interpolation.py,200,function,"Returns whether a filename should be considered a part of the framework. A file is part of the framework if it does not match a pattern in _EXTERNAL_FILENAME_PATTERNS and it either matches a pattern in _FRAMEWORK_FILENAME_PATTERNS or starts with a _FRAMEWORK_PATH_PREFIXES prefix. Args: filename: A filename string. Returns: Whether the filename should be considered to be internal to the TensorFlow framework for the purposes of reporting errors." 3874,_find_index_of_defining_frame,tensorflow/tensorflow/python/framework/error_interpolation.py,226,function,"Return index in op.traceback with first 'useful' frame. This method reads through the stack stored in op.traceback looking for the innermost frame which (hopefully) belongs to the caller. It accomplishes this by rejecting frames deemed to be part of the TensorFlow framework (by pattern matching the filename). Args: traceback: A list of traceback frames (as from Operation.traceback). Returns: Integer index into op.traceback where the first non-TF file was found (innermost to outermost), or 0 (for the outermost stack frame) if all files came from TensorFlow." 3875,_get_defining_frame,tensorflow/tensorflow/python/framework/error_interpolation.py,254,function,Find and return stack frame where op was defined. 3876,_compute_useful_frames,tensorflow/tensorflow/python/framework/error_interpolation.py,260,function,"Return a list of frames, which form a 'useful' stack. Starting from the defining frame to the outermost one, this method computes the contiguous portion of the 'useful' stack trace and returns the selected frames. Args: traceback: A list of traceback frames (as from Operation.traceback). num: total number of frames to return. Returns: A list of frames." 3877,create_graph_debug_info_def,tensorflow/tensorflow/python/framework/error_interpolation.py,284,function,"Constructs and returns a `GraphDebugInfo` protocol buffer. Args: func_named_operations: An iterable of (func_name, op.Operation) tuples where the Operation instances have a _traceback member. The func_name should be the empty string for operations in the top-level Graph. Returns: GraphDebugInfo protocol buffer. Raises: TypeError: If the arguments are not of the correct proto buffer type." 3878,_compute_field_dict,tensorflow/tensorflow/python/framework/error_interpolation.py,340,function,"Return a dictionary mapping interpolation tokens to values. Args: op: op.Operation object having a _traceback member. strip_file_prefix: The common path in the stacktrace. We remove the prefix from the file names. Returns: A dictionary mapping string tokens to string values. The keys are shown below along with example values. { ""file"": ""tool_utils.py"", ""line"": ""124"", ""defined_at"": "" (defined at tool_utils.py:124)"", ""colocations"": '''Node-device colocations active during op creation: with tf.compat.v1.colocate_with(test_node_1): with tf.compat.v1.colocate_with(test_node_2): ''' ""devices"": '''Device assignments active during op 'foo' creation: with tf.device(/cpu:0): with tf.device(some_func): ''' ""devs_and_colocs"": A concatenation of colocations and devices, e.g.
'''Node-device colocations active during op creation: with tf.compat.v1.colocate_with(test_node_1): with tf.compat.v1.colocate_with(test_node_2): ''' Device assignments active during op 'foo' creation: with tf.device(/cpu:0): with tf.device(some_func): ''' }" 3879,traceback_files_common_prefix,tensorflow/tensorflow/python/framework/error_interpolation.py,404,function,"Determines the common prefix from the paths of the stacktrace of 'all_ops'. For example, if the paths are '/foo/bar/baz/' and '/foo/car', this would return '/foo'. Args: all_ops: All the input nodes in the form of a list of lists of ops. Returns: The common prefix." 3880,_sources_for_node,tensorflow/tensorflow/python/framework/error_interpolation.py,428,function,"Gets the input op nodes for 'node'. Args: node: The node. graph: The graph containing the node. Returns: The unique input nodes." 3881,_build_error_message,tensorflow/tensorflow/python/framework/error_interpolation.py,455,function,"Returns the formatted error message for the given op. Args: op: The node. input_ops: The input nodes to the 'op' node. common_prefix: The prefix path common to the stacktrace of inputs. Returns: The formatted error message for the given op. The error message also includes the information about the input sources for the given op." 3882,interpolate,tensorflow/tensorflow/python/framework/error_interpolation.py,487,function,"Interpolates an error message. The error message can contain tags of the form `{{type name}}` which will be replaced. For example: ""{{node <name>}}"" would get expanded to: ""node <name> (defined at <path>)"". Args: error_message: A string to interpolate. graph: ops.Graph object containing all nodes referenced in the error message. Returns: The string with tags of the form {{type name}} interpolated." 3883,_make_frame_with_filename,tensorflow/tensorflow/python/framework/error_interpolation_test.py,38,function,Return a copy of an existing stack frame with a new filename. 3884,_modify_op_stack_with_filenames,tensorflow/tensorflow/python/framework/error_interpolation_test.py,48,function,Replace op._traceback with a new traceback using special filenames. 3885,ComputeDeviceSummaryFromOpTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,70,class, 3886,ComputeColocationSummaryFromOpTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,98,class, 3887,CreateGraphDebugInfoDefTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,130,class, 3888,InterpolateFilenamesAndLineNumbersTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,189,class, 3889,InputNodesTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,263,class, 3890,InterpolateDeviceSummaryTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,294,class, 3891,InterpolateColocationSummaryTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,339,class, 3892,IsFrameworkFilenameTest,tensorflow/tensorflow/python/framework/error_interpolation_test.py,390,class, 3893,_compact_stack_trace,tensorflow/tensorflow/python/framework/errors_impl.py,35,function,Returns a traceback for `op` with common file prefixes stripped. 3894,InaccessibleTensorError,tensorflow/tensorflow/python/framework/errors_impl.py,47,class, 3895,OperatorNotAllowedInGraphError,tensorflow/tensorflow/python/framework/errors_impl.py,52,class,"An error raised for an unsupported operator in Graph execution. For example, using a `tf.Tensor` as a Python `bool` in Graph execution is not allowed."
3896,OpError,tensorflow/tensorflow/python/framework/errors_impl.py,63,class,"A generic error that is raised when TensorFlow execution fails. Whenever possible, the session will raise a more specific subclass of `OpError` from the `tf.errors` module." 3897,CancelledError,tensorflow/tensorflow/python/framework/errors_impl.py,217,class,"Raised when an operation or step is cancelled. For example, a long-running operation (e.g. `tf.QueueBase.enqueue`) may be cancelled by running another operation (e.g. `tf.QueueBase.close`), or by `tf.Session.close`. A step that is running such a long-running operation will fail by raising `CancelledError`. @@__init__" 3898,UnknownError,tensorflow/tensorflow/python/framework/errors_impl.py,238,class,"Unknown error. An example of where this error may be returned is if a Status value received from another address space belongs to an error-space that is not known to this address space. Also, errors raised by APIs that do not return enough error information may be converted to this error. @@__init__" 3899,InvalidArgumentError,tensorflow/tensorflow/python/framework/errors_impl.py,256,class,"Raised when an operation receives an invalid argument. This may occur, for example, if an operation receives an input tensor that has an invalid value or shape. For example, the `tf.matmul` op will raise this error if it receives an input that is not a matrix, and the `tf.reshape` op will raise this error if the new shape does not match the number of elements in the input tensor. @@__init__" 3900,DeadlineExceededError,tensorflow/tensorflow/python/framework/errors_impl.py,277,class,"Raised when a deadline expires before an operation could complete. This exception is not currently used. @@__init__" 3901,NotFoundError,tensorflow/tensorflow/python/framework/errors_impl.py,292,class,"Raised when a requested entity (e.g., a file or directory) was not found. For example, running the `tf.WholeFileReader.read` operation could raise `NotFoundError` if it receives the name of a file that does not exist. @@__init__" 3902,AlreadyExistsError,tensorflow/tensorflow/python/framework/errors_impl.py,309,class,"Raised when an entity that we attempted to create already exists. For example, running an operation that saves a file (e.g. `tf.train.Saver.save`) could potentially raise this exception if an explicit filename for an existing file was passed. @@__init__" 3903,PermissionDeniedError,tensorflow/tensorflow/python/framework/errors_impl.py,327,class,"Raised when the caller does not have permission to run an operation. For example, running the `tf.WholeFileReader.read` operation could raise `PermissionDeniedError` if it receives the name of a file for which the user does not have the read file permission. @@__init__" 3904,UnauthenticatedError,tensorflow/tensorflow/python/framework/errors_impl.py,345,class,"The request does not have valid authentication credentials. This exception is not currently used. @@__init__" 3905,ResourceExhaustedError,tensorflow/tensorflow/python/framework/errors_impl.py,360,class,"Some resource has been exhausted. For example, this error might be raised if a per-user quota is exhausted, or perhaps the entire file system is out of space. @@__init__" 3906,FailedPreconditionError,tensorflow/tensorflow/python/framework/errors_impl.py,376,class,"Operation was rejected because the system is not in a state to execute it. This exception is most commonly raised when running an operation that reads a `tf.Variable` before it has been initialized.
@@__init__" 3907,AbortedError,tensorflow/tensorflow/python/framework/errors_impl.py,393,class,"The operation was aborted, typically due to a concurrent action. For example, running a `tf.QueueBase.enqueue` operation may raise `AbortedError` if a `tf.QueueBase.close` operation previously ran. @@__init__" 3908,OutOfRangeError,tensorflow/tensorflow/python/framework/errors_impl.py,411,class,"Raised when an operation iterates past the valid input range. This exception is raised in ""end-of-file"" conditions, such as when a `tf.QueueBase.dequeue` operation is blocked on an empty queue, and a `tf.QueueBase.close` operation executes. @@__init__" 3909,UnimplementedError,tensorflow/tensorflow/python/framework/errors_impl.py,430,class,"Raised when an operation has not been implemented. Some operations may raise this error when passed otherwise-valid arguments that it does not currently support. For example, running the `tf.nn.max_pool2d` operation would raise this error if pooling was requested on the batch dimension, because this is not yet supported. @@__init__" 3910,InternalError,tensorflow/tensorflow/python/framework/errors_impl.py,449,class,"Raised when the system experiences an internal error. This exception is raised when some invariant expected by the runtime has been broken. Catching this exception is not recommended. @@__init__" 3911,UnavailableError,tensorflow/tensorflow/python/framework/errors_impl.py,464,class,"Raised when the runtime is currently unavailable. This exception is not currently used. @@__init__" 3912,DataLossError,tensorflow/tensorflow/python/framework/errors_impl.py,479,class,"Raised when unrecoverable data loss or corruption is encountered. For example, this may be raised by running a `tf.WholeFileReader.read` operation, if the file is truncated while it is being read. @@__init__" 3913,exception_type_from_error_code,tensorflow/tensorflow/python/framework/errors_impl.py,520,function, 3914,error_code_from_exception_type,tensorflow/tensorflow/python/framework/errors_impl.py,525,function, 3915,_make_specific_exception,tensorflow/tensorflow/python/framework/errors_impl.py,533,function, 3916,raise_exception_on_not_ok_status,tensorflow/tensorflow/python/framework/errors_impl.py,547,class,Context manager to check for C API status. 3917,ErrorsTest,tensorflow/tensorflow/python/framework/errors_test.py,34,class, 3918,FileSystemTest,tensorflow/tensorflow/python/framework/file_system_test.py,31,class, 3919,UnknownArgument,tensorflow/tensorflow/python/framework/func_graph.py,64,class,Signifies an argument which is not currently handled. 3920,convert_structure_to_signature,tensorflow/tensorflow/python/framework/func_graph.py,69,function,"Convert a potentially nested structure to a signature. Args: structure: Structure to convert, where top level collection is a list or a tuple. arg_names: Optional list of arguments that has equal number of elements as `structure` and is used for naming corresponding TensorSpecs. Returns: Identical structure that has TensorSpec objects instead of Tensors and UnknownArgument instead of any unsupported types." 3921,FuncGraph,tensorflow/tensorflow/python/framework/func_graph.py,134,class,"Graph representing a function body. Attributes: name: The name of the function. inputs: Placeholder tensors representing the inputs to this function. The tensors are in this FuncGraph. This represents ""regular"" inputs as well as captured inputs (i.e. the values of self.captures), with the regular inputs coming first. 
outputs: Tensors that will be returned by this function. The tensors are in this FuncGraph. control_outputs: Operations that must be executed before the function represented by this graph can be said to have been executed. structured_input_signature: A tuple of (args, kwargs), which are both possibly-nested python objects that were received by this function. Note that these structures might contain Python `None`s. structured_outputs: A possibly-nested python object which will be returned by this function. The Tensors in this structure are the same as those of self.outputs. Note that this structure might contain Python `None`s. variables: Variables that should be watched during function execution. outer_graph: The graph this function is defined in. May be another FuncGraph or the global default Graph. captures: Maps external tensor -> internal tensor (i.e. input placeholder). The entries are in the order they were captured. control_captures: Set of external ops on which this graph has a control dependency. seed: The graph-level random seed. capture_by_value: If True, the func graph will capture Variables by value instead of reference." 3922,func_graph_from_py_func,tensorflow/tensorflow/python/framework/func_graph.py,801,function,"Returns a `FuncGraph` generated from `python_func`. Args: name: an identifier for the function. python_func: the Python function to trace. args: the positional args with which the Python function should be called; ignored if a signature is provided. kwargs: the keyword args with which the Python function should be called; ignored if a signature is provided. signature: a possibly nested sequence of `TensorSpecs` specifying the shapes and dtypes of the arguments. When a signature is provided, `args` and `kwargs` are ignored, and `python_func` is traced with Tensors conforming to `signature`. If `None`, the shapes and dtypes are inferred from the inputs. func_graph: Optional. An instance of FuncGraph. If provided, we will use this graph, else a new one is built and returned. autograph: whether to use autograph to compile `python_func`. See https://www.tensorflow.org/guide/autograph for more information. autograph_options: additional knobs to control when `autograph=True`. See https://www.tensorflow.org/guide/autograph for more information. add_control_dependencies: If True, automatically adds control dependencies to ensure program order matches execution order and stateful ops always execute. arg_names: Optional list of argument names, used to give input placeholders recognizable names. op_return_value: Optional. A Tensor. If set and `python_func` returns Operations, those return values will be replaced with this value. If not set, returning an Operation triggers an error. collections: a dictionary of collections this FuncGraph should start with. If not specified (None), the FuncGraph will read (but not write to) the outer graph's collections that are not allowlisted, and both read and write to the outer graph's collections that are allowlisted. The current allowlisted collections are the global variables, the local variables, and the trainable variables. Defaults to None. capture_by_value: An optional boolean. If True, the func graph will capture Variables by value instead of reference. By default inherit from outer graphs, and failing that will default to False. override_flat_arg_shapes: An optional list of instances that are either `None` or `TensorShape`. The length must match that of `nest.flatten((args, kwargs), expand_composites=True)`.
The entries containing value `None` must match entries in flattened arguments containing non-tensors, while entries containing a `TensorShape` must match entries in the flattened arguments containing tensors. Returns: A FuncGraph. Raises: TypeError: If any of `python_func`'s return values is neither `None` nor a `Tensor`. ValueError: If both `signature` and `override_flat_arg_shapes` are passed in." 3923,maybe_captured,tensorflow/tensorflow/python/framework/func_graph.py,1037,function,"If `tensor` is a captured value placeholder, returns the original captured value. Args: tensor: Tensor. Returns: A tensor, potentially from a different Graph/FuncGraph." 3924,device_stack_has_callable,tensorflow/tensorflow/python/framework/func_graph.py,1055,function,Checks whether a device stack contains a callable. 3925,check_mutation,tensorflow/tensorflow/python/framework/func_graph.py,1061,function,Check if two lists of arguments are exactly the same. 3926,flatten,tensorflow/tensorflow/python/framework/func_graph.py,1081,function,"Like nest.flatten w/ expand_composites, but returns flow for TensorArrays. Args: sequence: A nested structure of Tensors, CompositeTensors, and TensorArrays. Returns: A list of tensors." 3927,pack_sequence_as,tensorflow/tensorflow/python/framework/func_graph.py,1098,function,"Like `nest.pack_sequence_as` but also builds TensorArrays from flows. Args: structure: The structure to pack into. May contain Tensors, CompositeTensors, or TensorArrays. flat_sequence: An iterable containing tensors. Returns: A nested structure. Raises: AssertionError: if `structure` and `flat_sequence` are not compatible." 3928,_create_substitute_placeholder,tensorflow/tensorflow/python/framework/func_graph.py,1123,function,Creates a placeholder for `value` and propagates shape info to it. 3929,_get_defun_inputs_from_args,tensorflow/tensorflow/python/framework/func_graph.py,1136,function,Maps Python function positional args to graph-construction inputs. 3930,_get_composite_tensor_spec,tensorflow/tensorflow/python/framework/func_graph.py,1142,function,"Returns the TypeSpec for x if it's a composite tensor, or x otherwise." 3931,_get_defun_inputs,tensorflow/tensorflow/python/framework/func_graph.py,1148,function,"Maps python function args to graph-construction inputs. Args: args: A flat list of user-specified arguments. names: A list of strings with user-specified argument names, same length as `args`. May be `None`, in which case a generic name is used. structure: The original argument list or dictionary. flat_shapes: A flat list of values that are either `None` or instances of `TensorShape`. If provided, then length must match that of `nest.flatten(args, expand_composites=True)`; and locations where `args` are instances of `Tensor` must have a corresponding `TensorShape` in `flat_shapes`. May be `None`, in which case exact shapes are read directly from the args. Returns: Placeholders with the same structure as `structure`. Raises: RuntimeError: if `flat_shapes` is provided, but `len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`. RuntimeError: if a shape from `flat_shapes` is not None for an argument that is not a `Tensor`, `TensorSpec`, or `ResourceVariable`." 3932,_get_defun_inputs_from_kwargs,tensorflow/tensorflow/python/framework/func_graph.py,1258,function,Maps Python function keyword args to graph-construction inputs. 3933,dismantle_func_graph,tensorflow/tensorflow/python/framework/func_graph.py,1269,function,"Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when the FuncGraph goes out of scope, e.g. in tests using defun with @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True). Args: func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable after this function." 3934,override_func_graph_name_scope,tensorflow/tensorflow/python/framework/func_graph.py,1284,function, 3935,Defun,tensorflow/tensorflow/python/framework/function.py,45,class,"Decorator used to define TensorFlow functions. Use this decorator to make a Python function usable directly as a TensorFlow function. The decorated function must add ops to the default graph and return zero or more `Tensor` objects. Call the decorator with named arguments, one for each argument of the function to decorate, with the expected type of the argument as value. For example, if the function to decorate accepts two `tf.float32` arguments named `x` and `y`, call the decorator with: @Defun(tf.float32, tf.float32) def foo(x, y): ... When you call the decorated function, it adds the `call` ops to the default graph. In addition, it adds the definition of the function into the default graph. Because the addition of the function into the graph is deferred, the decorator can be used anywhere in the program. Any variables created inside of the function are hoisted into the outer graph. Note that the variables are created in the variable scope that was active during the first call to the function. Subsequent function calls will refer to the same set of variables. Definitions of functions in a graph are frozen as soon as the graph is used to create a session. However, new functions and new calls to existing functions may be added to the graph, with the new functions themselves becoming immediately frozen. Example, but also see the [How To on functions](link_needed). ```python # Defining the function. @tf.Defun(tf.float32, tf.float32) def MyFunc(x, y): return x + y, x - y # Building the graph. a = tf.constant([1.0]) b = tf.constant([2.0]) c, d = MyFunc(a, b, name='mycall') ```" 3936,_DefinedFunctionDeleter,tensorflow/tensorflow/python/framework/function.py,194,class,Unregister function from eager context. 3937,_DefinedFunction,tensorflow/tensorflow/python/framework/function.py,218,class,"_DefinedFunction encapsulates a function definition and its properties. Attributes: name: The function name. definition: The definition of this function. A FunctionDef proto. grad_func_name: If not None, the name of this function's gradient function. python_grad_func: A python callable implementing the gradient of the function python-side." 3938,_OverloadedFunction,tensorflow/tensorflow/python/framework/function.py,584,class,"_OverloadedFunction encapsulates an overloaded function. _OverloadedFunction maintains a mapping from input types to instantiated _DefinedFunction in self._overload." 3939,_FuncGraph,tensorflow/tensorflow/python/framework/function.py,683,class,"A helper for constructing a function. _FuncGraph overrides ops.Graph's create_op() so that we can keep track of all inputs into every op created inside the function. If any input is from other graphs, we keep track of it in self.capture and substitute the input with a placeholder. Each captured input's corresponding placeholder is converted into a function argument and the caller passes in the captured tensor." 3940,func_graph_from_py_func,tensorflow/tensorflow/python/framework/function.py,907,function,"Returns a _FuncGraph generated from `func`.
Args: func: A Python callable which constructs a TF function body. The arguments must correspond to `arg_types`. Returns a value or list/tuple of values. No returned value can be None. arg_names: A sequence of strings for the function argument names. arg_types: A sequence of the function's argument types. name: The function name. If None, the name is derived from `func`. capture_by_value: boolean. If True, captured values will be copied into the function body. device: device name or function. colocation_stack: A colocation stack (list) the _FuncGraph should use. container: A container name the _FuncGraph should start with. collections_ref: A reference to a collections dict the _FuncGraph should use internally. arg_shapes: A sequence of the function's argument shapes. allowlisted_stateful_ops: A set of ops that if stateful we ignore and re-create. capture_resource_var_by_value: Boolean (defaults to True). If False, captured resource variable returns the handle instead of value. Returns: A _FuncGraph. Raises: ValueError: if func returns None." 3941,_is_guaranteed_const,tensorflow/tensorflow/python/framework/function.py,997,function,"Determines whether `tensor` is guaranteed to be a constant. A tensor is guaranteed to be a constant if either it was produced by a `GuaranteeConst` op or if all of its children are guaranteed to be constants. Args: tensor: The tensor for which to determine const-ness. Returns: True if `tensor` is guaranteed to be a constant, False otherwise." 3942,_call,tensorflow/tensorflow/python/framework/function.py,1048,function,"Adds a node calling a function. This adds a `call` op to the default graph that calls the function of signature `sig`, passing the tensors in `inputs` as arguments. It returns the outputs of the call, which are one or more tensors. `sig` is a `_DefinedFunction` object. You can pass an optional keyword parameter `name=string` to name the added operation. You can pass an optional keyword parameter `noinline=True|False` to instruct the runtime not to inline the function body into the call site. Args: sig: OpDefArg. The signature of the function. *inputs: arguments to the function. **kwargs: Optional keyword arguments. Can only contain 'name' or 'noinline'. Returns: A 2-element tuple. First element: a Tensor if the function returns a single value; a list of Tensors if the function returns multiple values; the Operation if the function returns no values. Second element: the Operation. Raises: ValueError: if the arguments are invalid." 3943,_from_definition,tensorflow/tensorflow/python/framework/function.py,1100,function,"Creates a _DefinedFunction initialized from a FunctionDef proto. Args: fdef: a FunctionDef grad_func: a _DefinedFunction or None Returns: A _DefinedFunction representing fdef" 3944,from_library,tensorflow/tensorflow/python/framework/function.py,1138,function,"Creates _DefinedFunctions initialized from a FunctionDefLibrary proto. This method handles assigning the correct gradient functions to each function. Args: lib: a FunctionDefLibrary Returns: A list of _DefinedFunctions Raises: ValueError: `lib` is invalid" 3945,_get_experimental_kwarg_as_attr,tensorflow/tensorflow/python/framework/function.py,1202,function,Creates an AttrValue for a python object. 3946,_get_kwarg_as_str_attr,tensorflow/tensorflow/python/framework/function.py,1217,function,Creates an AttrValue for a python object. 3947,_parse_kwargs_as_attrs,tensorflow/tensorflow/python/framework/function.py,1226,function,Parses **kwargs into a node's attributes.
3948,get_extra_vars,tensorflow/tensorflow/python/framework/function.py,1267,function,"Returns the variables captured by the function. Returns: If the default graph is being used to define a function, the returned list of variables are those created inside the function body so far. Otherwise, returns an empty list." 3949,get_extra_inputs,tensorflow/tensorflow/python/framework/function.py,1282,function,"Returns the input tensors captured by the function. Returns: If the default graph is being used to define a function, the returned list of tensors are those accessed inside the function body but defined outside the function body so far. Otherwise, returns an empty list." 3950,get_extra_args,tensorflow/tensorflow/python/framework/function.py,1298,function,"Returns the corresponding function arguments for the captured inputs. Returns: If the default graph is being used to define a function, the returned list of placeholders are those used inside the function body corresponding to those returned by get_extra_inputs(). Otherwise, returns an empty list." 3951,_type_list_to_str,tensorflow/tensorflow/python/framework/function.py,1314,function, 3952,function_def_from_tf_function,tensorflow/tensorflow/python/framework/function.py,1346,function,Converts a SWIG-wrapped TF_Function* to a FunctionDef proto. 3953,function_def_to_graph,tensorflow/tensorflow/python/framework/function_def_to_graph.py,33,function,"Converts a FunctionDef to a FuncGraph (a subclass of Graph). The returned FuncGraph's `name`, `inputs` and `outputs` fields will be set. The input tensors are represented as placeholders. Note: `FuncGraph.inputs` and `FuncGraph.captures` are not set and may be set by the caller. Args: fdef: FunctionDef. input_shapes: Optional. A list of TensorShape objects of the shapes of function inputs. Defaults to the function's ""_input_shapes"" attribute. If specified, its length must match the length of `fdef.signature.input_arg`. If a shape is None, the corresponding input placeholder will have unknown shape. Returns: A FuncGraph." 3954,is_function,tensorflow/tensorflow/python/framework/function_def_to_graph.py,107,function,Checks for a function definition with `fname` in the current context. 3955,function_def_to_graph_def,tensorflow/tensorflow/python/framework/function_def_to_graph.py,122,function,"Convert a FunctionDef to a GraphDef. Steps: 1. Creates placeholder nodes corresponding to inputs in `FunctionDef.signature.input_arg`. 2. Adds NodeDefs in `FunctionDef.node_def` to `GraphDef.node`. 3. Renames inputs of all nodes to use the convention of GraphDef instead of FunctionDef. See comment on `FunctionDef.node_def` on how the tensor naming in FunctionDefs is different from GraphDefs. Args: fdef: FunctionDef. input_shapes: Optional. A list of TensorShape objects of the shapes of function inputs. If specified, its length must match the length of `fdef.signature.input_arg`. If a shape is None, the corresponding input placeholder will have unknown shape. Returns: A tuple of (GraphDef, dict). The dict contains a mapping from nested tensor names (in FunctionDef) to flattened names (in GraphDef). Raises: ValueError: If the length of input_shapes does not match the number of input_args or if the FunctionDef is invalid."
3956,_get_num_args,tensorflow/tensorflow/python/framework/function_def_to_graph.py,258,function, 3957,FunctionDefToGraphTest,tensorflow/tensorflow/python/framework/function_def_to_graph_test.py,37,class, 3958,FunctionDefToGraphDefTest,tensorflow/tensorflow/python/framework/function_def_to_graph_test.py,103,class, 3959,_OptimizerOptions,tensorflow/tensorflow/python/framework/function_test.py,57,function, 3960,FunctionTest,tensorflow/tensorflow/python/framework/function_test.py,89,class,"Test methods for verifying Function support. These test methods are used as mix-ins in two test cases: with and without C API support." 3961,FunctionsFromProtos,tensorflow/tensorflow/python/framework/function_test.py,1168,class, 3962,FunctionOverloadTest,tensorflow/tensorflow/python/framework/function_test.py,1406,class, 3963,FunctionCaptureByValueTest,tensorflow/tensorflow/python/framework/function_test.py,1459,class, 3964,UnrollLSTMTest,tensorflow/tensorflow/python/framework/function_test.py,1489,class, 3965,FunctionInlineControlTest,tensorflow/tensorflow/python/framework/function_test.py,1625,class, 3966,ModuleFunctionTest,tensorflow/tensorflow/python/framework/function_test.py,1684,class, 3967,VariableHoistingTest,tensorflow/tensorflow/python/framework/function_test.py,1708,class, 3968,TemplateTest,tensorflow/tensorflow/python/framework/function_test.py,1781,class, 3969,DevicePlacementTest,tensorflow/tensorflow/python/framework/function_test.py,1832,class, 3970,compute_capability_from_device_desc,tensorflow/tensorflow/python/framework/gpu_util.py,35,function,"Returns the GpuInfo given a DeviceAttributes proto. Args: device_attrs: A DeviceAttributes proto. Returns: A gpu_info tuple. Both fields are None if `device_attrs` does not have a valid physical_device_desc field." 3971,run_benchmark,tensorflow/tensorflow/python/framework/graph_building_benchmark.py,44,function, 3972,SingleOpBenchmarks,tensorflow/tensorflow/python/framework/graph_building_benchmark.py,52,class,Benchmark for graph building time of ops. 3973,write_graph,tensorflow/tensorflow/python/framework/graph_io.py,31,function,"Writes a graph proto to a file. The graph is written as a text proto unless `as_text` is `False`. ```python v = tf.Variable(0, name='my_variable') sess = tf.compat.v1.Session() tf.io.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt') ``` or ```python v = tf.Variable(0, name='my_variable') sess = tf.compat.v1.Session() tf.io.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt') ``` Args: graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer. logdir: Directory where to write the graph. This can refer to remote filesystems, such as Google Cloud Storage (GCS). name: Filename for the graph. as_text: If `True`, writes the graph as an ASCII proto. Returns: The path of the output proto file." 3974,_make_argname_from_tensor_name,tensorflow/tensorflow/python/framework/graph_to_function_def.py,29,function, 3975,_tensor_to_argdef,tensorflow/tensorflow/python/framework/graph_to_function_def.py,33,function,"Convert tensor t to an argdef, with a specified name or a unique name." 3976,_is_in_placeholders,tensorflow/tensorflow/python/framework/graph_to_function_def.py,54,function,Checks whether any output of this op is in func_arg_placeholders.
3977,_get_node_def,tensorflow/tensorflow/python/framework/graph_to_function_def.py,60,function, 3978,_get_op_def,tensorflow/tensorflow/python/framework/graph_to_function_def.py,64,function, 3979,_create_input_dict,tensorflow/tensorflow/python/framework/graph_to_function_def.py,68,function,Create a mapping from graph tensor names to function tensor names. 3980,_add_op_node,tensorflow/tensorflow/python/framework/graph_to_function_def.py,99,function,Converts an op to a function def node and adds it to `func`. 3981,graph_to_function_def,tensorflow/tensorflow/python/framework/graph_to_function_def.py,122,function,"Returns `graph` as a `FunctionDef` protocol buffer. This method creates a [`FunctionDef`]( https://www.tensorflow.org/code/tensorflow/core/framework/function.proto) protocol buffer that contains all the ops in `operations`. The operations become the body of the function. The arguments `inputs` and `outputs` will be listed as the input and output tensors of the function. They must be lists of tensors present in the graph. The lists can optionally be empty. Args: graph: Graph. operations: the operations to put in the function. Must be a subset of the operations in the graph. inputs: List of tensors. Inputs to the function. outputs: List of tensors. Outputs of the function. out_names: Optional list of string names for the outputs. Returns: A FunctionDef protocol buffer. Raises: ValueError: if out_names is specified and has the wrong length." 3982,_is_variable_op,tensorflow/tensorflow/python/framework/graph_util_impl.py,62,function,Returns true if 'op' refers to a Variable node. 3983,must_run_on_cpu,tensorflow/tensorflow/python/framework/graph_util_impl.py,71,function,"Returns True if the given node_def must run on CPU, otherwise False. Args: node: The node to be assigned to a device. Could be either an ops.Operation or NodeDef. pin_variables_on_cpu: If True, this function will return False if node_def represents a variable-related op. Returns: True if the given node must run on CPU, otherwise False." 3984,_node_name,tensorflow/tensorflow/python/framework/graph_util_impl.py,122,function, 3985,_get_colocated_node_name,tensorflow/tensorflow/python/framework/graph_util_impl.py,129,function,Decodes colocated node name and returns it without loc:@ prepended. 3986,_extract_graph_summary,tensorflow/tensorflow/python/framework/graph_util_impl.py,137,function,Extracts useful information from the graph and returns them. 3987,_assert_nodes_are_present,tensorflow/tensorflow/python/framework/graph_util_impl.py,160,function,Assert that nodes are present in the graph. 3988,_bfs_for_reachable_nodes,tensorflow/tensorflow/python/framework/graph_util_impl.py,166,function,Breadth first search for reachable nodes from target nodes. 3989,extract_sub_graph,tensorflow/tensorflow/python/framework/graph_util_impl.py,187,function,"Extract the subgraph that can reach any of the nodes in 'dest_nodes'. Args: graph_def: A graph_pb2.GraphDef proto. dest_nodes: A list of strings specifying the destination node names. Returns: The GraphDef of the sub-graph. Raises: TypeError: If 'graph_def' is not a graph_pb2.GraphDef proto." 3990,tensor_shape_from_node_def_name,tensorflow/tensorflow/python/framework/graph_util_impl.py,229,function,Convenience function to get a shape from a NodeDef's input string. 3991,convert_variables_to_constants,tensorflow/tensorflow/python/framework/graph_util_impl.py,247,function,"Replaces all the variables in a graph with constants of the same values.
If you have a trained graph containing Variable ops, it can be convenient to convert them all to Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. Args: sess: Active TensorFlow session containing the variables. input_graph_def: GraphDef object holding the network. output_node_names: List of name strings for the result nodes of the graph. variable_names_whitelist: The set of variable names to convert (by default, all variables are converted). variable_names_blacklist: The set of variable names to omit converting to constants. Returns: GraphDef containing a simplified version of the original. Raises: RuntimeError: if a DT_RESOURCE op is found whose ancestor Variables are both blacklisted AND whitelisted for freezing." 3992,remove_training_nodes,tensorflow/tensorflow/python/framework/graph_util_impl.py,291,function,"Prunes out nodes that aren't needed for inference. There are nodes like Identity and CheckNumerics that are only useful during training, and can be removed in graphs that will be used for nothing but inference. Here we identify and remove them, returning an equivalent graph. To be specific, CheckNumerics nodes are always removed, and Identity nodes that aren't involved in control edges are spliced out so that their input and outputs are directly connected. Args: input_graph: Model to analyze and prune. protected_nodes: An optional list of names of nodes to be kept unconditionally. This is for example useful to preserve Identity output nodes. Returns: A list of nodes with the unnecessary ones removed." 3993,test_device_func_pin_variable_to_cpu,tensorflow/tensorflow/python/framework/graph_util_test.py,36,function, 3994,DeviceFunctionsTest,tensorflow/tensorflow/python/framework/graph_util_test.py,42,class, 3995,_IsControlInput,tensorflow/tensorflow/python/framework/importer.py,37,function, 3996,_ParseTensorName,tensorflow/tensorflow/python/framework/importer.py,42,function,"Parses a tensor name into an operation name and output index. This function will canonicalize tensor names as follows: * ""foo:0"" -> (""foo"", 0) * ""foo:7"" -> (""foo"", 7) * ""foo"" -> (""foo"", 0) * ""foo:bar:baz"" -> ValueError Args: tensor_name: The name of a tensor. Returns: A tuple containing the operation name, and the output index. Raises: ValueError: If `tensor_name` cannot be interpreted as the name of a tensor." 3997,_MaybeDevice,tensorflow/tensorflow/python/framework/importer.py,77,function,Applies the given device only if device is not None or empty. 3998,_ProcessGraphDefParam,tensorflow/tensorflow/python/framework/importer.py,86,function,Type-checks and possibly canonicalizes `graph_def`. 3999,_ProcessInputMapParam,tensorflow/tensorflow/python/framework/importer.py,116,function,Type-checks and possibly canonicalizes `input_map`. 4000,_ProcessReturnElementsParam,tensorflow/tensorflow/python/framework/importer.py,128,function,Type-checks and possibly canonicalizes `return_elements`. 4001,_FindAttrInOpDef,tensorflow/tensorflow/python/framework/importer.py,138,function, 4002,_RemoveDefaultAttrs,tensorflow/tensorflow/python/framework/importer.py,145,function,"Removes unknown default attrs according to `producer_op_list`. Removes any unknown attrs in `graph_def` (i.e. attrs that do not appear in registered OpDefs) that have a default value in `producer_op_list`. Args: producer_op_list: OpList proto.
graph_def: GraphDef proto" 4003,_ConvertInputMapValues,tensorflow/tensorflow/python/framework/importer.py,178,function,"Ensures all input map values are tensors. This should be called from inside the import name scope. Args: name: the `name` argument passed to import_graph_def input_map: the `input_map` argument passed to import_graph_def. Returns: An possibly-updated version of `input_map`. Raises: ValueError: if input map values cannot be converted due to empty name scope." 4004,_PopulateTFImportGraphDefOptions,tensorflow/tensorflow/python/framework/importer.py,204,function,Populates the TF_ImportGraphDefOptions `options`. 4005,_ProcessNewOps,tensorflow/tensorflow/python/framework/importer.py,237,function,Processes the newly-added TF_Operations in `graph`. 4006,_GetColocationNames,tensorflow/tensorflow/python/framework/importer.py,290,function,Returns names of the ops that `op` should be colocated with. 4007,_GatherReturnElements,tensorflow/tensorflow/python/framework/importer.py,307,function,"Returns the requested return elements from results. Args: requested_return_elements: list of strings of operation and tensor names graph: Graph results: wrapped TF_ImportGraphDefResults Returns: list of `Operation` and/or `Tensor` objects" 4008,_SetDefaultAttrValues,tensorflow/tensorflow/python/framework/importer.py,336,function,Set any default attr values in `node_def` that aren't present. 4009,import_graph_def,tensorflow/tensorflow/python/framework/importer.py,351,function,"Imports the graph from `graph_def` into the current default `Graph`. This function provides a way to import a serialized TensorFlow [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto) protocol buffer, and extract individual objects in the `GraphDef` as `tf.Tensor` and `tf.Operation` objects. Once extracted, these objects are placed into the current default `Graph`. See `tf.Graph.as_graph_def` for a way to create a `GraphDef` proto. Args: graph_def: A `GraphDef` proto containing operations to be imported into the default graph. input_map: A dictionary mapping input names (as strings) in `graph_def` to `Tensor` objects. The values of the named input tensors in the imported graph will be re-mapped to the respective `Tensor` values. return_elements: A list of strings containing operation names in `graph_def` that will be returned as `Operation` objects; and/or tensor names in `graph_def` that will be returned as `Tensor` objects. name: (Optional.) A prefix that will be prepended to the names in `graph_def`. Note that this does not apply to imported function names. Defaults to `""import""`. op_dict: (Optional.) Deprecated, do not use. producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped) list of `OpDef`s used by the producer of the graph. If provided, unrecognized attrs for ops in `graph_def` that have their default value according to `producer_op_list` will be removed. This will allow some more `GraphDef`s produced by later binaries to be accepted by earlier binaries. Returns: A list of `Operation` and/or `Tensor` objects from the imported graph, corresponding to the names in `return_elements`, and None if `returns_elements` is None. Raises: TypeError: If `graph_def` is not a `GraphDef` proto, `input_map` is not a dictionary mapping strings to `Tensor` objects, or `return_elements` is not a list of strings. ValueError: If `input_map`, or `return_elements` contains names that do not appear in `graph_def`, or `graph_def` is not well-formed (e.g. it refers to an unknown tensor)." 
4010,import_graph_def_for_function,tensorflow/tensorflow/python/framework/importer.py,408,function,Like import_graph_def but does not validate colocation constraints. 4011,_import_graph_def_internal,tensorflow/tensorflow/python/framework/importer.py,415,function,"Imports the graph from `graph_def` into the current default `Graph`. This function provides a way to import a serialized TensorFlow [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto) protocol buffer, and extract individual objects in the `GraphDef` as `tf.Tensor` and `tf.Operation` objects. Once extracted, these objects are placed into the current default `Graph`. See `tf.Graph.as_graph_def` for a way to create a `GraphDef` proto. Args: graph_def: A `GraphDef` proto containing operations to be imported into the default graph. input_map: A dictionary mapping input names (as strings) in `graph_def` to `Tensor` objects. The values of the named input tensors in the imported graph will be re-mapped to the respective `Tensor` values. return_elements: A list of strings containing operation names in `graph_def` that will be returned as `Operation` objects; and/or tensor names in `graph_def` that will be returned as `Tensor` objects. validate_colocation_constraints: Whether to validate colocation constraints. name: (Optional.) A prefix that will be prepended to the names in `graph_def`. Note that this does not apply to imported function names. Defaults to `""import""`. producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped) list of `OpDef`s used by the producer of the graph. If provided, unrecognized attrs for ops in `graph_def` that have their default value according to `producer_op_list` will be removed. This will allow some more `GraphDef`s produced by later binaries to be accepted by earlier binaries. Returns: A list of `Operation` and/or `Tensor` objects from the imported graph, corresponding to the names in `return_elements`, and None if `return_elements` is None. Raises: TypeError: If `graph_def` is not a `GraphDef` proto, `input_map` is not a dictionary mapping strings to `Tensor` objects, or `return_elements` is not a list of strings. ValueError: If `input_map`, or `return_elements` contains names that do not appear in `graph_def`, or `graph_def` is not well-formed (e.g. it refers to an unknown tensor)." 4012,ImportGraphDefTest,tensorflow/tensorflow/python/framework/importer_test.py,48,class, 4013,IndexedSlices,tensorflow/tensorflow/python/framework/indexed_slices.py,61,class,"A sparse representation of a set of tensor slices at given indices. This class is a simple wrapper for a pair of `Tensor` objects: * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`. * `indices`: A 1-D integer `Tensor` with shape `[D0]`. An `IndexedSlices` is typically used to represent a subset of a larger tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`. The values in `indices` are the indices in the first dimension of the slices that have been extracted from the larger tensor. The dense tensor `dense` represented by an `IndexedSlices` `slices` has ```python dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...] ``` The `IndexedSlices` class is used principally in the definition of gradients for operations that have sparse gradients (e.g. `tf.gather`). Contrast this representation with `tf.sparse.SparseTensor`, which uses multi-dimensional indices and scalar values."
4014,IndexedSlicesSpec,tensorflow/tensorflow/python/framework/indexed_slices.py,193,class,Type specification for a `tf.IndexedSlices`. 4015,convert_to_tensor_or_indexed_slices,tensorflow/tensorflow/python/framework/indexed_slices.py,260,function,"Converts the given object to a `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name to use if a new `Tensor` is created. Returns: A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`." 4016,internal_convert_to_tensor_or_indexed_slices,tensorflow/tensorflow/python/framework/indexed_slices.py,284,function,"Converts the given object to a `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name to use if a new `Tensor` is created. as_ref: True if the caller wants the results as ref tensors. Returns: A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`." 4017,internal_convert_n_to_tensor_or_indexed_slices,tensorflow/tensorflow/python/framework/indexed_slices.py,321,function,"Converts `values` to a list of `Tensor` or `IndexedSlices` objects. Any `IndexedSlices` or `SparseTensor` objects in `values` are returned unmodified. Args: values: An iterable of `None`, `IndexedSlices`, `SparseTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name prefix to use when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. Returns: A list of `Tensor`, `IndexedSlices`, `SparseTensor` and/or `None` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value." 4018,convert_n_to_tensor_or_indexed_slices,tensorflow/tensorflow/python/framework/indexed_slices.py,362,function,"Converts `values` to a list of `Output` or `IndexedSlices` objects. Any `IndexedSlices` or `SparseTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name prefix to use when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. Returns: A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value."
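A sketch of the pass-through behavior documented for `convert_to_tensor_or_indexed_slices` (row 4015): `IndexedSlices` and `SparseTensor` values come back unmodified, while other data is converted to a `Tensor`. The function lives in an internal module, so the import path below is an implementation detail and may change between releases.

```python
import tensorflow as tf
from tensorflow.python.framework import indexed_slices

sparse = tf.sparse.SparseTensor(
    indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
same = indexed_slices.convert_to_tensor_or_indexed_slices(sparse)
print(same is sparse)  # True -- returned unmodified

t = indexed_slices.convert_to_tensor_or_indexed_slices([1.0, 2.0])
print(type(t).__name__)  # a dense Tensor (EagerTensor in eager mode)
```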
4019,_indexed_slices_to_tensor,tensorflow/tensorflow/python/framework/indexed_slices.py,394,function,"Converts an IndexedSlices object `value` to a Tensor. NOTE(mrry): This function is potentially expensive. Args: value: An ops.IndexedSlices object. dtype: The dtype of the Tensor to be returned. name: Optional name to use for the returned Tensor. as_ref: True if a ref is requested. Returns: A dense Tensor representing the values in the given IndexedSlices. Raises: ValueError: If the IndexedSlices does not have the same dtype." 4020,is_mlir_bridge_enabled,tensorflow/tensorflow/python/framework/is_mlir_bridge_test_true.py,28,function,Returns true if the MLIR bridge should be enabled for tests. 4021,is_tfrt_enabled,tensorflow/tensorflow/python/framework/is_tfrt_test_true.py,28,function,Returns true to state that TFRT should be enabled for TensorFlow tests. 4022,is_xla_enabled,tensorflow/tensorflow/python/framework/is_xla_test_true.py,27,function,Returns true to state that XLA should be enabled for TensorFlow tests. 4023,get_all_registered_kernels,tensorflow/tensorflow/python/framework/kernels.py,26,function,"Returns a KernelList proto of all registered kernels." 4024,get_registered_kernels_for_op,tensorflow/tensorflow/python/framework/kernels.py,36,function,"Returns a KernelList proto of registered kernels for a given op. Args: name: A string representing the name of the op whose kernels to retrieve." 4025,GetAllRegisteredKernelsTest,tensorflow/tensorflow/python/framework/kernels_test.py,25,class, 4026,GetRegisteredKernelsForOp,tensorflow/tensorflow/python/framework/kernels_test.py,32,class, 4027,load_op_library,tensorflow/tensorflow/python/framework/load_library.py,36,function,"Loads a TensorFlow plugin, containing custom ops and kernels. Pass ""library_filename"" to a platform-specific mechanism for dynamically loading a library. The rules for determining the exact location of the library are platform-specific and are not documented here. When the library is loaded, ops and kernels registered in the library via the `REGISTER_*` macros are made available in the TensorFlow process. Note that ops with the same name as an existing op are rejected and not registered with the process. Args: library_filename: Path to the plugin. Relative or absolute filesystem path to a dynamic library file. Returns: A python module containing the Python wrappers for Ops defined in the plugin. Raises: RuntimeError: when unable to load the library or get the python wrappers." 4028,load_file_system_library,tensorflow/tensorflow/python/framework/load_library.py,83,function,"Loads a TensorFlow plugin, containing a file system implementation. Pass `library_filename` to a platform-specific mechanism for dynamically loading a library. The rules for determining the exact location of the library are platform-specific and are not documented here. Args: library_filename: Path to the plugin. Relative or absolute filesystem path to a dynamic library file. Returns: None. Raises: RuntimeError: when unable to load the library." 4029,_is_shared_object,tensorflow/tensorflow/python/framework/load_library.py,103,function,"Check the file to see if it is a shared object, only using extension." 4030,load_library,tensorflow/tensorflow/python/framework/load_library.py,124,function,"Loads a TensorFlow plugin. ""library_location"" can be a path to a specific shared object, or a folder. If it is a folder, all shared objects that are named ""libtfkernel*"" will be loaded.
When the library is loaded, kernels registered in the library via the `REGISTER_*` macros are made available in the TensorFlow process. Args: library_location: Path to the plugin or the folder of plugins. Relative or absolute filesystem path to a dynamic library file or folder. Returns: None Raises: OSError: When the file to be loaded is not found. RuntimeError: when unable to load the library." 4031,_get_test_name_best_effort,tensorflow/tensorflow/python/framework/memory_checker.py,32,function,"If available, return the current test name. Otherwise, `None`." 4032,MemoryChecker,tensorflow/tensorflow/python/framework/memory_checker.py,47,class,"Memory leak detection class. This is a utility class to detect Python and C++ memory leaks. It's intended for both testing and debugging. Basic usage: >>> # MemoryChecker() context manager tracks memory status inside its scope. >>> with MemoryChecker() as memory_checker: >>> tensors = [] >>> for _ in range(10): >>> # Simulating `tf.constant(1)` object leak every iteration. >>> tensors.append(tf.constant(1)) >>> >>> # Take a memory snapshot for later analysis. >>> memory_checker.record_snapshot() >>> >>> # `report()` generates a html graph file showing allocations over >>> # snapshots per every stack trace. >>> memory_checker.report() >>> >>> # This assertion will detect `tf.constant(1)` object leak. >>> memory_checker.assert_no_leak_if_all_possibly_except_one() `record_snapshot()` must be called once every iteration at the same location. This is because the detection algorithm relies on the assumption that if there is a leak, it's happening similarly on every snapshot." 4033,MemoryCheckerTest,tensorflow/tensorflow/python/framework/memory_checker_test.py,27,class, 4034,_node_def,tensorflow/tensorflow/python/framework/meta_graph.py,58,function,"Create a `NodeDef` proto with export_scope stripped. Args: from_node_def: A `node_def_pb2.NodeDef` protocol buffer. export_scope: A `string` representing the name scope to remove. unbound_inputs: An array of unbound input names if they exist. clear_devices: Boolean which controls whether to clear device information from node_def. Default false. Returns: A `node_def_pb2.NodeDef` protocol buffer." 4035,_read_file,tensorflow/tensorflow/python/framework/meta_graph.py,106,function,"Reads a file containing `GraphDef` and returns the protocol buffer. Args: filename: `graph_def` filename including the path. Returns: A `GraphDef` protocol buffer. Raises: IOError: If the file doesn't exist, or cannot be successfully parsed." 4036,ops_used_by_graph_def,tensorflow/tensorflow/python/framework/meta_graph.py,138,function,"Collect the list of ops used by a graph. Does not validate that the ops are all registered. Args: graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`. Returns: A list of strings, each naming an op used by the graph." 4037,stripped_op_list_for_graph,tensorflow/tensorflow/python/framework/meta_graph.py,179,function,"Collect the stripped OpDefs for ops used by a graph. This function computes the `stripped_op_list` field of `MetaGraphDef` and similar protos. The result can be communicated from the producer to the consumer, which can then use the C++ function `RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility. Args: graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`. Returns: An `OpList` of ops used by the graph." 4038,_get_kind_name,tensorflow/tensorflow/python/framework/meta_graph.py,205,function,"Returns the kind name in CollectionDef. Args: item: A data item. 
Returns: The string representation of the kind in CollectionDef." 4039,_op_name,tensorflow/tensorflow/python/framework/meta_graph.py,235,function,"Extract the Op name from a Tensor name. The Op name is everything before a colon, if present, not including any ^ prefix denoting a control dependency. Args: tensor_name: the full name of a Tensor in the graph. Returns: The name of the Op of which the given Tensor is an output. Raises: ValueError: if tensor_name is None or empty." 4040,_get_scope,tensorflow/tensorflow/python/framework/meta_graph.py,260,function,"Extract the scope name from a node name. The scope name is everything before the final slash, not including any ^ prefix denoting a control dependency. Args: node_name: the full name of an Op or a Tensor in the graph. Returns: The deepest named scope containing the node. Raises: ValueError: if node_name is None or empty" 4041,_find_extraneous_saver_nodes,tensorflow/tensorflow/python/framework/meta_graph.py,286,function,"Identifies any nodes in the graph_def related to unused Savers. This approach assumes that each Saver is cleanly isolated in its own name scope, so we need only identify the scopes associated with extraneous Savers and return all the nodes in those scopes. Args: graph_def: a GraphDef proto to evaluate. saver_def: a SaverDef proto referencing Save/Restore ops to be retained. Returns: An iterable of node names that may be safely omitted." 4042,_should_include_node,tensorflow/tensorflow/python/framework/meta_graph.py,342,function,"Returns `True` if a node should be included. Args: node_or_node_name: A node or `string` node name. export_scope: `string`. Name scope under which to extract the subgraph. The scope name will be stripped from the node definitions for easy import later into new name scopes. exclude_nodes: An iterable of nodes or `string` node names to omit from the export, or None. Note no sanity-checking is done, so this list must be carefully constructed to avoid producing an invalid graph. Returns: `True` if the node should be included." 4043,add_collection_def,tensorflow/tensorflow/python/framework/meta_graph.py,374,function,"Adds a collection to MetaGraphDef protocol buffer. Args: meta_graph_def: MetaGraphDef protocol buffer. key: One of the GraphKeys or user-defined string. graph: The `Graph` from which to get collections. export_scope: Optional `string`. Name scope to remove. exclude_nodes: An iterable of nodes or `string` node names to omit from the collection, or None. override_contents: An iterable of values to place in the collection, ignoring the current values (if set)." 4044,_is_default_attr_value,tensorflow/tensorflow/python/framework/meta_graph.py,448,function,Checks if given attribute matches the default value in the op def. 4045,strip_graph_default_valued_attrs,tensorflow/tensorflow/python/framework/meta_graph.py,462,function,"Strips default valued attributes for node defs in given MetaGraphDef. This method also sets `meta_info_def.stripped_default_attrs` in the given `MetaGraphDef` proto to True. Args: meta_graph_def: `MetaGraphDef` protocol buffer Returns: None." 4046,create_meta_graph_def,tensorflow/tensorflow/python/framework/meta_graph.py,509,function,"Constructs and returns a `MetaGraphDef` protocol buffer. Args: meta_info_def: `MetaInfoDef` protocol buffer. graph_def: `GraphDef` protocol buffer. saver_def: `SaverDef` protocol buffer. collection_list: List of string keys to collect. graph: The `Graph` to create `MetaGraphDef` out of. export_scope: Optional `string`. Name scope to remove.
exclude_nodes: An iterable of nodes or `string` node names to omit from all collections, or None. clear_extraneous_savers: Remove any preexisting SaverDefs from the SAVERS collection. Note this method does not alter the graph, so any extraneous Save/Restore ops should have been removed already, as needed. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. For a detailed guide, see [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes). Returns: MetaGraphDef protocol buffer. Raises: TypeError: If the arguments are not of the correct proto buffer type." 4047,read_meta_graph_file,tensorflow/tensorflow/python/framework/meta_graph.py,616,function,"Reads a file containing `MetaGraphDef` and returns the protocol buffer. Args: filename: `meta_graph_def` filename including the path. Returns: A `MetaGraphDef` protocol buffer. Raises: IOError: If the file doesn't exist, or cannot be successfully parsed." 4048,import_scoped_meta_graph,tensorflow/tensorflow/python/framework/meta_graph.py,648,function,"Recreates a `Graph` saved in a `MetaGraphDef` proto. This function takes a `MetaGraphDef` protocol buffer as input. If the argument is a file containing a `MetaGraphDef` protocol buffer, it constructs a protocol buffer from the file content. The function then adds all the nodes from the `graph_def` field to the current graph, recreates the desired collections, and returns a dictionary of all the Variables imported into the name scope. In combination with `export_scoped_meta_graph()`, this function can be used to * Serialize a graph along with other Python objects such as `QueueRunner`, `Variable` into a `MetaGraphDef`. * Restart training from a saved graph and checkpoints. * Run inference from a saved graph and checkpoints. Args: meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including the path) containing a `MetaGraphDef`. clear_devices: Boolean which controls whether to clear device information from graph_def. Default false. graph: The `Graph` to import into. If `None`, use the default graph. import_scope: Optional `string`. Name scope into which to import the subgraph. If `None`, the graph is imported to the root name scope. input_map: A dictionary mapping input names (as strings) in `graph_def` to `Tensor` objects. The values of the named input tensors in the imported graph will be re-mapped to the respective `Tensor` values. unbound_inputs_col_name: Collection name for looking up unbound inputs. restore_collections_predicate: a predicate on collection names. A collection named c (i.e. whose key is c) will be restored iff 1) `restore_collections_predicate(c)` is True, and 2) `c != unbound_inputs_col_name`. Returns: A dictionary of all the `Variables` imported into the name scope. Raises: ValueError: If the graph_def contains unbound inputs." 4049,import_scoped_meta_graph_with_return_elements,tensorflow/tensorflow/python/framework/meta_graph.py,701,function,"Imports graph from `MetaGraphDef` and returns vars and return elements. This function takes a `MetaGraphDef` protocol buffer as input. If the argument is a file containing a `MetaGraphDef` protocol buffer, it constructs a protocol buffer from the file content. The function then adds all the nodes from the `graph_def` field to the current graph, recreates the desired collections, and returns a dictionary of all the Variables imported into the name scope.
In combination with `export_scoped_meta_graph()`, this function can be used to * Serialize a graph along with other Python objects such as `QueueRunner`, `Variable` into a `MetaGraphDef`. * Restart training from a saved graph and checkpoints. * Run inference from a saved graph and checkpoints. Args: meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including the path) containing a `MetaGraphDef`. clear_devices: Boolean which controls whether to clear device information from graph_def. Default false. graph: The `Graph` to import into. If `None`, use the default graph. import_scope: Optional `string`. Name scope into which to import the subgraph. If `None`, the graph is imported to the root name scope. input_map: A dictionary mapping input names (as strings) in `graph_def` to `Tensor` objects. The values of the named input tensors in the imported graph will be re-mapped to the respective `Tensor` values. unbound_inputs_col_name: Collection name for looking up unbound inputs. restore_collections_predicate: a predicate on collection names. A collection named c (i.e. whose key is c) will be restored iff 1) `restore_collections_predicate(c)` is True, and 2) `c != unbound_inputs_col_name`. return_elements: A list of strings containing operation names in the `MetaGraphDef` that will be returned as `Operation` objects; and/or tensor names in `MetaGraphDef` that will be returned as `Tensor` objects. Returns: A tuple of ( dictionary of all the `Variables` imported into the name scope, list of `Operation` or `Tensor` objects from the `return_elements` list). Raises: ValueError: If the graph_def contains unbound inputs." 4050,export_scoped_meta_graph,tensorflow/tensorflow/python/framework/meta_graph.py,908,function,"Returns `MetaGraphDef` proto. Optionally writes it to filename. This function exports the graph, saver, and collection objects into `MetaGraphDef` protocol buffer with the intention of it being imported at a later time or location to restart training, run inference, or be a subgraph. Args: filename: Optional filename including the path for writing the generated `MetaGraphDef` protocol buffer. graph_def: `GraphDef` protocol buffer. graph: The `Graph` to export. If `None`, use the default graph. export_scope: Optional `string`. Name scope under which to extract the subgraph. The scope name will be stripped from the node definitions for easy import later into new name scopes. If `None`, the whole graph is exported. as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto. unbound_inputs_col_name: Optional `string`. If provided, a string collection with the given name will be added to the returned `MetaGraphDef`, containing the names of tensors that must be remapped when importing the `MetaGraphDef`. clear_devices: Boolean which controls whether to clear device information before exporting the graph. saver_def: `SaverDef` protocol buffer. clear_extraneous_savers: Remove any Saver-related information from the graph (both Save/Restore ops and SaverDefs) that are not associated with the provided SaverDef. strip_default_attrs: Set to true if default valued attributes must be removed while exporting the GraphDef. save_debug_info: If `True`, save the GraphDebugInfo to a separate file, which is in the same directory as filename and with `_debug` added before the file extension. **kwargs: Optional keyed arguments, including meta_info_def and collection_list. Returns: A `MetaGraphDef` proto and dictionary of `Variables` in the exported name scope.
Raises: ValueError: When the `GraphDef` is larger than 2GB. ValueError: When executing in Eager mode and either `graph_def` or `graph` is undefined." 4051,copy_scoped_meta_graph,tensorflow/tensorflow/python/framework/meta_graph.py,1073,function,"Copies a sub-meta_graph from one scope to another. Args: from_scope: `String` name scope containing the subgraph to be copied. to_scope: `String` name scope under which the copied subgraph will reside. from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the default graph is used. to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the default graph is used. Returns: A dictionary of `Variables` that has been copied into `to_scope`. Raises: ValueError: If `from_scope` and `to_scope` are the same while `from_graph` and `to_graph` are also the same." 4052,_TestDir,tensorflow/tensorflow/python/framework/meta_graph_test.py,54,function, 4053,SimpleMetaGraphTest,tensorflow/tensorflow/python/framework/meta_graph_test.py,65,class, 4054,ScopedMetaGraphTest,tensorflow/tensorflow/python/framework/meta_graph_test.py,339,class, 4055,MetaGraphWithVariableScopeTest,tensorflow/tensorflow/python/framework/meta_graph_test.py,925,class, 4056,ExportImportAcrossScopesTest,tensorflow/tensorflow/python/framework/meta_graph_test.py,983,class, 4057,add_op_callback,tensorflow/tensorflow/python/framework/op_callbacks.py,25,function,"Add a thread-local callback that intercepts op execution and op creation. The `callback_fn` will be invoked immediately after any of the three types of events: - The execution of a TensorFlow operation (""op"" for short hereafter) under eager mode, - The execution of a FuncGraph under eager mode, - The creation of an op during graph construction (e.g., in @tf.function-decorated Python functions). Known limitations: 1. Under graph mode, overriding the output tensors of control-flow ops, including ""If"", ""StatelessIf"" and ""While"", may cause errors (b/139668453). Overriding other tensors in a graph consisting of such control-flow ops is okay. 2. Under eager mode, calling eager ops from the callback function itself may lead to recursion stack overflow. This can be prevented by returning from the callback function immediately on encountering the op type involved (b/140334369). Args: callback_fn: A callback_fn that has the following signature: def callback_fn(op_type, inputs, attrs, outputs, op_name=None, graph=None): # op_type: The type of the op, as a string. E.g., ""MatMul"". # For the special case of FuncGraph execution, op_type # takes the name of the graph, e.g., # ""__inference_my_func_24"". # inputs: (`tuple` of `Tensor`s) Input tensors to the op or the # FuncGraph. # - In eager execution, these are `EagerTensor`s. # - In graph construction, these are non-eager `Tensor`s # that form the inputs to the just-created op. # attrs: The attributes of the op or FuncGraph of which the execution # or creation caused the current invocation of the callback. # This is applicable to both eager- and graph-based execution, # as well as graph construction. # This is a tuple of alternating attribute keys and attribute # values. E.g., `('adjoint_a', False, 'adjoint_b', False)`. # outputs: (`tuple` of `Tensor`s) Output tensors from the op or # FuncGraph. # In eager execution, these are `EagerTensor`s. # In graph construction, these are non-eager `Tensor`s that # are the outputs of the just-created op. # op_name: Name of the op.
# - If the current invocation of the callback is due to the # eager execution of an op or FuncGraph, this will be # `None`, as op names are meaningless in eager execution. # - In graph construction, this is the name of the op, e.g., # ""MatMul_2"". # graph: The graph that the op belongs to (if any). # - In eager execution of an op or FuncGraph, this is `None`. # - In graph construction, this is the op's enclosing graph # as a `tf.Graph` object. # # Return values: # This callback function is expected to return `None` or # a `list` or `tuple` of `Tensor`s with its length matching # `len(outputs)`, in the order that corresponds to that of the # `outputs` argument. # If the return value is `None`, downstream execution or graph # construction will be unaffected. # However, if the return value is a `list` or `tuple` of `Tensor`s, # - In eager execution, these returned `Tensor`s should be # `EagerTensor`s. Their values will replace the original values of # `outputs` for downstream eager execution. (*Not implemented yet*). # - In graph construction, these returned `Tensor`s should be # non-eager `Tensor`s. Their values will replace the original # `outputs` for downstream graph construction. Raises: ValueError: If `callback_fn` is `None` or not callable." 4058,should_invoke_op_callbacks,tensorflow/tensorflow/python/framework/op_callbacks.py,118,function,"Determine if op callbacks are present and should be invoked. Returns: A thread-local result (boolean) indicating whether any op callback(s) exist and should be invoked." 4059,remove_op_callback,tensorflow/tensorflow/python/framework/op_callbacks.py,129,function,"Remove an already-added op callback. Args: op_callback: The op callback to be removed. Raises: KeyError: If `op_callback` has not been registered using `add_op_callback()` before." 4060,clear_op_callbacks,tensorflow/tensorflow/python/framework/op_callbacks.py,146,function,Clear all op callbacks registered in the current thread. 4061,invoke_op_callbacks,tensorflow/tensorflow/python/framework/op_callbacks.py,152,function,"Invoke the callbacks that exist in the current scope (if any). If no callbacks are present in the current scope, this method returns immediately. Args: op_type: Type of the operation (e.g., ""MatMul""). inputs: Input tensors to the op. These are `EagerTensor`s in the case of eager execution of ops or `FuncGraph`s, and are non-eager `Tensor`s in the case of graph construction. attrs: Attributes of the op, as `tuple` of alternating keys and values. outputs: Output tensors from the op. These are `EagerTensor`s in the case of eager execution and are non-eager `Tensor`s in the case of graph construction. op_name: Name of the op. Applicable if and only if this method is invoked due to the graph construction of an op or the eager execution of a `FuncGraph`. graph: The graph involved (if any). - In the case of the eager execution of an op or FuncGraph, this is `None`. - In the case of the graph construction of an op, this is the `tf.Graph` object being built. Returns: `None`, or a `list` or `tuple` of output tensors that will override the original (input) `outputs`."
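A minimal sketch of the op-callback API described in rows 4057-4060. `op_callbacks` is an internal framework module, so the import path is an implementation detail; the callback signature follows the one quoted in row 4057, and the callback here only observes ops rather than overriding their outputs.

```python
import tensorflow as tf
from tensorflow.python.framework import op_callbacks

def log_callback(op_type, inputs, attrs, outputs, op_name=None, graph=None):
    # Returning None leaves the outputs untouched; we only log.
    print("saw op:", op_type, "name:", op_name)
    return None

op_callbacks.add_op_callback(log_callback)
try:
    x = tf.constant([[1.0, 2.0]])
    y = tf.matmul(x, x, transpose_b=True)  # triggers the callback eagerly
finally:
    op_callbacks.remove_op_callback(log_callback)
```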
4062,_NumpyFunctionCallback,tensorflow/tensorflow/python/framework/op_callbacks_test.py,76,class, 4063,OpCallbacksTest,tensorflow/tensorflow/python/framework/op_callbacks_test.py,158,class, 4064,OpCallbacksErrorConditionsTest,tensorflow/tensorflow/python/framework/op_callbacks_test.py,798,class, 4065,_Attr,tensorflow/tensorflow/python/framework/op_def_library.py,38,function, 4066,_AttrValue,tensorflow/tensorflow/python/framework/op_def_library.py,46,function, 4067,_SatisfiesTypeConstraint,tensorflow/tensorflow/python/framework/op_def_library.py,53,function, 4068,_IsListParameter,tensorflow/tensorflow/python/framework/op_def_library.py,64,function, 4069,_NumTypeFields,tensorflow/tensorflow/python/framework/op_def_library.py,72,function, 4070,_IsListValue,tensorflow/tensorflow/python/framework/op_def_library.py,80,function, 4071,_Flatten,tensorflow/tensorflow/python/framework/op_def_library.py,84,function,"Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]." 4072,_Restructure,tensorflow/tensorflow/python/framework/op_def_library.py,92,function,"Returns the elements of list l structured according to the given structure. A structure is represented by a list whose elements are either `None` or a non-negative integer. `None` corresponds to a single element in the output list, and an integer N corresponds to a nested list of length N. The function returns a data structure whose shape is given by `structure`, and whose elements are taken from `l`. If `structure` is a singleton, the function returns the single data structure implied by the 0th element of `structure`. For example: _Restructure([""foo"", ""bar"", ""baz"", ""qux""], [None, 2, None]) -> [""foo"", [""bar"", ""baz""], ""qux""] _Restructure([""foo""], [None]) -> ""foo"" _Restructure([""foo""], [1]) -> [""foo""] _Restructure([], [0]) -> [] Args: l: A list. structure: A list whose elements are either `None` or a non-negative integer. Returns: The elements of `l`, restructured according to `structure`. If `structure` is a list of length 1, this function returns the single data structure implied by `structure[0]`." 4073,_MakeFloat,tensorflow/tensorflow/python/framework/op_def_library.py,141,function, 4074,_MakeInt,tensorflow/tensorflow/python/framework/op_def_library.py,148,function, 4075,_MakeStr,tensorflow/tensorflow/python/framework/op_def_library.py,159,function, 4076,_MakeBool,tensorflow/tensorflow/python/framework/op_def_library.py,166,function, 4077,_MakeType,tensorflow/tensorflow/python/framework/op_def_library.py,173,function, 4078,_MakeShape,tensorflow/tensorflow/python/framework/op_def_library.py,184,function,Convert v into a TensorShapeProto. 4079,_MakeTensor,tensorflow/tensorflow/python/framework/op_def_library.py,207,function,Ensure v is a TensorProto. 4080,_MakeFunc,tensorflow/tensorflow/python/framework/op_def_library.py,216,function,Ensure v is a func. 4081,_MaybeColocateWith,tensorflow/tensorflow/python/framework/op_def_library.py,236,function,"A context manager for (maybe) colocating with a list of input tensors. Args: inputs: A list of `Tensor` or `Operation` objects. Returns: A context manager." 4082,apply_op,tensorflow/tensorflow/python/framework/op_def_library.py,255,function,"Add a node invoking a registered Op to a graph. Example usage: # input1 and input2 can be Tensors or anything ops.convert_to_tensor() # will convert to a Tensor. op_def_library.apply_op(""op"", input1=input1, input2=input2) # Can specify a node name. 
op_def_library.apply_op(""op"", input1=input1, name=""node_name"") # Must use keyword arguments, with the names specified in the OpDef. op_def_library.apply_op(""op"", input_name=input, attr_name=attr) All attrs must either be inferred from an input or specified. (If inferred, the attr must not be specified.) If an attr has a default value specified in the Op's OpDef, then you may pass None as the value of that attr to get the default. Args: op_type_name: string. Must match the name field of a registered Op. name: string. Optional name of the created op. **keywords: input Tensor and attr arguments specified by name, and optional parameters to pass when constructing the Operation. Returns: The Tensor(s) representing the output of the operation, or the Operation itself if there are no outputs. Raises: RuntimeError: On some errors. TypeError: On some errors. ValueError: On some errors." 4083,_apply_op_helper,tensorflow/tensorflow/python/framework/op_def_library.py,299,function,"Implementation of apply_op that returns output_structure, op." 4084,OpDefLibraryTest,tensorflow/tensorflow/python/framework/op_def_library_test.py,35,class, 4085,OpDefLibraryGraphTest,tensorflow/tensorflow/python/framework/op_def_library_test.py,1361,class, 4086,get,tensorflow/tensorflow/python/framework/op_def_registry.py,34,function,Returns an OpDef for a given `name` or None if the lookup fails. 4087,sync,tensorflow/tensorflow/python/framework/op_def_registry.py,59,function,No-op. Used to synchronize the contents of the Python registry with C++. 4088,tensor_id,tensorflow/tensorflow/python/framework/ops.py,114,function,Returns a unique identifier for this Tensor. 4089,_UserDeviceSpec,tensorflow/tensorflow/python/framework/ops.py,119,class,Store user-specified device and provide computation of merged device. 4090,NullContextmanager,tensorflow/tensorflow/python/framework/ops.py,166,class, 4091,_override_helper,tensorflow/tensorflow/python/framework/ops.py,178,function,"Overrides (string) operator on Tensors to call func. Args: clazz_object: the class to override for; either Tensor or SparseTensor. operator: the string name of the operator to override. func: the function that replaces the overridden operator. Raises: ValueError: If operator is not allowed to be overwritten." 4092,_as_graph_element,tensorflow/tensorflow/python/framework/ops.py,194,function,"Convert `obj` to a graph element if possible, otherwise return `None`. Args: obj: Object to convert. Returns: The result of `obj._as_graph_element()` if that method is available; otherwise `None`." 4093,is_dense_tensor_like,tensorflow/tensorflow/python/framework/ops.py,213,function, 4094,uid,tensorflow/tensorflow/python/framework/ops.py,217,function,A unique (within this program execution) integer. 4095,numpy_text,tensorflow/tensorflow/python/framework/ops.py,222,function,Human readable representation of a tensor's numpy value. 4096,enable_tensor_equality,tensorflow/tensorflow/python/framework/ops.py,235,function,"Compare Tensors with element-wise comparison and thus be unhashable. Comparing tensors with element-wise allows comparisons such as tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are unhashable. Thus tensors can no longer be directly used in sets or as a key in a dictionary." 4097,disable_tensor_equality,tensorflow/tensorflow/python/framework/ops.py,248,function,"Compare Tensors by their id and be hashable. This is a legacy behaviour of TensorFlow and is highly discouraged." 
4098,Tensor,tensorflow/tensorflow/python/framework/ops.py,259,class,"A tensor is a multidimensional array of elements represented by a `tf.Tensor` object. All elements are of a single known data type. When writing a TensorFlow program, the main object that is manipulated and passed around is the `tf.Tensor`. A `tf.Tensor` has the following properties: * a single data type (float32, int32, or string, for example) * a shape TensorFlow supports eager execution and graph execution. In eager execution, operations are evaluated immediately. In graph execution, a computational graph is constructed for later evaluation. TensorFlow defaults to eager execution. In the example below, the matrix multiplication results are calculated immediately. >>> # Compute some values using a Tensor >>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]]) >>> e = tf.matmul(c, d) >>> print(e) tf.Tensor( [[1. 3.] [3. 7.]], shape=(2, 2), dtype=float32) Note that during eager execution, you may discover your `Tensors` are actually of type `EagerTensor`. This is an internal detail, but it does give you access to a useful function, `numpy`: >>> type(e) <class '...ops.EagerTensor'> >>> print(e.numpy()) [[1. 3.] [3. 7.]] In TensorFlow, `tf.function`s are a common way to define graph execution. A Tensor's shape (that is, the rank of the Tensor and the size of each dimension) may not always be fully known. In `tf.function` definitions, the shape may only be partially known. Most operations produce tensors of fully-known shapes if the shapes of their inputs are also fully known, but in some cases it's only possible to find the shape of a tensor at execution time. A number of specialized tensors are available: see `tf.Variable`, `tf.constant`, `tf.placeholder`, `tf.sparse.SparseTensor`, and `tf.RaggedTensor`. For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor)." 4099,_EagerTensorBase,tensorflow/tensorflow/python/framework/ops.py,965,class,Base class for EagerTensor. 4100,convert_to_tensor_v1_with_dispatch,tensorflow/tensorflow/python/framework/ops.py,1263,function,"Converts the given `value` to a `Tensor`. This function converts Python objects of various types to `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: ```python import numpy as np def my_func(arg): arg = tf.convert_to_tensor(arg, dtype=tf.float32) return tf.matmul(arg, arg) + arg # The following calls are equivalent. value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]])) value_2 = my_func([[1.0, 2.0], [3.0, 4.0]]) value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)) ``` This function can be useful when composing a new operation in Python (such as `my_func` in the example above). All standard Python op constructors apply this function to each of their Tensor-valued inputs, which allows those ops to accept numpy arrays, Python lists, and scalars in addition to `Tensor` objects. Note: This function diverges from default Numpy behavior for `float` and `string` types when `None` is present in a Python list or scalar. Rather than silently converting `None` values, an error will be thrown. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: Optional name to use if a new `Tensor` is created. preferred_dtype: Optional element type for the returned tensor, used when dtype is None.
In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. dtype_hint: same meaning as preferred_dtype, and overrides it. Returns: A `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value` to `dtype`. RuntimeError: If a registered conversion function returns an invalid value. ValueError: If the `value` is a tensor not of given `dtype` in graph mode." 4101,convert_to_tensor_v1,tensorflow/tensorflow/python/framework/ops.py,1323,function,Converts the given `value` to a `Tensor` (with the TF1 API). 4102,convert_to_tensor_v2_with_dispatch,tensorflow/tensorflow/python/framework/ops.py,1336,function,"Converts the given `value` to a `Tensor`. This function converts Python objects of various types to `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: >>> import numpy as np >>> def my_func(arg): ... arg = tf.convert_to_tensor(arg, dtype=tf.float32) ... return arg >>> # The following calls are equivalent. ... >>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]])) >>> print(value_1) tf.Tensor( [[1. 2.] [3. 4.]], shape=(2, 2), dtype=float32) >>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]]) >>> print(value_2) tf.Tensor( [[1. 2.] [3. 4.]], shape=(2, 2), dtype=float32) >>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)) >>> print(value_3) tf.Tensor( [[1. 2.] [3. 4.]], shape=(2, 2), dtype=float32) This function can be useful when composing a new operation in Python (such as `my_func` in the example above). All standard Python op constructors apply this function to each of their Tensor-valued inputs, which allows those ops to accept numpy arrays, Python lists, and scalars in addition to `Tensor` objects. Note: This function diverges from default Numpy behavior for `float` and `string` types when `None` is present in a Python list or scalar. Rather than silently converting `None` values, an error will be thrown. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. dtype_hint: Optional element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. name: Optional name to use if a new `Tensor` is created. Returns: A `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value` to `dtype`. RuntimeError: If a registered conversion function returns an invalid value. ValueError: If the `value` is a tensor not of given `dtype` in graph mode." 4103,convert_to_tensor_v2,tensorflow/tensorflow/python/framework/ops.py,1402,function,Converts the given `value` to a `Tensor`. 4104,_error_prefix,tensorflow/tensorflow/python/framework/ops.py,1412,function, 4105,pack_eager_tensors,tensorflow/tensorflow/python/framework/ops.py,1416,function,"Pack multiple `EagerTensor`s of the same dtype and shape. Args: tensors: a list of EagerTensors to pack. ctx: context.context(). Returns: A packed EagerTensor." 4106,convert_to_tensor,tensorflow/tensorflow/python/framework/ops.py,1475,function,Implementation of the public convert_to_tensor. 
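A short sketch of the `dtype_hint` soft preference described for `convert_to_tensor_v2_with_dispatch` in row 4102: the hint is applied when the conversion can honor it and silently ignored otherwise, unlike a hard `dtype=` argument, which would raise.

```python
import tensorflow as tf

# Hint honored: Python ints would default to int32, but float32 is possible.
t1 = tf.convert_to_tensor([1, 2], dtype_hint=tf.float32)
print(t1.dtype)  # float32

# Hint impossible: strings cannot become float32, so the hint is ignored
# rather than raising an error.
t2 = tf.convert_to_tensor(["a", "b"], dtype_hint=tf.float32)
print(t2.dtype)  # string
```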
4107,internal_convert_n_to_tensor,tensorflow/tensorflow/python/framework/ops.py,1550,function,"Converts `values` to a list of `Tensor` objects. Args: values: A list of objects that can be consumed by `tf.convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` objects. name: (Optional.) A name prefix to use when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. ctx: The value of context.context(). Returns: A list of `Tensor` and/or `IndexedSlices` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value." 4108,convert_n_to_tensor,tensorflow/tensorflow/python/framework/ops.py,1598,function,"Converts `values` to a list of `Tensor` objects. Args: values: A list of objects that can be consumed by `tf.convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` objects. name: (Optional.) A name prefix to use when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. Returns: A list of `Tensor` and/or `IndexedSlices` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value." 4109,convert_to_tensor_or_composite,tensorflow/tensorflow/python/framework/ops.py,1629,function,"Converts the given object to a `Tensor` or `CompositeTensor`. If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: A `CompositeTensor` or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `CompositeTensor`. name: (Optional.) A name to use if a new `Tensor` is created. Returns: A `Tensor` or `CompositeTensor`, based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`." 4110,internal_convert_to_tensor_or_composite,tensorflow/tensorflow/python/framework/ops.py,1652,function,"Converts the given object to a `Tensor` or `CompositeTensor`. If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: A `CompositeTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `CompositeTensor`. name: (Optional.) A name to use if a new `Tensor` is created. as_ref: True if the caller wants the results as ref tensors. Returns: A `Tensor` or `CompositeTensor`, based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`."
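A sketch of the pass-through behavior documented for `convert_to_tensor_or_composite` (row 4109): `CompositeTensor` values such as `SparseTensor` survive unchanged, while other values are converted to a dense `Tensor`. The function sits in the internal `ops` module, so the import path below is an implementation detail.

```python
import tensorflow as tf
from tensorflow.python.framework import ops

st = tf.sparse.SparseTensor(
    indices=[[0, 0]], values=[1.0], dense_shape=[3, 3])
out = ops.convert_to_tensor_or_composite(st)
print(out is st)  # True -- CompositeTensor returned unmodified

dense = ops.convert_to_tensor_or_composite([[1.0, 2.0]])
print(dense.shape)  # (1, 2) -- plain data converted to a Tensor
```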
4111,internal_convert_n_to_tensor_or_composite,tensorflow/tensorflow/python/framework/ops.py,1691,function,"Converts `values` to a list of `Tensor` or `CompositeTensor` objects. Any `CompositeTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `CompositeTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor`s or `CompositeTensor`s. name: (Optional.) A name prefix to use when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. Returns: A list of `Tensor`, `CompositeTensor`, and/or `None` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value." 4112,convert_n_to_tensor_or_composite,tensorflow/tensorflow/python/framework/ops.py,1731,function,"Converts `values` to a list of `Output` or `CompositeTensor` objects. Any `CompositeTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `CompositeTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor`s or `CompositeTensor`s. name: (Optional.) A name prefix to use when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. Returns: A list of `Tensor` and/or `CompositeTensor` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value." 4113,_device_string,tensorflow/tensorflow/python/framework/ops.py,1757,function, 4114,_NodeDef,tensorflow/tensorflow/python/framework/ops.py,1764,function,"Create a NodeDef proto. Args: op_type: Value for the ""op"" attribute of the NodeDef proto. name: Value for the ""name"" attribute of the NodeDef proto. attrs: Dictionary where the key is the attribute name (a string) and the value is the respective ""attr"" attribute of the NodeDef proto (an AttrValue). Returns: A node_def_pb2.NodeDef protocol buffer." 4115,_create_c_op,tensorflow/tensorflow/python/framework/ops.py,1791,function,"Creates a TF_Operation. Args: graph: a `Graph`. node_def: `node_def_pb2.NodeDef` for the operation to create. inputs: A flattened list of `Tensor`s. This function handles grouping tensors into lists as per attributes in the `node_def`. control_inputs: A list of `Operation`s to set as control dependencies. op_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not specified, is looked up from the `graph` using `node_def.op`. Returns: A wrapped TF_Operation*." 4116,Operation,tensorflow/tensorflow/python/framework/ops.py,1848,class,"Represents a graph node that performs computation on tensors. An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor` objects as input, and produces zero or more `Tensor` objects as output. Objects of type `Operation` are created by calling a Python op constructor (such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default` context manager. For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an `Operation` of type ""MatMul"" that takes tensors `a` and `b` as input, and produces `c` as output. If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be executed by passing it to `tf.Session.run`.
`op.run()` is a shortcut for calling `tf.compat.v1.get_default_session().run(op)`." 4117,RegisterGradient,tensorflow/tensorflow/python/framework/ops.py,2584,class,"A decorator for registering the gradient function for an op type. This decorator is only used when defining a new op type. For an op with `m` inputs and `n` outputs, the gradient function is a function that takes the original `Operation` and `n` `Tensor` objects (representing the gradients with respect to each output of the op), and returns `m` `Tensor` objects (representing the partial gradients with respect to each input of the op). For example, assuming that operations of type `""Sub""` take two inputs `x` and `y`, and return a single output `x - y`, the following gradient function would be registered: ```python @tf.RegisterGradient(""Sub"") def _sub_grad(unused_op, grad): return grad, tf.negative(grad) ``` The decorator argument `op_type` is the string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation." 4118,no_gradient,tensorflow/tensorflow/python/framework/ops.py,2633,function,"Specifies that ops of type `op_type` are not differentiable. This function should *not* be used for operations that have a well-defined gradient that is not yet implemented. This function is only used when defining a new op type. It may be used for ops such as `tf.size()` that are not differentiable. For example: ```python tf.no_gradient(""Size"") ``` The gradient computed for 'op_type' will then propagate zeros. For ops that have a well-defined gradient but are not yet implemented, no declaration should be made, and an error *must* be thrown if an attempt to request its gradient is made. Args: op_type: The string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation. Raises: TypeError: If `op_type` is not a string." 4119,get_gradient_function,tensorflow/tensorflow/python/framework/ops.py,2671,function,"Returns the function that computes gradients for ""op""." 4120,set_shape_and_handle_data_for_outputs,tensorflow/tensorflow/python/framework/ops.py,2687,function,No op. TODO(b/74620627): Remove this. 4121,OpStats,tensorflow/tensorflow/python/framework/ops.py,2692,class,"A holder for statistics about an operator. This class holds information about the resource requirements for an op, including the size of its weight parameters on-disk and how many FLOPS it requires to execute forward inference. If you define a new operation, you can create a function that will return a set of information about its usage of the CPU and disk space when serialized. The function itself takes a Graph object that's been set up so you can call methods like get_tensor_by_name to help calculate the results, and a NodeDef argument." 4122,RegisterStatistics,tensorflow/tensorflow/python/framework/ops.py,2744,class,"A decorator for registering the statistics function for an op type. This decorator can be defined for an op type so that it gives a report on the resources used by an instance of an operator, in the form of an OpStats object. Well-known types of statistics include these so far: - flops: When running a graph, the bulk of the computation happens doing numerical calculations like matrix multiplications. This type allows a node to return how many floating-point operations it takes to complete. The total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering functions for the ops you care about, and then calling get_stats_for_node_def. If a statistic for an op is registered multiple times, a KeyError will be raised. Since statistics are counted on a per-op basis, they are not suitable for model parameters (capacity), which are expected to be counted only once, even if shared by multiple ops (e.g. RNN). For example, you can define a new metric called doohickey for a Foo operation by placing this in your code: ```python @ops.RegisterStatistics(""Foo"", ""doohickey"") def _calc_foo_bojangles(unused_graph, unused_node_def): return ops.OpStats(""doohickey"", 20) ``` Then in client code you can retrieve the value by making this call: ```python doohickey = ops.get_stats_for_node_def(graph, node_def, ""doohickey"") ``` If the NodeDef is for an op with a registered doohickey function, you'll get back the calculated amount in doohickey.value, or None if it's not defined." 4123,get_stats_for_node_def,tensorflow/tensorflow/python/framework/ops.py,2809,function,"Looks up the node's statistics function in the registry and calls it. This function takes a Graph object and a NodeDef from a GraphDef, and if there's an associated statistics method, calls it and returns a result. If no function has been registered for the particular node type, it returns an empty statistics object. Args: graph: A Graph object that's been set up with the node's graph. node: A NodeDef describing the operator. statistic_type: A string identifying the statistic we're interested in. Returns: An OpStats object containing information about resource usage." 4124,name_from_scope_name,tensorflow/tensorflow/python/framework/ops.py,2834,function,"Returns the name of an op given the name of its scope. Args: name: the name of the scope. Returns: the name of the op (equal to scope name minus any trailing slash)." 4125,Graph,tensorflow/tensorflow/python/framework/ops.py,2851,class,"A TensorFlow computation, represented as a dataflow graph. Graphs are used by `tf.function`s to represent the function's computations. Each graph contains a set of `tf.Operation` objects, which represent units of computation; and `tf.Tensor` objects, which represent the units of data that flow between operations. ### Using graphs directly (deprecated) A `tf.Graph` can be constructed and used directly without a `tf.function`, as was required in TensorFlow 1, but this is deprecated and it is recommended to use a `tf.function` instead. If a graph is directly used, other deprecated TensorFlow 1 classes are also required to execute the graph, such as a `tf.compat.v1.Session`. A default graph can be registered with the `tf.Graph.as_default` context manager. Then, operations will be added to the graph instead of being executed eagerly. For example: ```python g = tf.Graph() with g.as_default(): # Define operations and tensors in `g`. c = tf.constant(30.0) assert c.graph is g ``` `tf.compat.v1.get_default_graph()` can be used to obtain the default graph. Important note: This class *is not* thread-safe for graph construction. All operations should be created from a single thread, or external synchronization must be provided. Unless otherwise specified, all methods are not thread-safe. A `Graph` instance supports an arbitrary number of ""collections"" that are identified by name.
For convenience when building a large graph, collections can store groups of related objects: for example, the `tf.Variable` uses a collection (named `tf.GraphKeys.GLOBAL_VARIABLES`) for all variables that are created during the construction of a graph. The caller may define additional collections by specifying a new name." 4126,enable_auto_cast_variables,tensorflow/tensorflow/python/framework/ops.py,5212,class,"Enables the autocasting of `AutoCastVariable`s. Under this context manager, `AutoCastVariable`s will be cast to `dtype` if `dtype` is floating-point. Otherwise, `AutoCastVariable`s will not be cast." 4127,device,tensorflow/tensorflow/python/framework/ops.py,5249,function,"Wrapper for `Graph.device()` using the default graph. See `tf.Graph.device` for more details. Args: device_name_or_function: The device name or function to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If eager execution is enabled and a function is passed in." 4128,device_v2,tensorflow/tensorflow/python/framework/ops.py,5285,function,"Specifies the device for ops created/executed in this context. This function specifies the device to be used for ops created/executed in a particular context. Nested contexts will inherit and also create/execute their ops on the specified device. If a specific device is not required, consider not using this function so that a device can be automatically assigned. In general the use of this function is optional. `device_name` can be fully specified, as in ""/job:worker/task:1/device:cpu:0"", or partially specified, containing only a subset of the ""/""-separated fields. Any fields which are specified will override device annotations from outer scopes. For example: ```python with tf.device('/job:foo'): # ops created here have devices with /job:foo with tf.device('/job:bar/task:0/device:gpu:2'): # ops created here have the fully specified device above with tf.device('/device:gpu:1'): # ops created here have the device '/job:foo/device:gpu:1' ``` Args: device_name: The device name to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If a function is passed in." 4129,container,tensorflow/tensorflow/python/framework/ops.py,5324,function,"Wrapper for `Graph.container()` using the default graph. Args: container_name: The container string to use in the context. Returns: A context manager that specifies the default container to use for newly created stateful ops." 4130,_colocate_with_for_gradient,tensorflow/tensorflow/python/framework/ops.py,5337,function, 4131,colocate_with,tensorflow/tensorflow/python/framework/ops.py,5360,function, 4132,_colocate_with,tensorflow/tensorflow/python/framework/ops.py,5367,function, 4133,control_dependencies,tensorflow/tensorflow/python/framework/ops.py,5372,function,"Wrapper for `Graph.control_dependencies()` using the default graph. See `tf.Graph.control_dependencies` for more details. Note: *In TensorFlow 2 with eager and/or Autograph, you should not require this method, as code executes in the expected order.* Only use `tf.control_dependencies` when working with v1-style code or in a graph context such as inside `Dataset.map`. When eager execution is enabled, any callable object in the `control_inputs` list will be called. Args: control_inputs: A list of `Operation` or `Tensor` objects which must be executed or computed before running the operations defined in the context. 
Can also be `None` to clear the control dependencies. If eager execution is enabled, any callable object in the `control_inputs` list will be called. Returns: A context manager that specifies control dependencies for all operations constructed within the context." 4134,_DefaultStack,tensorflow/tensorflow/python/framework/ops.py,5408,class,A thread-local stack of objects for providing implicit defaults. 4135,default_session,tensorflow/tensorflow/python/framework/ops.py,5455,function,"Python ""with"" handler for defining a default session. This function provides a means of registering a session for handling Tensor.eval() and Operation.run() calls. It is primarily intended for use by session.Session, but can be used with any object that implements the Session.run() interface. Use with the ""with"" keyword to specify that Tensor.eval() and Operation.run() invocations within the scope of a block should be executed by a particular session. The default session applies to the current thread only, so it is always possible to inspect the call stack and determine the scope of a default session. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a ""with ops.default_session(sess):"" block in that thread's function. Example: The following code examples are equivalent: # 1. Using the Session object directly: sess = ... c = tf.constant(5.0) sess.run(c) # 2. Using default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) result = c.eval() # 3. Overriding default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) with ops.default_session(...): c.eval(session=sess) Args: session: The session to be installed as the default session. Returns: A context manager for the default session." 4136,get_default_session,tensorflow/tensorflow/python/framework/ops.py,5504,function,"Returns the default session for the current thread. The returned `Session` will be the innermost session on which a `Session` or `Session.as_default()` context has been entered. NOTE: The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. Returns: The default `Session` being used in the current thread." 4137,_eval_using_default_session,tensorflow/tensorflow/python/framework/ops.py,5521,function,"Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. session: (Optional) A different session to use to evaluate ""tensors"". Returns: Either a single numpy ndarray if ""tensors"" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in ""tensors"". Raises: ValueError: If no default session is available; the default session does not have ""graph"" as its graph; or if ""session"" is specified, and it does not have ""graph"" as its graph." 4138,_run_using_default_session,tensorflow/tensorflow/python/framework/ops.py,5561,function,"Uses the default session to run ""operation"". Args: operation: The Operation to be run. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which ""operation"" is defined. 
session: (Optional) A different session to use to run ""operation"". Raises: ValueError: If no default session is available; the default session does not have ""graph"" as its graph; or if ""session"" is specified, and it does not have ""graph"" as its graph." 4139,_DefaultGraphStack,tensorflow/tensorflow/python/framework/ops.py,5596,class,A thread-local stack of objects for providing an implicit default graph. 4140,_get_outer_context_and_inner_device_stack,tensorflow/tensorflow/python/framework/ops.py,5647,function,Get the outermost context not building a function. 4141,init_scope,tensorflow/tensorflow/python/framework/ops.py,5687,function,"A context manager that lifts ops out of control-flow scopes and function-building graphs. There is often a need to lift variable initialization ops out of control-flow scopes, function-building graphs, and gradient tapes. Entering an `init_scope` is a mechanism for satisfying these desiderata. In particular, entering an `init_scope` has three effects: (1) All control dependencies are cleared the moment the scope is entered; this is equivalent to entering the context manager returned from `control_dependencies(None)`, which has the side-effect of exiting control-flow scopes like `tf.cond` and `tf.while_loop`. (2) All operations that are created while the scope is active are lifted into the lowest context on the `context_stack` that is not building a graph function. Here, a context is defined as either a graph or an eager context. Every context switch, i.e., every installation of a graph as the default graph and every switch into eager mode, is logged in a thread-local stack called `context_switches`; the log entry for a context switch is popped from the stack when the context is exited. Entering an `init_scope` is equivalent to crawling up `context_switches`, finding the first context that is not building a graph function, and entering it. A caveat is that if graph mode is enabled but the default graph stack is empty, then entering an `init_scope` will simply install a fresh graph as the default one. (3) The gradient tape is paused while the scope is active. When eager execution is enabled, code inside an init_scope block runs with eager execution enabled even when tracing a `tf.function`. For example: ```python tf.compat.v1.enable_eager_execution() @tf.function def func(): # A function constructs TensorFlow graphs, # it does not execute eagerly. assert not tf.executing_eagerly() with tf.init_scope(): # Initialization runs with eager execution enabled assert tf.executing_eagerly() ``` Raises: RuntimeError: if graph state is incompatible with this initialization." 4142,executing_eagerly_outside_functions,tensorflow/tensorflow/python/framework/ops.py,5791,function,"Returns True if executing eagerly, even if inside a graph function. This function will check the outermost context for the program and see if it is in eager mode. It is useful to compare with `tf.executing_eagerly()`, which checks the current context and will return `False` within a `tf.function` body. It can be used to build libraries that behave differently in eager runtime and v1 session runtime (deprecated). Example: >>> tf.compat.v1.enable_eager_execution() >>> @tf.function ... def func(): ... # A function constructs TensorFlow graphs, it does not execute eagerly, ... # but the outermost context is still eager. ... assert not tf.executing_eagerly() ... return tf.compat.v1.executing_eagerly_outside_functions() >>> func() Returns: boolean, whether the outermost context is in eager mode."
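A minimal runnable sketch (not part of the index above, assuming a TF2-style environment where eager execution is the default) of the `init_scope` behavior described in the `init_scope` and `executing_eagerly_outside_functions` entries:
```python
import tensorflow as tf

@tf.function
def fn():
    # While tracing, ops are added to a function-building graph,
    # so execution is not eager at this point.
    assert not tf.executing_eagerly()
    assert tf.compat.v1.executing_eagerly_outside_functions()
    with tf.init_scope():
        # init_scope lifts us into the outermost (eager) context.
        assert tf.executing_eagerly()
    return tf.constant(1)

fn()  # all three assertions hold while the function is traced
```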
4143,inside_function,tensorflow/tensorflow/python/framework/ops.py,5823,function, 4144,enable_eager_execution,tensorflow/tensorflow/python/framework/ops.py,5828,function,"Enables eager execution for the lifetime of this program. Eager execution provides an imperative interface to TensorFlow. With eager execution enabled, TensorFlow functions execute operations immediately (as opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`) and return concrete values (as opposed to symbolic references to a node in a computational graph). For example: ```python tf.compat.v1.enable_eager_execution() # After eager execution is enabled, operations are executed as they are # defined and Tensor objects hold concrete values, which can be accessed as # numpy.ndarray`s through the numpy() method. assert tf.multiply(6, 7).numpy() == 42 ``` Eager execution cannot be enabled after TensorFlow APIs have been used to create or execute graphs. It is typically recommended to invoke this function at program startup and not in a library (as most libraries should be usable both with and without eager execution). Args: config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the environment in which operations are executed. Note that `tf.compat.v1.ConfigProto` is also used to configure graph execution (via `tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto` are not implemented (or are irrelevant) when eager execution is enabled. device_policy: (Optional.) Policy controlling how operations requiring inputs on a specific device (e.g., GPU 0) handle inputs on a different device (e.g., GPU 1 or CPU). When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not correct. - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right device but logs a warning. - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. - tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors, raising errors on the other ones. execution_mode: (Optional.) Policy controlling how operations dispatched are actually executed. When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.SYNC: executes each operation synchronously. - tf.contrib.eager.ASYNC: executes each operation asynchronously. These operations may return ""non-ready"" handles. Raises: ValueError: If eager execution is enabled after creating/executing a TensorFlow graph, or if options provided conflict with a previous call to this function." 4145,disable_eager_execution,tensorflow/tensorflow/python/framework/ops.py,5900,function,"Disables eager execution. This function can only be called before any Graphs, Ops, or Tensors have been created. It can be used at the beginning of the program for complex migration projects from TensorFlow 1.x to 2.x." 4146,enable_eager_execution_internal,tensorflow/tensorflow/python/framework/ops.py,5914,function,"Enables eager execution for the lifetime of this program. Most of the doc string for enable_eager_execution is relevant here as well.
Args: config: See enable_eager_execution doc string device_policy: See enable_eager_execution doc string execution_mode: See enable_eager_execution doc string server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on remote devices. GrpcServers need to be started by creating an identical server_def to this, and setting the appropriate task_indexes, so that the servers can communicate. It will then be possible to execute operations on remote devices. Raises: ValueError" 4147,eager_run,tensorflow/tensorflow/python/framework/ops.py,5987,function,"Runs the program with an optional main function and argv list. The program will run with eager execution enabled. Example: ```python import tensorflow as tf # Import subject to future changes: from tensorflow.contrib.eager.python import tfe def main(_): u = tf.constant(6.0) v = tf.constant(7.0) print(u * v) if __name__ == ""__main__"": tfe.run() ``` Args: main: the main function to run. argv: the arguments to pass to it." 4148,reset_default_graph,tensorflow/tensorflow/python/framework/ops.py,6016,function,"Clears the default graph stack and resets the global default graph. NOTE: The default graph is a property of the current thread. This function applies only to the current thread. Calling this function while a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will result in undefined behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects after calling this function will result in undefined behavior. Raises: AssertionError: If this function is called within a nested graph." 4149,get_default_graph,tensorflow/tensorflow/python/framework/ops.py,6036,function,"Returns the default graph for the current thread. The returned graph will be the innermost graph on which a `Graph.as_default()` context has been entered, or a global default graph if none has been explicitly created. NOTE: The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a `with g.as_default():` in that thread's function. Returns: The default `Graph` being used in the current thread." 4150,has_default_graph,tensorflow/tensorflow/python/framework/ops.py,6054,function,Returns True if there is a default graph. 4151,get_name_scope,tensorflow/tensorflow/python/framework/ops.py,6059,function,"Returns the current name scope in the default_graph. For example: ```python with tf.name_scope('scope1'): with tf.name_scope('scope2'): print(tf.get_name_scope()) ``` would print the string `scope1/scope2`. Returns: A string representing the current name scope." 4152,_assert_same_graph,tensorflow/tensorflow/python/framework/ops.py,6079,function,"Fail if the 2 items are from different graphs. Args: original_item: Original item to check against. item: Item to check. Raises: ValueError: if graphs do not match." 4153,_get_graph_from_inputs,tensorflow/tensorflow/python/framework/ops.py,6097,function,"Returns the appropriate graph to use for the given inputs. This library method provides a consistent algorithm for choosing the graph in which an Operation should be constructed: 1. If the default graph is being used to construct a function, we use the default graph. 2. If the ""graph"" is specified explicitly, we validate that all of the inputs in ""op_input_list"" are compatible with that graph. 3. 
Otherwise, we attempt to select a graph from the first Operation- or Tensor-valued input in ""op_input_list"", and validate that all other such inputs are in the same graph. 4. If the graph was not specified and it could not be inferred from ""op_input_list"", we attempt to use the default graph. Args: op_input_list: A list of inputs to an operation, which may include `Tensor`, `Operation`, and other objects that may be converted to a graph element. graph: (Optional) The explicit graph to use. Raises: TypeError: If op_input_list is not a list or tuple, or if graph is not a Graph. ValueError: If a graph is explicitly passed and not all inputs are from it, or if the inputs are from multiple graphs, or we could not find a graph and there was no default graph. Returns: The appropriate graph to use for the given inputs." 4154,GraphKeys,tensorflow/tensorflow/python/framework/ops.py,6168,class,"Standard names to use for graph collections. The standard library uses various well-known names to collect and retrieve values associated with a graph. For example, the `tf.Optimizer` subclasses default to optimizing the variables collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is specified, but it is also possible to pass an explicit list of variables. The following standard keys are defined: * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared across a distributed environment (model variables are a subset of these). See `tf.compat.v1.global_variables` for more details. Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`, and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`. * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each machine. Usually used for temporary variables, like counters. Note: use `tf.contrib.framework.local_variable` to add to this collection. * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the model for inference (feed forward). Note: use `tf.contrib.framework.model_variable` to add to this collection. * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will be trained by an optimizer. See `tf.compat.v1.trainable_variables` for more details. * `SUMMARIES`: the summary `Tensor` objects that have been created in the graph. See `tf.compat.v1.summary.merge_all` for more details. * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to produce input for a computation. See `tf.compat.v1.train.start_queue_runners` for more details. * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also keep moving averages. See `tf.compat.v1.moving_average_variables` for more details. * `REGULARIZATION_LOSSES`: regularization losses collected during graph construction. The following standard keys are _defined_, but their collections are **not** automatically populated as many of the others are: * `WEIGHTS` * `BIASES` * `ACTIVATIONS`" 4155,dismantle_graph,tensorflow/tensorflow/python/framework/ops.py,6313,function,"Cleans up reference cycles from a `Graph`. Helpful for making sure the garbage collector doesn't need to run after a temporary `Graph` is no longer needed. Args: graph: A `Graph` object to destroy. Neither it nor any of its ops are usable after this function runs." 4156,add_to_collection,tensorflow/tensorflow/python/framework/ops.py,6334,function,"Wrapper for `Graph.add_to_collection()` using the default graph. See `tf.Graph.add_to_collection` for more details. Args: name: The key for the collection.
For example, the `GraphKeys` class contains many standard names for collections. value: The value to add to the collection. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). @end_compatibility" 4157,add_to_collections,tensorflow/tensorflow/python/framework/ops.py,6354,function,"Wrapper for `Graph.add_to_collections()` using the default graph. See `tf.Graph.add_to_collections` for more details. Args: names: The key for the collections. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collections. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). @end_compatibility" 4158,get_collection_ref,tensorflow/tensorflow/python/framework/ops.py,6374,function,"Wrapper for `Graph.get_collection_ref()` using the default graph. See `tf.Graph.get_collection_ref` for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. Note that this returns the collection list itself, which can be modified in place to change the collection. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility" 4159,get_collection,tensorflow/tensorflow/python/framework/ops.py,6398,function,"Wrapper for `Graph.get_collection()` using the default graph. See `tf.Graph.get_collection` for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose `name` attribute matches using `re.match`. Items without a `name` attribute are never returned if a scope is supplied and the choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility" 4160,get_all_collection_keys,tensorflow/tensorflow/python/framework/ops.py,6426,function,Returns a list of collections used in the default graph. 4161,name_scope,tensorflow/tensorflow/python/framework/ops.py,6431,function,"Internal-only entry point for `name_scope*`. Internal ops do not use the public API and instead rely on `ops.name_scope` regardless of the execution mode. This function dispatches to the correct `name_scope*` implementation based on the arguments provided and the current mode. Specifically, * if `values` contains a graph tensor `Graph.name_scope` is used; * `name_scope_v1` is used in graph mode; * `name_scope_v2` -- in eager mode. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. skip_on_eager: Indicates to return NullContextmanager if executing eagerly. By default this is True since naming tensors and operations in eager mode has little use and causes unnecessary performance overhead.
However, it is important to preserve variable names since they are often useful for debugging and saved models. Returns: `name_scope*` context manager." 4162,internal_name_scope_v1,tensorflow/tensorflow/python/framework/ops.py,6476,class,Graph-only version of `name_scope_v1`. 4163,name_scope_v1,tensorflow/tensorflow/python/framework/ops.py,6552,class,"A context manager for use when defining a Python op. This context manager validates that the given `values` are from the same graph, makes that graph the default graph, and pushes a name scope in that graph (see `tf.Graph.name_scope` for more details on that). For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope(name, ""MyOp"", [a, b, c]) as scope: a = tf.convert_to_tensor(a, name=""a"") b = tf.convert_to_tensor(b, name=""b"") c = tf.convert_to_tensor(c, name=""c"") # Define some computation that uses `a`, `b`, and `c`. return foo_op(..., name=scope) ```" 4164,name_scope_v2,tensorflow/tensorflow/python/framework/ops.py,6603,class,"A context manager for use when defining a Python op. This context manager pushes a name scope, which will make the name of all operations added within it have a prefix. For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope(""MyOp"") as scope: a = tf.convert_to_tensor(a, name=""a"") b = tf.convert_to_tensor(b, name=""b"") c = tf.convert_to_tensor(c, name=""c"") # Define some computation that uses `a`, `b`, and `c`. return foo_op(..., name=scope) ``` When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`, and `MyOp/c`. Inside a `tf.function`, if the scope name already exists, the name will be made unique by appending `_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`, etc." 4165,strip_name_scope,tensorflow/tensorflow/python/framework/ops.py,6695,function,"Removes name scope from a name. Args: name: A `string` name. export_scope: Optional `string`. Name scope to remove. Returns: Name with name scope removed, or the original name if export_scope is None." 4166,prepend_name_scope,tensorflow/tensorflow/python/framework/ops.py,6723,function,"Prepends name scope to a name. Args: name: A `string` name. import_scope: Optional `string`. Name scope to add. Returns: Name with name scope added, or the original name if import_scope is None." 4167,op_scope,tensorflow/tensorflow/python/framework/ops.py,6754,function,"DEPRECATED. Same as name_scope above, just different argument order." 4168,register_proto_function,tensorflow/tensorflow/python/framework/ops.py,6765,function,"Registers `to_proto` and `from_proto` functions for collection_name. `to_proto` function converts a Python object to the corresponding protocol buffer, and returns the protocol buffer. `from_proto` function converts protocol buffer into a Python object, and returns the object. Args: collection_name: Name of the collection. proto_type: Protobuf type, such as `saver_pb2.SaverDef`, `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`. to_proto: Function that implements Python object to protobuf conversion. from_proto: Function that implements protobuf to Python object conversion." 4169,get_collection_proto_type,tensorflow/tensorflow/python/framework/ops.py,6793,function,Returns the proto_type for collection_name. 4170,get_to_proto_function,tensorflow/tensorflow/python/framework/ops.py,6801,function,Returns the to_proto function for collection_name.
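As a rough illustration of the `name_scope_v2` convention documented above, a sketch (the op `my_op` and the printed names are illustrative, not from the index):
```python
import tensorflow as tf

def my_op(a, b, name=None):
    # Ops created under the scope receive a "MyOp/" name prefix.
    with tf.name_scope(name or "MyOp") as scope:
        a = tf.convert_to_tensor(a, name="a")  # -> MyOp/a
        b = tf.convert_to_tensor(b, name="b")  # -> MyOp/b
        return tf.add(a, b, name=scope)        # -> MyOp

g = tf.Graph()
with g.as_default():
    my_op(1.0, 2.0)
    print([op.name for op in g.get_operations()])
    # expected: ['MyOp/a', 'MyOp/b', 'MyOp']
```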
4171,get_from_proto_function,tensorflow/tensorflow/python/framework/ops.py,6809,function,Returns the from_proto function for collection_name. 4172,_operation_conversion_error,tensorflow/tensorflow/python/framework/ops.py,6817,function,Produce a nice error if someone converts an Operation to a Tensor. 4173,_op_to_colocate_with,tensorflow/tensorflow/python/framework/ops.py,6824,function,Operation object corresponding to v to use for colocation constraints. 4174,_is_keras_symbolic_tensor,tensorflow/tensorflow/python/framework/ops.py,6853,function, 4175,to_raw_op,tensorflow/tensorflow/python/framework/ops.py,6881,function,"Make a given op wrapper function `f` raw. Raw op wrappers can only be called with keyword arguments. Args: f: An op wrapper function to make raw. Returns: Raw `f`." 4176,raise_from_not_ok_status,tensorflow/tensorflow/python/framework/ops.py,6899,function, 4177,add_exit_callback_to_default_func_graph,tensorflow/tensorflow/python/framework/ops.py,6906,function,"Add a callback to run when the default function graph goes out of scope. Usage: ```python @tf.function def fn(x, v): expensive = expensive_object(v) add_exit_callback_to_default_func_graph(lambda: expensive.release()) return g(x, expensive) fn(x=tf.constant(...), v=...) # `expensive` has been released. ``` Args: fn: A callable that takes no arguments and whose output is ignored. To be executed when exiting func graph scope. Raises: RuntimeError: If executed when the current default graph is not a FuncGraph, or not currently executing in function creation mode (e.g., if inside an init_scope)." 4178,_reconstruct_sequence_inputs,tensorflow/tensorflow/python/framework/ops.py,6939,function,"Regroups a flat list of input tensors into scalar and sequence inputs. Args: op_def: The `op_def_pb2.OpDef` (for knowing the input types) inputs: a list of input `Tensor`s to the op. attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define how long each sequence is) Returns: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs)." 4179,_TensorIterator,tensorflow/tensorflow/python/framework/ops.py,6975,class,Iterates over the leading dim of a Tensor. Performs no error checks. 4180,set_int_list_attr,tensorflow/tensorflow/python/framework/ops.py,6998,function,TF internal method used to set a list(int) attribute in the node_def. 
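For context on the `to_raw_op` entry above: raw op wrappers, as exposed under `tf.raw_ops`, must be called with keyword arguments only. A small sketch:
```python
import tensorflow as tf

# Keyword-only invocation of a raw op wrapper.
s = tf.raw_ops.AddV2(x=tf.constant(1), y=tf.constant(2))
print(s.numpy())  # 3

# A positional call such as tf.raw_ops.AddV2(tf.constant(1), tf.constant(2))
# raises a TypeError.
```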
4181,OpsEnableAndDisableEagerTest,tensorflow/tensorflow/python/framework/ops_enable_eager_test.py,26,class, 4182,ResourceTest,tensorflow/tensorflow/python/framework/ops_test.py,67,class, 4183,TensorAndShapeTest,tensorflow/tensorflow/python/framework/ops_test.py,95,class, 4184,IndexedSlicesTest,tensorflow/tensorflow/python/framework/ops_test.py,464,class, 4185,IndexedSlicesSpecTest,tensorflow/tensorflow/python/framework/ops_test.py,509,class, 4186,NodeDefConstructorTest,tensorflow/tensorflow/python/framework/ops_test.py,638,class, 4187,_apply_op,tensorflow/tensorflow/python/framework/ops_test.py,645,function, 4188,OperationTest,tensorflow/tensorflow/python/framework/ops_test.py,654,class, 4189,CreateOpTest,tensorflow/tensorflow/python/framework/ops_test.py,1128,class, 4190,CreateOpFromTFOperationTest,tensorflow/tensorflow/python/framework/ops_test.py,1192,class, 4191,ApplyOpTest,tensorflow/tensorflow/python/framework/ops_test.py,1353,class, 4192,NameStackTest,tensorflow/tensorflow/python/framework/ops_test.py,1406,class, 4193,NameTest,tensorflow/tensorflow/python/framework/ops_test.py,1526,class, 4194,DeviceTest,tensorflow/tensorflow/python/framework/ops_test.py,1596,class, 4195,MultithreadedGraphStateTest,tensorflow/tensorflow/python/framework/ops_test.py,1855,class, 4196,ObjectWithName,tensorflow/tensorflow/python/framework/ops_test.py,2057,class, 4197,CollectionTest,tensorflow/tensorflow/python/framework/ops_test.py,2067,class, 4198,_CopyGrad,tensorflow/tensorflow/python/framework/ops_test.py,2209,function, 4199,_CopyOverrideGrad,tensorflow/tensorflow/python/framework/ops_test.py,2215,function, 4200,RegistrationTest,tensorflow/tensorflow/python/framework/ops_test.py,2220,class, 4201,ComparisonTest,tensorflow/tensorflow/python/framework/ops_test.py,2248,class, 4202,ControlDependenciesTest,tensorflow/tensorflow/python/framework/ops_test.py,2260,class, 4203,OpScopeTest,tensorflow/tensorflow/python/framework/ops_test.py,2467,class, 4204,InitScopeTest,tensorflow/tensorflow/python/framework/ops_test.py,2605,class, 4205,GraphTest,tensorflow/tensorflow/python/framework/ops_test.py,2907,class, 4206,AttrScopeTest,tensorflow/tensorflow/python/framework/ops_test.py,3027,class, 4207,KernelLabelTest,tensorflow/tensorflow/python/framework/ops_test.py,3076,class, 4208,AsGraphDefTest,tensorflow/tensorflow/python/framework/ops_test.py,3109,class, 4209,_calc_a_forward_flops,tensorflow/tensorflow/python/framework/ops_test.py,3171,function, 4210,StatisticsTest,tensorflow/tensorflow/python/framework/ops_test.py,3175,class, 4211,DeviceStackTest,tensorflow/tensorflow/python/framework/ops_test.py,3199,class, 4212,ColocationGroupTest,tensorflow/tensorflow/python/framework/ops_test.py,3250,class, 4213,DeprecatedTest,tensorflow/tensorflow/python/framework/ops_test.py,3409,class, 4214,NameScopeTest,tensorflow/tensorflow/python/framework/ops_test.py,3429,class, 4215,EnableEagerExecutionTest,tensorflow/tensorflow/python/framework/ops_test.py,3479,class, 4216,_TupleTensor,tensorflow/tensorflow/python/framework/ops_test.py,3493,class,`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading. 4217,_TupleTensorSpec,tensorflow/tensorflow/python/framework/ops_test.py,3514,class, 4218,_MyTuple,tensorflow/tensorflow/python/framework/ops_test.py,3532,class,Pretend user-side class for `ConvertToCompositeTensorTest`.
4219,CustomConvertToCompositeTensorTest,tensorflow/tensorflow/python/framework/ops_test.py,3553,class, 4220,PackEagerTensorTest,tensorflow/tensorflow/python/framework/ops_test.py,3570,class, 4221,ProtoTest,tensorflow/tensorflow/python/framework/proto_test.py,28,class, 4222,_get_typename,tensorflow/tensorflow/python/framework/python_memory_checker.py,33,function,Return a human-readable pretty type name string. 4223,_create_python_object_snapshot,tensorflow/tensorflow/python/framework/python_memory_checker.py,44,function, 4224,_snapshot_diff,tensorflow/tensorflow/python/framework/python_memory_checker.py,53,function, 4225,_PythonMemoryChecker,tensorflow/tensorflow/python/framework/python_memory_checker.py,64,class,Python memory leak detection class. 4226,_truncate_seed,tensorflow/tensorflow/python/framework/random_seed.py,37,function, 4227,get_seed,tensorflow/tensorflow/python/framework/random_seed.py,43,function,"Returns the local seeds an operation should use given an op-specific seed. Given an operation-specific seed, `op_seed`, this helper function returns two seeds derived from graph-level and op-level seeds. Many random operations internally use the two seeds to allow the user to change the seed globally for a graph, or for only specific operations. For details on how the graph-level seed interacts with op seeds, see `tf.compat.v1.random.set_random_seed`. Args: op_seed: integer. Returns: A tuple of two integers that should be used for the local seed of this operation." 4228,set_random_seed,tensorflow/tensorflow/python/framework/random_seed.py,93,function,"Sets the graph-level random seed for the default graph. Operations that rely on a random seed actually derive it from two seeds: the graph-level and operation-level seeds. This sets the graph-level seed. Its interactions with operation-level seeds are as follows: 1. If neither the graph-level nor the operation seed is set: A random seed is used for this op. 2. If the graph-level seed is set, but the operation seed is not: The system deterministically picks an operation seed in conjunction with the graph-level seed so that it gets a unique random sequence. Within the same version of tensorflow and user code, this sequence is deterministic. However across different versions, this sequence might change. If the code depends on particular seeds to work, specify both graph-level and operation-level seeds explicitly. 3. If the graph-level seed is not set, but the operation seed is set: A default graph-level seed and the specified operation seed are used to determine the random sequence. 4. If both the graph-level and the operation seed are set: Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples: To generate different sequences across sessions, set neither graph-level nor op-level seeds: ```python a = tf.random.uniform([1]) b = tf.random.normal([1]) print(""Session 1"") with tf.compat.v1.Session() as sess1: print(sess1.run(a)) # generates 'A1' print(sess1.run(a)) # generates 'A2' print(sess1.run(b)) # generates 'B1' print(sess1.run(b)) # generates 'B2' print(""Session 2"") with tf.compat.v1.Session() as sess2: print(sess2.run(a)) # generates 'A3' print(sess2.run(a)) # generates 'A4' print(sess2.run(b)) # generates 'B3' print(sess2.run(b)) # generates 'B4' ``` To generate the same repeatable sequence for an op across sessions, set the seed for the op: ```python a = tf.random.uniform([1], seed=1) b = tf.random.normal([1]) # Repeatedly running this block with the same graph will generate the same # sequence of values for 'a', but different sequences of values for 'b'. print(""Session 1"") with tf.compat.v1.Session() as sess1: print(sess1.run(a)) # generates 'A1' print(sess1.run(a)) # generates 'A2' print(sess1.run(b)) # generates 'B1' print(sess1.run(b)) # generates 'B2' print(""Session 2"") with tf.compat.v1.Session() as sess2: print(sess2.run(a)) # generates 'A1' print(sess2.run(a)) # generates 'A2' print(sess2.run(b)) # generates 'B3' print(sess2.run(b)) # generates 'B4' ``` To make the random sequences generated by all ops be repeatable across sessions, set a graph-level seed: ```python tf.compat.v1.random.set_random_seed(1234) a = tf.random.uniform([1]) b = tf.random.normal([1]) # Repeatedly running this block with the same graph will generate the same # sequences of 'a' and 'b'. print(""Session 1"") with tf.compat.v1.Session() as sess1: print(sess1.run(a)) # generates 'A1' print(sess1.run(a)) # generates 'A2' print(sess1.run(b)) # generates 'B1' print(sess1.run(b)) # generates 'B2' print(""Session 2"") with tf.compat.v1.Session() as sess2: print(sess2.run(a)) # generates 'A1' print(sess2.run(a)) # generates 'A2' print(sess2.run(b)) # generates 'B1' print(sess2.run(b)) # generates 'B2' ``` Args: seed: integer." 4229,set_seed,tensorflow/tensorflow/python/framework/random_seed.py,199,function,"Sets the global random seed. Operations that rely on a random seed actually derive it from two seeds: the global and operation-level seeds. This sets the global seed. Its interactions with operation-level seeds are as follows: 1. If neither the global seed nor the operation seed is set: A randomly picked seed is used for this op. 2. If the global seed is set, but the operation seed is not: The system deterministically picks an operation seed in conjunction with the global seed so that it gets a unique random sequence. Within the same version of tensorflow and user code, this sequence is deterministic. However across different versions, this sequence might change. If the code depends on particular seeds to work, specify both global and operation-level seeds explicitly. 3. If the operation seed is set, but the global seed is not set: A default global seed and the specified operation seed are used to determine the random sequence. 4. If both the global and the operation seed are set: Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples: If neither the global seed nor the operation seed is set, we get different results for every call to the random op and every re-run of the program: ```python print(tf.random.uniform([1])) # generates 'A1' print(tf.random.uniform([1])) # generates 'A2' ``` (now close the program and run it again) ```python print(tf.random.uniform([1])) # generates 'A3' print(tf.random.uniform([1])) # generates 'A4' ``` If the global seed is set but the operation seed is not set, we get different results for every call to the random op, but the same sequence for every re-run of the program: ```python tf.random.set_seed(1234) print(tf.random.uniform([1])) # generates 'A1' print(tf.random.uniform([1])) # generates 'A2' ``` (now close the program and run it again) ```python tf.random.set_seed(1234) print(tf.random.uniform([1])) # generates 'A1' print(tf.random.uniform([1])) # generates 'A2' ``` The reason we get 'A2' instead of 'A1' on the second call of `tf.random.uniform` above is because the second call uses a different operation seed. Note that `tf.function` acts like a re-run of a program in this case. When the global seed is set but operation seeds are not set, the sequence of random numbers is the same for each `tf.function`. For example: ```python tf.random.set_seed(1234) @tf.function def f(): a = tf.random.uniform([1]) b = tf.random.uniform([1]) return a, b @tf.function def g(): a = tf.random.uniform([1]) b = tf.random.uniform([1]) return a, b print(f()) # prints '(A1, A2)' print(g()) # prints '(A1, A2)' ``` If the operation seed is set, we get different results for every call to the random op, but the same sequence for every re-run of the program: ```python print(tf.random.uniform([1], seed=1)) # generates 'A1' print(tf.random.uniform([1], seed=1)) # generates 'A2' ``` (now close the program and run it again) ```python print(tf.random.uniform([1], seed=1)) # generates 'A1' print(tf.random.uniform([1], seed=1)) # generates 'A2' ``` The reason we get 'A2' instead of 'A1' on the second call of `tf.random.uniform` above is because the same `tf.random.uniform` kernel (i.e. internal representation) is used by TensorFlow for all calls of it with the same arguments, and the kernel maintains an internal counter which is incremented every time it is executed, generating different results. Calling `tf.random.set_seed` will reset any such counters: ```python tf.random.set_seed(1234) print(tf.random.uniform([1], seed=1)) # generates 'A1' print(tf.random.uniform([1], seed=1)) # generates 'A2' tf.random.set_seed(1234) print(tf.random.uniform([1], seed=1)) # generates 'A1' print(tf.random.uniform([1], seed=1)) # generates 'A2' ``` When multiple identical random ops are wrapped in a `tf.function`, their behaviors change because the ops no longer share the same counter. For example: ```python @tf.function def foo(): a = tf.random.uniform([1], seed=1) b = tf.random.uniform([1], seed=1) return a, b print(foo()) # prints '(A1, A1)' print(foo()) # prints '(A2, A2)' @tf.function def bar(): a = tf.random.uniform([1]) b = tf.random.uniform([1]) return a, b print(bar()) # prints '(A1, A2)' print(bar()) # prints '(A3, A4)' ``` The second call of `foo` returns '(A2, A2)' instead of '(A1, A1)' because `tf.random.uniform` maintains an internal counter. If you want `foo` to return '(A1, A1)' every time, use the stateless random ops such as `tf.random.stateless_uniform`.
Also see `tf.random.experimental.Generator` for a new set of stateful random ops that use external variables to manage their states. Args: seed: integer." 4230,RandomSeedTest,tensorflow/tensorflow/python/framework/random_seed_test.py,27,class, 4231,Registry,tensorflow/tensorflow/python/framework/registry.py,36,class,Provides a registry for saving objects. 4232,bar,tensorflow/tensorflow/python/framework/registry_test.py,28,function, 4233,RegistryTest,tensorflow/tensorflow/python/framework/registry_test.py,32,class, 4234,smart_cond,tensorflow/tensorflow/python/framework/smart_cond.py,27,function,"Return `true_fn()` if predicate `pred` is true, else `false_fn()`. If `pred` is a bool or has a constant value, we return either `true_fn()` or `false_fn()`; otherwise we use `tf.cond` to dynamically route to both. Arguments: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using `tf.cond`. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. Raises: TypeError: If `true_fn` or `false_fn` is not callable." 4235,smart_constant_value,tensorflow/tensorflow/python/framework/smart_cond.py,62,function,"Return the bool value for `pred`, or None if `pred` had a dynamic value. Arguments: pred: A scalar, either a Python bool or tensor. Returns: True or False if `pred` has a constant boolean value, None otherwise. Raises: TypeError: If `pred` is not a Tensor or bool." 4236,smart_case,tensorflow/tensorflow/python/framework/smart_cond.py,93,function,"Like tf.case, except attempts to statically evaluate predicates. If any predicate in `pred_fn_pairs` is a bool or has a constant value, the associated callable will be called or omitted depending on its value. Otherwise this functions like tf.case. Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by `default` if none does. Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable." 4237,raise_exception,tensorflow/tensorflow/python/framework/smart_cond_test.py,32,function, 4238,SmartCondTest,tensorflow/tensorflow/python/framework/smart_cond_test.py,36,class, 4239,SmartCaseTest,tensorflow/tensorflow/python/framework/smart_cond_test.py,102,class, 4240,SmartConstantValueTest,tensorflow/tensorflow/python/framework/smart_cond_test.py,145,class, 4241,SparseTensor,tensorflow/tensorflow/python/framework/sparse_tensor.py,47,class,"Represents a sparse tensor. TensorFlow represents a sparse tensor as three separate dense tensors: `indices`, `values`, and `dense_shape`. In Python, the three tensors are collected into a `SparseTensor` class for ease of use. If you have separate `indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor` object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)` comprises the following components, where `N` and `ndims` are the number of values and number of dimensions in the `SparseTensor`, respectively: * `indices`: A 2-D int64 tensor of shape `[N, ndims]`, which specifies the indices of the elements in the sparse tensor that contain nonzero values (elements are zero-indexed). For example, `indices=[[1,3], [2,4]]` specifies that the elements with indexes of [1,3] and [2,4] have nonzero values. * `values`: A 1-D tensor of any type and shape `[N]`, which supplies the values for each element in `indices`. For example, given `indices=[[1,3], [2,4]]`, the parameter `values=[18, 3.6]` specifies that element [1,3] of the sparse tensor has a value of 18, and element [2,4] of the tensor has a value of 3.6. * `dense_shape`: A 1-D int64 tensor of shape `[ndims]`, which specifies the dense_shape of the sparse tensor. Takes a list indicating the number of elements in each dimension. For example, `dense_shape=[3,6]` specifies a two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a one-dimensional tensor with 9 elements. The corresponding dense tensor satisfies: ```python dense.shape = dense_shape dense[tuple(indices[i])] = values[i] ``` By convention, `indices` should be sorted in row-major order (or equivalently lexicographic order on the tuples `indices[i]`). This is not enforced when `SparseTensor` objects are constructed, but most ops assume correct ordering. If the ordering of sparse tensor `st` is wrong, a fixed version can be obtained by calling `tf.sparse.reorder(st)`. Example: The sparse tensor ```python SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) ``` represents the dense tensor ```python [[1, 0, 0, 0] [0, 0, 2, 0] [0, 0, 0, 0]] ```" 4242,SparseTensorSpec,tensorflow/tensorflow/python/framework/sparse_tensor.py,270,class,Type specification for a `tf.sparse.SparseTensor`. 4243,convert_to_tensor_or_sparse_tensor,tensorflow/tensorflow/python/framework/sparse_tensor.py,415,function,"Converts value to a `SparseTensor` or `Tensor`. Args: value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: Optional name to use if a new `Tensor` is created. Returns: A `SparseTensor` or `Tensor` based on `value`. Raises: RuntimeError: If result type is incompatible with `dtype`." 4244,is_sparse,tensorflow/tensorflow/python/framework/sparse_tensor.py,443,function,"Check whether `x` is sparse. Check whether an object is a `tf.sparse.SparseTensor` or `tf.compat.v1.SparseTensorValue`. Args: x: A python object to check. Returns: `True` iff `x` is a `tf.sparse.SparseTensor` or `tf.compat.v1.SparseTensorValue`." 4245,SparseTensorTest,tensorflow/tensorflow/python/framework/sparse_tensor_test.py,38,class, 4246,ConvertToTensorOrSparseTensorTest,tensorflow/tensorflow/python/framework/sparse_tensor_test.py,101,class, 4247,SparseTensorShapeTest,tensorflow/tensorflow/python/framework/sparse_tensor_test.py,129,class, 4248,SparseTensorSpecTest,tensorflow/tensorflow/python/framework/sparse_tensor_test.py,208,class, 4249,_recursive_apply,tensorflow/tensorflow/python/framework/subscribe.py,30,function,"Helper method to recursively apply a function to structure of tensors. 
The structure of the tensors should take a form similar to fetches in `tf.compat.v1.Session` and includes single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. Args: tensors: Single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. apply_fn: Function to apply to each `Tensor` and should return a `Tensor`. Returns: The modified tensors with the same structure. Raises: `TypeError` if there is an undefined type in the tensors structure." 4250,_ControlOutputCache,tensorflow/tensorflow/python/framework/subscribe.py,67,class,Helper class to manage calculating and caching control_outputs in graph. 4251,_subscribe_new,tensorflow/tensorflow/python/framework/subscribe.py,109,function,"Helper method that subscribes a single tensor to a list of side_effects. Args: tensor: `tf.Tensor` side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects." 4252,_subscribe_extend,tensorflow/tensorflow/python/framework/subscribe.py,156,function,"Helper method to extend the list of side_effects for a subscribed tensor. Args: tensor: A `tf.Tensor` as returned by subscribe(). side_effects: List of side_effect functions, see subscribe for details. Returns: The given subscribed tensor (for API consistency)." 4253,_is_subscribed_identity,tensorflow/tensorflow/python/framework/subscribe.py,184,function,"Checks if the given tensor is an identity op returned by `subscribe()`. Args: tensor: A `tf.Tensor` to check. Returns: True if the given tensor matches the criteria for subscription identities: its op type is `Identity`, its name matches the name of its input and conforms to the convention for subscribed nodes. False otherwise." 4254,_subscribe,tensorflow/tensorflow/python/framework/subscribe.py,218,function,"Helper method that subscribes a single tensor to a list of side_effects. This method will check if the given tensor has already been subscribed or if it's a tensor returned by a previous call to `subscribe()` and, if so, will reuse the existing identity op, appending the given side effects to the list of existing ones. Args: tensor: The `tf.Tensor` to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it has already been subscribed." 4255,_preserve_control_flow_context,tensorflow/tensorflow/python/framework/subscribe.py,261,function,"Preserve the control flow context for the given tensor. Sets the graph context to the tensor's context so that side effect ops are added under the same context. This is needed when subscribing to tensors defined within a conditional block or a while loop. In these cases we need the side-effect ops to be created within the same control flow context as the tensor they are attached to. Args: tensor: tensor whose context should be preserved. Yields: None" 4256,_scoped_subscribe,tensorflow/tensorflow/python/framework/subscribe.py,291,function,"Helper method that subscribes a single tensor to a list of side_effects. This is a thin wrapper around `_subscribe` and ensures that the side effect ops are added within the same device and control flow context of the subscribed tensor. Args: tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it has already been subscribed." 4257,subscribe,tensorflow/tensorflow/python/framework/subscribe.py,313,function,"Subscribe to a tensor. This method will attach side effect graphs to a given set of tensors. The set of tensors follows from session.run and supports single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It returns the tensors in the same passed in structure, but as clones with side effects applied. The supplied side effect graphs are specified as a constructor function which takes the target tensor and constructs a side effect graph and returns a list of ops that should be control dependencies on fetching the tensor. It will append 'subscription' to the name scope of the tensor for every node in the side effect graph. These control dependencies are what trigger the side effects. Subscribe will construct the additions to your graph and return the created identity tensor downstream of the control dependencies. Use these tensors as you would normally in the rest of your tensorflow code. If a given tensor has already been subscribed or a tensor returned by a call to subscribe is passed, the previously created identity tensor will be reused and the side effect graphs will be added to the existing ones. Args: tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format follows from `Session.run` and supports single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. side_effects: Function(s) that take a `Tensor`, construct a subgraph, and return a nonempty list of control dependencies. This can be a single function or list of functions. Returns: Subscribed tensors, which are identity copies of the passed in tensors in the same passed in structure, but the graph has been modified such that these are downstream of the control dependencies for the side effect graphs. Use these functionally equivalent tensors instead of the passed in tensors for further construction or running." 4258,SubscribeTest,tensorflow/tensorflow/python/framework/subscribe_test.py,39,class, 4259,_default_conversion_function,tensorflow/tensorflow/python/framework/tensor_conversion_registry.py,50,function, 4260,register_tensor_conversion_function,tensorflow/tensorflow/python/framework/tensor_conversion_registry.py,57,function,"Registers a function for converting objects of `base_type` to `Tensor`. The conversion function must have the following signature: ```python def conversion_func(value, dtype=None, name=None, as_ref=False): # ... ``` It must return a `Tensor` with the given `dtype` if specified. If the conversion function creates a new `Tensor`, it should use the given `name` if specified. All exceptions will be propagated to the caller. The conversion function may return `NotImplemented` for some inputs. In this case, the conversion process will continue to try subsequent conversion functions. If `as_ref` is true, the function must return a `Tensor` reference, such as a `Variable`. NOTE: The conversion functions will execute in order of priority, followed by order of registration. To ensure that a conversion function `F` runs before another conversion function `G`, ensure that `F` is registered with a smaller priority than `G`.
Args: base_type: The base type or tuple of base types for all objects that `conversion_func` accepts. conversion_func: A function that converts instances of `base_type` to `Tensor`. priority: Optional integer that indicates the priority for applying this conversion function. Conversion functions with smaller priority values run earlier than conversion functions with larger priority values. Defaults to 100. Raises: TypeError: If the arguments do not have the appropriate type." 4261,get,tensorflow/tensorflow/python/framework/tensor_conversion_registry.py,114,function,"Get conversion function for objects of `cls`. Args: query: The type to query for. Returns: A list of conversion functions in increasing order of priority." 4262,enable_v2_tensorshape,tensorflow/tensorflow/python/framework/tensor_shape.py,35,function,"In TensorFlow 2.0, iterating over a TensorShape instance returns values. This enables the new behavior. Concretely, `tensor_shape[i]` returned a Dimension instance in V1, but in V2 it returns either an integer or None. Examples: ``` ####################### # If you had this in V1: value = tensor_shape[i].value # Do this in V2 instead: value = tensor_shape[i] ####################### # If you had this in V1: for dim in tensor_shape: value = dim.value print(value) # Do this in V2 instead: for value in tensor_shape: print(value) ####################### # If you had this in V1: dim = tensor_shape[i] dim.assert_is_compatible_with(other_shape) # or using any other shape method # Do this in V2 instead: if tensor_shape.rank is None: dim = Dimension(None) else: dim = tensor_shape.dims[i] dim.assert_is_compatible_with(other_shape) # or using any other shape method # The V2 suggestion above is more explicit, which will save you from # the following trap (present in V1): # you might do in-place modifications to `dim` and expect them to be reflected # in `tensor_shape[i]`, but they would not be. ```" 4263,disable_v2_tensorshape,tensorflow/tensorflow/python/framework/tensor_shape.py,87,function,"Disables the V2 TensorShape behavior and reverts to V1 behavior. See docstring for `enable_v2_tensorshape` for details about the new behavior." 4264,dimension_value,tensorflow/tensorflow/python/framework/tensor_shape.py,99,function,"Compatibility utility required to allow for both V1 and V2 behavior in TF. Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to coexist with the new behavior. This utility is a bridge between the two. When accessing the value of a TensorShape dimension, use this utility, like this: ``` # If you had this in your V1 code: value = tensor_shape[i].value # Use `dimension_value` as direct replacement compatible with both V1 & V2: value = dimension_value(tensor_shape[i]) # This would be the V2 equivalent: value = tensor_shape[i] # Warning: this will return the dim value in V2! ``` Arguments: dimension: Either a `Dimension` instance, an integer, or None. Returns: A plain value, i.e. an integer or None." 4265,dimension_at_index,tensorflow/tensorflow/python/framework/tensor_shape.py,133,function,"Compatibility utility required to allow for both V1 and V2 behavior in TF. Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to coexist with the new behavior. This utility is a bridge between the two.
If you want to retrieve the Dimension instance corresponding to a certain index in a TensorShape instance, use this utility, like this: ``` # If you had this in your V1 code: dim = tensor_shape[i] # Use `dimension_at_index` as direct replacement compatible with both V1 & V2: dim = dimension_at_index(tensor_shape, i) # Another possibility would be this, but WARNING: it only works if the # tensor_shape instance has a defined rank. dim = tensor_shape.dims[i] # `dims` may be None if the rank is undefined! # In native V2 code, we recommend instead being more explicit: if tensor_shape.rank is None: dim = Dimension(None) else: dim = tensor_shape.dims[i] # Being more explicit will save you from the following trap (present in V1): # you might do in-place modifications to `dim` and expect them to be reflected # in `tensor_shape[i]`, but they would not be (as the Dimension object was # instantiated on the fly). ``` Arguments: shape: A TensorShape instance. index: An integer index. Returns: A dimension object." 4266,Dimension,tensorflow/tensorflow/python/framework/tensor_shape.py,180,class,Represents the value of one dimension in a TensorShape. 4267,as_dimension,tensorflow/tensorflow/python/framework/tensor_shape.py,704,function,"Converts the given value to a Dimension. A Dimension input will be returned unmodified. An input of `None` will be converted to an unknown Dimension. An integer input will be converted to a Dimension with that value. Args: value: The value to be converted. Returns: A Dimension corresponding to the given value." 4268,TensorShape,tensorflow/tensorflow/python/framework/tensor_shape.py,724,class,"Represents the shape of a `Tensor`. A `TensorShape` represents a possibly-partial shape specification for a `Tensor`. It may be one of the following: * *Fully-known shape:* has a known number of dimensions and a known size for each dimension. e.g. `TensorShape([16, 256])` * *Partially-known shape:* has a known number of dimensions, and an unknown size for one or more dimensions. e.g. `TensorShape([None, 256])` * *Unknown shape:* has an unknown number of dimensions, and an unknown size in all dimensions. e.g. `TensorShape(None)` If a tensor is produced by an operation of type `""Foo""`, its shape may be inferred if there is a registered shape function for `""Foo""`. See [Shape functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c) for details of shape functions and how to register them. Alternatively, the shape may be set explicitly using `tf.Tensor.set_shape`." 4269,as_shape,tensorflow/tensorflow/python/framework/tensor_shape.py,1230,function,Converts the given object to a TensorShape. 4270,unknown_shape,tensorflow/tensorflow/python/framework/tensor_shape.py,1238,function,"Returns an unknown TensorShape, optionally with a known rank. Args: rank: (Optional) If specified, the number of dimensions in the shape. **kwargs: For backwards compatibility. Returns: An unknown TensorShape. Raises: TypeError: In case of invalid arguments." 4271,DimensionDivTest,tensorflow/tensorflow/python/framework/tensor_shape_div_test.py,28,class, 4272,DimensionTest,tensorflow/tensorflow/python/framework/tensor_shape_test.py,30,class, 4273,ShapeTest,tensorflow/tensorflow/python/framework/tensor_shape_test.py,232,class, 4274,DenseSpec,tensorflow/tensorflow/python/framework/tensor_spec.py,32,class,"Describes a dense object with shape, dtype, and name." 4275,TensorSpec,tensorflow/tensorflow/python/framework/tensor_spec.py,121,class,"Describes a tf.Tensor.
Metadata for describing the `tf.Tensor` objects accepted or returned by some TensorFlow APIs." 4276,BoundedTensorSpec,tensorflow/tensorflow/python/framework/tensor_spec.py,197,class,"A `TensorSpec` that specifies minimum and maximum values. Example usage: ```python spec = tensor_spec.BoundedTensorSpec((1, 2, 3), tf.float32, 0, (5, 5, 5)) tf_minimum = tf.convert_to_tensor(spec.minimum, dtype=spec.dtype) tf_maximum = tf.convert_to_tensor(spec.maximum, dtype=spec.dtype) ``` Bounds are meant to be inclusive. This is especially important for integer types. The following spec will be satisfied by tensors with values in the set {0, 1, 2}: ```python spec = tensor_spec.BoundedTensorSpec((3, 5), tf.int32, 0, 2) ```" 4277,TensorSpecTest,tensorflow/tensorflow/python/framework/tensor_spec_test.py,36,class, 4278,BoundedTensorSpecTest,tensorflow/tensorflow/python/framework/tensor_spec_test.py,172,class, 4279,ExtractBitsFromFloat16,tensorflow/tensorflow/python/framework/tensor_util.py,46,function, 4280,SlowAppendFloat16ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,50,function, 4281,_MediumAppendFloat16ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,55,function, 4282,ExtractBitsFromBFloat16,tensorflow/tensorflow/python/framework/tensor_util.py,62,function, 4283,SlowAppendBFloat16ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,67,function, 4284,FastAppendBFloat16ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,72,function, 4285,SlowAppendFloat32ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,126,function, 4286,SlowAppendFloat64ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,129,function, 4287,SlowAppendIntArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,132,function, 4288,SlowAppendInt64ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,135,function, 4289,SlowAppendQIntArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,138,function, 4290,SlowAppendUInt32ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,141,function, 4291,SlowAppendUInt64ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,144,function, 4292,SlowAppendComplex64ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,147,function, 4293,SlowAppendComplex128ArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,151,function, 4294,SlowAppendObjectArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,155,function, 4295,SlowAppendBoolArrayToTensorProto,tensorflow/tensorflow/python/framework/tensor_util.py,158,function, 4296,GetFromNumpyDTypeDict,tensorflow/tensorflow/python/framework/tensor_util.py,187,function, 4297,GetNumpyAppendFn,tensorflow/tensorflow/python/framework/tensor_util.py,195,function, 4298,TensorShapeProtoToList,tensorflow/tensorflow/python/framework/tensor_util.py,208,function,"Convert a TensorShapeProto to a list. Args: shape: A TensorShapeProto. Returns: List of integers representing the dimensions of the tensor." 4299,_GetDenseDimensions,tensorflow/tensorflow/python/framework/tensor_util.py,220,function,Returns the inferred dense dimensions of a list of lists. 
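The `TensorSpec` entry above is the standard way to describe tensor metadata without holding values. A minimal sketch of its typical public-API use (assuming TF 2.x; the spec name "points" and the function are illustrative):

```python
import tensorflow as tf

# A TensorSpec carries shape/dtype/name metadata without holding values.
spec = tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="points")

# One common use: pin down the signature a tf.function accepts, so it
# traces once per spec rather than once per concrete input shape.
@tf.function(input_signature=[spec])
def row_sums(points):
  return tf.reduce_sum(points, axis=1)

print(row_sums(tf.ones([2, 3])))  # OK: [2, 3] is compatible with [None, 3]
```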
4300,_FlattenToStrings,tensorflow/tensorflow/python/framework/tensor_util.py,230,function, 4301,_check_failed,tensorflow/tensorflow/python/framework/tensor_util.py,247,function, 4302,_check_quantized,tensorflow/tensorflow/python/framework/tensor_util.py,253,function, 4303,_generate_isinstance_check,tensorflow/tensorflow/python/framework/tensor_util.py,263,function, 4304,_check_not_tensor,tensorflow/tensorflow/python/framework/tensor_util.py,281,function, 4305,_AssertCompatible,tensorflow/tensorflow/python/framework/tensor_util.py,310,function, 4306,_is_array_like,tensorflow/tensorflow/python/framework/tensor_util.py,340,function,Check if a given object is array-like. 4307,make_tensor_proto,tensorflow/tensorflow/python/framework/tensor_util.py,362,function,"Create a TensorProto. In TensorFlow 2.0, representing tensors as protos should no longer be a common workflow. That said, this utility function is still useful for generating TF Serving request protos: ```python request = tensorflow_serving.apis.predict_pb2.PredictRequest() request.model_spec.name = ""my_model"" request.model_spec.signature_name = ""serving_default"" request.inputs[""images""].CopyFrom(tf.make_tensor_proto(X_new)) ``` `make_tensor_proto` accepts ""values"" of a python scalar, a python list, a numpy ndarray, or a numpy scalar. If ""values"" is a python scalar or a python list, make_tensor_proto first converts it to a numpy ndarray. If dtype is None, the conversion tries its best to infer the right numpy data type. Otherwise, the resulting numpy array has a compatible data type with the given dtype. In either case above, the numpy ndarray (either caller-provided or auto-converted) must have a type compatible with dtype. `make_tensor_proto` then converts the numpy array to a tensor proto. If ""shape"" is None, the resulting tensor proto represents the numpy array precisely. Otherwise, ""shape"" specifies the tensor's shape and the numpy array cannot have more elements than ""shape"" specifies. Args: values: Values to put in the TensorProto. dtype: Optional tensor_pb2 DataType value. shape: List of integers representing the dimensions of tensor. verify_shape: Boolean that enables verification of a shape of values. allow_broadcast: Boolean that allows broadcasting of scalars and length-1 vectors. Cannot be true when verify_shape is true. Returns: A `TensorProto`. Depending on the type, it may contain data in the ""tensor_content"" attribute, which is not directly useful to Python programs. To access the values you should convert the proto back to a numpy ndarray with `tf.make_ndarray(proto)`. If `values` is a `TensorProto`, it is immediately returned; `dtype` and `shape` are ignored. Raises: TypeError: if unsupported types are provided. ValueError: if arguments have inappropriate values or if verify_shape is True and the shape of values does not equal the shape argument." 4308,MakeNdarray,tensorflow/tensorflow/python/framework/tensor_util.py,571,function,"Create a numpy ndarray from a tensor. Create a numpy ndarray with the same shape and data as the tensor. For example: ```python # Tensor a has shape (2,3) a = tf.constant([[1,2,3],[4,5,6]]) proto_tensor = tf.make_tensor_proto(a) # convert `tensor a` to a proto tensor tf.make_ndarray(proto_tensor) # output: array([[1, 2, 3], # [4, 5, 6]], dtype=int32) # output has shape (2,3) ``` Args: tensor: A TensorProto. Returns: A numpy array with the tensor contents. Raises: TypeError: if tensor has unsupported type." 
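Since `make_tensor_proto` and `MakeNdarray` are exposed publicly as `tf.make_tensor_proto` and `tf.make_ndarray`, a round trip between the two is a quick sanity check; a minimal sketch:

```python
import numpy as np
import tensorflow as tf

values = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)

# Numeric data typically lands in the proto's "tensor_content" bytes,
# so convert back with tf.make_ndarray rather than reading fields directly.
proto = tf.make_tensor_proto(values)
recovered = tf.make_ndarray(proto)
assert np.array_equal(values, recovered)
```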
4309,ShapeEquals,tensorflow/tensorflow/python/framework/tensor_util.py,651,function,"Returns True if ""tensor_proto"" has the given ""shape"". Args: tensor_proto: A TensorProto. shape: A tensor shape, expressed as a TensorShape, list, or tuple. Returns: True if ""tensor_proto"" has the given ""shape"", otherwise False. Raises: TypeError: If ""tensor_proto"" is not a TensorProto, or shape is not a TensorShape, list, or tuple." 4310,_ConstantValue,tensorflow/tensorflow/python/framework/tensor_util.py,675,function, 4311,constant_value,tensorflow/tensorflow/python/framework/tensor_util.py,806,function,"Returns the constant value of the given tensor, if efficiently calculable. This function attempts to partially evaluate the given tensor, and returns its value as a numpy ndarray if this succeeds. Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result, it will no longer be possible to feed a different value for `tensor`. This allows the result of this function to influence the graph that is constructed, and permits static shape optimizations. Args: tensor: The Tensor to be evaluated. partial: If True, the returned numpy array is allowed to have partially evaluated values. Values that can't be evaluated will be None. Returns: A numpy ndarray containing the constant value of the given `tensor`, or None if it cannot be calculated. Raises: TypeError: if tensor is not an ops.Tensor." 4312,constant_value_as_shape,tensorflow/tensorflow/python/framework/tensor_util.py,848,function,"A version of `constant_value()` that returns a `TensorShape`. This version should be used when a constant tensor value is interpreted as a (possibly partial) shape, e.g. in the shape function for `tf.reshape()`. By explicitly requesting a `TensorShape` as the return value, it is possible to represent unknown dimensions; by contrast, `constant_value()` is all-or-nothing. Args: tensor: The rank-0 or rank-1 Tensor to be evaluated. Returns: A `TensorShape` based on the constant value of the given `tensor`. Raises: ValueError: If the shape is rank-0 and is not statically known to be -1." 4313,is_tensor,tensorflow/tensorflow/python/framework/tensor_util.py,993,function,"Checks whether `x` is a TF-native type that can be passed to many TF ops. Use is_tensor to differentiate types that can be ingested by TensorFlow ops without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and `tf.RaggedTensor`) from types that need to be converted into tensors before they are ingested (e.g., numpy `ndarray` and Python scalars). For example, in the following code block: ```python if not tf.is_tensor(t): t = tf.convert_to_tensor(t) return t.dtype ``` we check to make sure that `t` is a tensor (and convert it if not) before accessing its `shape` and `dtype`. Args: x: A python object to check. Returns: `True` if `x` is a tensor or ""tensor-like"", `False` if not." 4314,shape_tensor,tensorflow/tensorflow/python/framework/tensor_util.py,1023,function,"Convert to an int32 or int64 tensor, defaulting to int32 if empty." 4315,maybe_set_static_shape,tensorflow/tensorflow/python/framework/tensor_util.py,1042,function,"Sets the shape of `tensor` to the `shape`'s constant value, if inferrable. This is a temporary workaround to fix shape inference across functional op boundaries. E.g. 
```python shape = tf.constant([3]) @tf.function def f(): u = tf.random.uniform(shape) return u ``` If we were to rely solely on C++ shape inference, the shape of `u` inside `f` would be unknown because C++ shape inference is not aware of the outer graph and all it sees is a Placeholder node when backtracing the captured tensor for `shape`. `maybe_set_static_shape` computes the static shape value of `shape` by traversing the `FuncGraph` boundaries and sets the correct shape. A longer term solution would be to fix C++ shape inference. Args: tensor: A tensor. shape: A shape tensor." 4316,TensorUtilTest,tensorflow/tensorflow/python/framework/tensor_util_test.py,43,class, 4317,IsTensorTest,tensorflow/tensorflow/python/framework/tensor_util_test.py,775,class, 4318,ConstantValueTest,tensorflow/tensorflow/python/framework/tensor_util_test.py,808,class, 4319,ConstantValueAsShapeTest,tensorflow/tensorflow/python/framework/tensor_util_test.py,1041,class, 4320,MaybeSetStaticShapeTest,tensorflow/tensorflow/python/framework/tensor_util_test.py,1182,class, 4321,ShapeTensorTest,tensorflow/tensorflow/python/framework/tensor_util_test.py,1228,class, 4322,TestCombination,tensorflow/tensorflow/python/framework/test_combinations.py,62,class,"Customize the behavior of `generate()` and the tests that it executes. Here is the sequence of steps for executing a test combination: 1. The test combination is evaluated for whether it should be executed in the given environment by calling `should_execute_combination`. 2. If the test combination is going to be executed, then the arguments for all combined parameters are validated. Some arguments can be handled in a special way. This is achieved by implementing that logic in `ParameterModifier` instances returned from `parameter_modifiers`. 3. Before executing the test, `context_managers` are installed around it." 4323,ParameterModifier,tensorflow/tensorflow/python/framework/test_combinations.py,115,class,Customizes the behavior of a particular parameter. 4324,OptionalParameter,tensorflow/tensorflow/python/framework/test_combinations.py,172,class,A parameter that is optional in `combine()` and in the test signature. 4325,generate,tensorflow/tensorflow/python/framework/test_combinations.py,182,function,"A decorator for generating combinations of a test method or a test class. Parameters of the test method must match by name to get the corresponding value of the combination. Tests must accept all parameters that are passed other than the ones that are `OptionalParameter`. Args: combinations: a list of dictionaries created using combine() and times(). test_combinations: a tuple of `TestCombination` instances that customize the execution of generated tests. Returns: a decorator that will cause the test method or the test class to be run under the specified conditions. Raises: ValueError: if any parameters were not accepted by the test method" 4326,_augment_with_special_arguments,tensorflow/tensorflow/python/framework/test_combinations.py,246,function, 4327,combine,tensorflow/tensorflow/python/framework/test_combinations.py,319,function,"Generate combinations based on its keyword arguments. Two sets of returned combinations can be concatenated using +. Their product can be computed using `times()`. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. 
Each key has one value - one of the corresponding keyword argument values." 4328,times,tensorflow/tensorflow/python/framework/test_combinations.py,356,function,"Generate a product of N sets of combinations. times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4]) Args: *combined: N lists of dictionaries that specify combinations. Returns: a list of dictionaries for each combination. Raises: ValueError: if some of the inputs have overlapping keys." 4329,NamedObject,tensorflow/tensorflow/python/framework/test_combinations.py,389,class,A class that translates an object into a good test name. 4330,_get_name,tensorflow/tensorflow/python/framework/test_combinations.py,410,function, 4331,TestingCombinationsTest,tensorflow/tensorflow/python/framework/test_combinations_test.py,29,class, 4332,CombineTheTestSuite,tensorflow/tensorflow/python/framework/test_combinations_test.py,133,class, 4333,is_xla_enabled,tensorflow/tensorflow/python/framework/test_util.py,93,function, 4334,is_mlir_bridge_enabled,tensorflow/tensorflow/python/framework/test_util.py,104,function, 4335,is_tfrt_enabled,tensorflow/tensorflow/python/framework/test_util.py,115,function, 4336,_get_object_count_by_type,tensorflow/tensorflow/python/framework/test_util.py,125,function, 4337,gpu_device_name,tensorflow/tensorflow/python/framework/test_util.py,129,function,Returns the name of a GPU device if available or the empty string. 4338,assert_ops_in_graph,tensorflow/tensorflow/python/framework/test_util.py,137,function,"Assert all expected operations are found. Args: expected_ops: `dict` of op name to op type. graph: Graph to check. Returns: `dict` of node name to node. Raises: ValueError: If the expected ops are not present in the graph." 4339,assert_equal_graph_def_v2,tensorflow/tensorflow/python/framework/test_util.py,165,function,"Asserts that two `GraphDef`s are (mostly) the same. Compares two `GraphDef` protos for equality, ignoring versions and ordering of nodes, attrs, and control inputs. Node names are used to match up nodes between the graphs, so the naming of nodes must be consistent. This function ignores randomized attribute values that may appear in V2 checkpoints. Args: expected: The `GraphDef` we expected. actual: The `GraphDef` we have. Raises: AssertionError: If the `GraphDef`s do not match. TypeError: If either argument is not a `GraphDef`." 4340,assert_equal_graph_def_v1,tensorflow/tensorflow/python/framework/test_util.py,186,function,"Asserts that two `GraphDef`s are (mostly) the same. Compares two `GraphDef` protos for equality, ignoring versions and ordering of nodes, attrs, and control inputs. Node names are used to match up nodes between the graphs, so the naming of nodes must be consistent. Args: actual: The `GraphDef` we have. expected: The `GraphDef` we expected. checkpoint_v2: boolean determining whether to ignore randomized attribute values that appear in V2 checkpoints. hash_table_shared_name: boolean determining whether to ignore randomized shared_names that appear in HashTableV2 op defs. Raises: AssertionError: If the `GraphDef`s do not match. TypeError: If either argument is not a `GraphDef`." 4341,assert_equal_graph_def,tensorflow/tensorflow/python/framework/test_util.py,210,function, 4342,assert_meta_graph_protos_equal,tensorflow/tensorflow/python/framework/test_util.py,233,function,Compares MetaGraphDefs `a` and `b` in unit test class `tester`. 
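The `combine()`/`times()` helpers above compose lists of parameter dicts. A minimal sketch of the cross product they produce (the module is internal, so the import path assumes a TensorFlow source checkout; option names are illustrative):

```python
from tensorflow.python.framework import test_combinations

# combine() expands each keyword into one dict per option value;
# times() forms the cross product of independently combined sets.
modes = test_combinations.combine(mode=["graph", "eager"])
devices = test_combinations.combine(device=["cpu", "gpu"])

product = test_combinations.times(modes, devices)
print(len(product))  # 4 parameter dicts: every mode paired with every device
```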
4343,_strip_checkpoint_v2_randomized,tensorflow/tensorflow/python/framework/test_util.py,277,function, 4344,_strip_hash_table_shared_name,tensorflow/tensorflow/python/framework/test_util.py,294,function, 4345,IsGoogleCudaEnabled,tensorflow/tensorflow/python/framework/test_util.py,304,function, 4346,IsBuiltWithROCm,tensorflow/tensorflow/python/framework/test_util.py,308,function, 4347,IsBuiltWithXLA,tensorflow/tensorflow/python/framework/test_util.py,312,function, 4348,IsBuiltWithNvcc,tensorflow/tensorflow/python/framework/test_util.py,316,function, 4349,GpuSupportsHalfMatMulAndConv,tensorflow/tensorflow/python/framework/test_util.py,320,function, 4350,IsMklEnabled,tensorflow/tensorflow/python/framework/test_util.py,324,function, 4351,InstallStackTraceHandler,tensorflow/tensorflow/python/framework/test_util.py,328,function, 4352,NHWCToNCHW,tensorflow/tensorflow/python/framework/test_util.py,332,function,"Converts the input from the NHWC format to NCHW. Args: input_tensor: a 4- or 5-D tensor, or an array representing shape Returns: converted tensor or shape array" 4353,NHWCToNCHW_VECT_C,tensorflow/tensorflow/python/framework/test_util.py,351,function,"Transforms the input from the NHWC layout to NCHW_VECT_C layout. Note: Does not include quantization or type conversion steps, which should be applied afterwards. Args: input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape Returns: tensor or shape array transformed into NCHW_VECT_C Raises: ValueError: if last dimension of `input_shape_or_tensor` is not evenly divisible by 4." 4354,NCHW_VECT_CToNHWC,tensorflow/tensorflow/python/framework/test_util.py,386,function,"Transforms the input from the NCHW_VECT_C layout to NHWC layout. Note: Does not include de-quantization or type conversion steps, which should be applied beforehand. Args: input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape Returns: tensor or shape array transformed into NHWC Raises: ValueError: if last dimension of `input_shape_or_tensor` is not 4." 4355,NCHWToNHWC,tensorflow/tensorflow/python/framework/test_util.py,418,function,"Converts the input from the NCHW format to NHWC. Args: input_tensor: a 4- or 5-D tensor, or an array representing shape Returns: converted tensor or shape array" 4356,skip_if,tensorflow/tensorflow/python/framework/test_util.py,437,function,"Skips the decorated function if condition is or evaluates to True. Args: condition: Either an expression that can be used in ""if not condition"" statement, or a callable whose result should be a boolean. Returns: The wrapped function" 4357,skip_if_error,tensorflow/tensorflow/python/framework/test_util.py,464,function,"Context manager to skip cases not considered failures by the tests. Note that this does not work if used in setUpClass/tearDownClass. Usage in setUp/tearDown works fine just like regular test methods. Args: test_obj: A test object provided as `self` in the test methods; this object is usually an instance of `unittest.TestCase`'s subclass and should have `skipTest` method. error_type: The error type to skip. Note that if `messages` are given, both `error_type` and `messages` need to match for the test to be skipped. messages: Optional, a string or list of strings. If `None`, the test will be skipped if `error_type` matches what is raised; otherwise, the test is skipped if any of the `messages` is contained in the message of the error raised, and `error_type` matches the error raised. Yields: Nothing." 
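A minimal sketch of `skip_if_error` in a test method; per the docstring above, the error type and the message filter must both match for the skip to trigger (the test class and message are illustrative):

```python
import tensorflow as tf
from tensorflow.python.framework import errors, test_util

class SkipDemoTest(tf.test.TestCase):

  def test_skips_on_unavailable(self):
    # Skipped (not failed): both the error type and the message filter
    # match the exception raised inside the context.
    with test_util.skip_if_error(self, errors.UnavailableError,
                                 messages="server moved"):
      raise errors.UnavailableError(None, None, "server moved")

if __name__ == "__main__":
  tf.test.main()
```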
4358,enable_c_shapes,tensorflow/tensorflow/python/framework/test_util.py,495,function,No-op. TODO(b/74620627): Remove this. 4359,with_c_shapes,tensorflow/tensorflow/python/framework/test_util.py,500,function,No-op. TODO(b/74620627): Remove this. 4360,enable_control_flow_v2,tensorflow/tensorflow/python/framework/test_util.py,505,function,"Decorator for enabling CondV2 and WhileV2 on a test. Note this enables using CondV2 and WhileV2 after running the test class's setup/teardown methods. In addition to this, callers must import the while_v2 module in order to set the _while_v2 module in control_flow_ops. Args: fn: the function to be wrapped Returns: The wrapped function" 4361,with_control_flow_v2,tensorflow/tensorflow/python/framework/test_util.py,532,function,"Adds methods that call original methods with WhileV2 and CondV2 enabled. Note this enables CondV2 and WhileV2 in new methods after running the test class's setup method. In addition to this, callers must import the while_v2 module in order to set the _while_v2 module in control_flow_ops. If a test function has _disable_control_flow_v2 attr set to True (using the @disable_control_flow_v2 decorator), the v2 function is not generated for it. Example: @test_util.with_control_flow_v2 class ControlFlowTest(test.TestCase): def testEnabledForV2(self): ... @test_util.disable_control_flow_v2(""b/xyzabc"") def testDisabledForV2(self): ... Generated class: class ControlFlowTest(test.TestCase): def testEnabledForV2(self): ... def testEnabledForV2WithControlFlowV2(self): // Enable V2 flags. testEnabledForV2(self) // Restore V2 flags. def testDisabledForV2(self): ... Args: cls: class to decorate Returns: cls with new test methods added" 4362,disable_control_flow_v2,tensorflow/tensorflow/python/framework/test_util.py,587,function,"Decorator for a function in a with_control_flow_v2 enabled test class. Blocks the function from being run with v2 control flow ops. Args: unused_msg: Reason for disabling. Returns: The wrapped function with _disable_control_flow_v2 attr set to True." 4363,enable_output_all_intermediates,tensorflow/tensorflow/python/framework/test_util.py,606,function,"Force-enable outputting all intermediates from functional control flow ops. Args: fn: the function to be wrapped Returns: The wrapped function" 4364,assert_no_new_pyobjects_executing_eagerly,tensorflow/tensorflow/python/framework/test_util.py,629,function,"Decorator for asserting that no new Python objects persist after a test. Runs the test multiple times executing eagerly, first as a warmup and then to let objects accumulate. The warmup helps ignore caches which do not grow as the test is run repeatedly. Useful for checking that there are no missing Py_DECREFs in the C code exercised by a bit of Python. Args: func: The function to test. warmup_iters: The number of warmup iterations, excluded from measuring. Returns: The wrapped function performing the test." 4365,assert_no_new_tensors,tensorflow/tensorflow/python/framework/test_util.py,712,function,"Decorator for asserting that no new Tensors persist after a test. Mainly useful for checking that code using the Python C API has correctly manipulated reference counts. Clears the caches that it knows about, runs the garbage collector, then checks that there are no Tensor or Tensor-like objects still around. This includes Tensors to which something still has a reference (e.g. from missing Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one of the objects has __del__ defined). Args: f: The test case to run. 
Returns: The decorated test case." 4366,_find_reference_cycle,tensorflow/tensorflow/python/framework/test_util.py,774,function, 4367,assert_no_garbage_created,tensorflow/tensorflow/python/framework/test_util.py,877,function,"Test method decorator to assert that no garbage has been created. Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters cannot be un-set (i.e. will disable garbage collection for any other unit tests in the same file/shard). Args: f: The function to decorate. Returns: The decorated function." 4368,_combine_named_parameters,tensorflow/tensorflow/python/framework/test_util.py,952,function,"Generate combinations based on its keyword arguments. Two sets of returned combinations can be concatenated using +. Their product can be computed using `times()`. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. Each key has one value - one of the corresponding keyword argument values." 4369,generate_combinations_with_testcase_name,tensorflow/tensorflow/python/framework/test_util.py,977,function,"Generate combinations based on its keyword arguments using combine(). This function calls combine() and appends a testcase name to the list of dictionaries returned. The 'testcase_name' key is required for named parameterized tests. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. Each key has one value - one of the corresponding keyword argument values." 4370,run_all_in_graph_and_eager_modes,tensorflow/tensorflow/python/framework/test_util.py,1010,function,Execute all test methods in the given class with and without eager. 4371,build_as_function_and_v1_graph,tensorflow/tensorflow/python/framework/test_util.py,1025,function,"Run a test case in v1 graph mode and inside tf.function in eager mode. WARNING: This decorator can only be used in test cases that statically check the generated graph. Attempting to evaluate graph or function results via session.run() or self.evaluate() will fail. WARNING: This decorator can only be used for test cases that inherit from absl.testing.parameterized.TestCase. Args: func: Test case function to be decorated. Returns: Decorated test case function." 4372,run_in_async_and_sync_mode,tensorflow/tensorflow/python/framework/test_util.py,1077,function,Execute the test in async mode and sync mode. 4373,eager_lazy_remote_copy_on_and_off,tensorflow/tensorflow/python/framework/test_util.py,1092,function,Execute the test method w/o lazy tensor copy for function remote inputs. 4374,run_in_graph_and_eager_modes,tensorflow/tensorflow/python/framework/test_util.py,1107,function,"Execute the decorated test with and without enabling eager execution. This function returns a decorator intended to be applied to test methods in a `tf.test.TestCase` class. Doing so will cause the contents of the test method to be executed twice - once normally, and once with eager execution enabled. This allows unittests to confirm the equivalence between eager and graph execution (see `tf.compat.v1.enable_eager_execution`). 
For example, consider the following unittest: ```python class MyTests(tf.test.TestCase): @run_in_graph_and_eager_modes def test_foo(self): x = tf.constant([1, 2]) y = tf.constant([3, 4]) z = tf.add(x, y) self.assertAllEqual([4, 6], self.evaluate(z)) if __name__ == ""__main__"": tf.test.main() ``` This test validates that `tf.add()` has the same behavior when computed with eager execution enabled as it does when constructing a TensorFlow graph and executing the `z` tensor in a session. `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and `run_in_graph_and_eager_modes` are available decorators for different v1/v2/eager/graph combinations. Args: func: function to be annotated. If `func` is None, this method returns a decorator that can be applied to a function. If `func` is not None this returns the decorator applied to `func`. config: An optional config_pb2.ConfigProto to use to configure the session when executing graphs. use_gpu: If True, attempt to run as many operations as possible on GPU. assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage collector and asserts that no extra garbage has been created when running the test with eager execution enabled. This will fail if there are reference cycles (e.g. a = []; a.append(a)). Off by default because some tests may create garbage for legitimate reasons (e.g. they define a class which inherits from `object`), and because DEBUG_SAVEALL is sticky in some Python interpreters (meaning that tests which rely on objects being collected elsewhere in the unit test file will not work). Additionally, checks that nothing still has a reference to Tensors that the test allocated. Returns: Returns a decorator that will run the decorated test method twice: once by constructing and executing a graph in a session and once with eager execution enabled." 4375,py_func_if_in_function,tensorflow/tensorflow/python/framework/test_util.py,1214,function, 4376,also_run_as_tf_function,tensorflow/tensorflow/python/framework/test_util.py,1238,function,"Runs the decorated test twice--once as is, once inside a tf.function. This allows you to run a test both in eager execution and inside a tf.function, exercising the two execution modes supported in tf 2.0. The test assertions are automatically done inside tf.py_funcs, and tf.function ensures that they run in the proper order and with the proper side effects. Currently variable creation is not supported in tests annotated with this decorator since it's tricky to ensure the variable doesn't get repeatedly created when retracing the tf.function. Args: f: the test method to be decorated Returns: The decorated test method, which will run both in eager and inside a tf.function." 4377,deprecated_graph_mode_only,tensorflow/tensorflow/python/framework/test_util.py,1273,function,"Execute the decorated test in graph mode. This function returns a decorator intended to be applied to tests that are not compatible with eager mode. When this decorator is applied, the test body will be run in an environment where API calls construct graphs instead of executing eagerly. `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and `run_in_graph_and_eager_modes` are available decorators for different v1/v2/eager/graph combinations. Args: func: function to be annotated. If `func` is None, this method returns a decorator that can be applied to a function. If `func` is not None this returns the decorator applied to `func`. Returns: Returns a decorator that will run the decorated test method in graph mode." 
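A minimal sketch of `deprecated_graph_mode_only` on a single test method; the placeholder/feed pattern below only works because the decorator guarantees the body runs with graph construction enabled (the test class is illustrative):

```python
import tensorflow as tf
from tensorflow.python.framework import test_util

class GraphOnlyTest(tf.test.TestCase):

  @test_util.deprecated_graph_mode_only
  def test_feed_placeholder(self):
    # Placeholders require graph construction, which the decorator
    # provides even when the suite otherwise runs eagerly.
    x = tf.compat.v1.placeholder(tf.float32, shape=[2])
    y = x * 2.0
    with self.cached_session() as sess:
      self.assertAllClose([2.0, 4.0], sess.run(y, feed_dict={x: [1.0, 2.0]}))

if __name__ == "__main__":
  tf.test.main()
```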
4378,run_all_in_deprecated_graph_mode_only,tensorflow/tensorflow/python/framework/test_util.py,1325,function,Execute all tests in a class in graph mode. 4379,run_v1_only,tensorflow/tensorflow/python/framework/test_util.py,1338,function,"Execute the decorated test only if running in v1 mode. This function is intended to be applied to tests that exercise v1 only functionality. If the test is run in v2 mode it will simply be skipped. `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and `run_in_graph_and_eager_modes` are available decorators for different v1/v2/eager/graph combinations. Args: reason: string giving a reason for limiting the test to v1 only. func: function to be annotated. If `func` is None, this method returns a decorator that can be applied to a function. If `func` is not None this returns the decorator applied to `func`. Returns: Returns a decorator that will conditionally skip the decorated test method." 4380,run_v2_only,tensorflow/tensorflow/python/framework/test_util.py,1391,function,"Execute the decorated test only if running in v2 mode. This function is intended to be applied to tests that exercise v2 only functionality. If the test is run in v1 mode it will simply be skipped. `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and `run_in_graph_and_eager_modes` are available decorators for different v1/v2/eager/graph combinations. Args: func: function to be annotated. If `func` is None, this method returns a decorator that can be applied to a function. If `func` is not None this returns the decorator applied to `func`. Returns: Returns a decorator that will conditionally skip the decorated test method." 4381,run_gpu_only,tensorflow/tensorflow/python/framework/test_util.py,1428,function,"Execute the decorated test only if a GPU is available. This function is intended to be applied to tests that require the presence of a GPU. If a GPU is absent, it will simply be skipped. Args: func: function to be annotated. If `func` is None, this method returns a decorator that can be applied to a function. If `func` is not None this returns the decorator applied to `func`. Returns: Returns a decorator that will conditionally skip the decorated test method." 4382,run_cuda_only,tensorflow/tensorflow/python/framework/test_util.py,1461,function,"Execute the decorated test only if a GPU is available. This function is intended to be applied to tests that require the presence of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped. Args: func: function to be annotated. If `func` is None, this method returns a decorator that can be applied to a function. If `func` is not None this returns the decorator applied to `func`. Returns: Returns a decorator that will conditionally skip the decorated test method." 4383,with_forward_compatibility_horizons,tensorflow/tensorflow/python/framework/test_util.py,1494,function,"Executes the decorated test with the specified forward-compat horizons. Args: *horizons: A list of (year, month, day) tuples. If the list includes `None`, then the test will also be run with no forward-compatibility horizon set. Returns: A decorator that will execute the test with the specified horizons." 4384,is_gpu_available,tensorflow/tensorflow/python/framework/test_util.py,1532,function,"Returns whether TensorFlow can access a GPU. Warning: if a non-GPU version of the package is installed, the function would also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow was built with CUDA support. 
Args: cuda_only: limit the search to CUDA GPUs. min_cuda_compute_capability: a (major,minor) pair that indicates the minimum CUDA compute capability required, or None if no requirement. Note that the keyword arg name ""cuda_only"" is misleading (since the routine will return true when a GPU device is available, irrespective of whether TF was built with CUDA support or ROCm support). However no changes here because ++ Changing the name ""cuda_only"" to something more generic would break backward compatibility ++ Adding an equivalent ""rocm_only"" would require the implementation check the build type. This in turn would require doing the same for CUDA and thus potentially break backward compatibility ++ Adding a new ""cuda_or_rocm_only"" would not break backward compatibility, but would require most (if not all) callers to update the call to use ""cuda_or_rocm_only"" instead of ""cuda_only"" Returns: True if a GPU device of the requested kind is available." 4385,device,tensorflow/tensorflow/python/framework/test_util.py,1581,function,Uses gpu when requested and available. 4386,use_gpu,tensorflow/tensorflow/python/framework/test_util.py,1592,function,Uses gpu when requested and available. 4387,force_gpu,tensorflow/tensorflow/python/framework/test_util.py,1599,function,Force the gpu to be used. 4388,force_cpu,tensorflow/tensorflow/python/framework/test_util.py,1606,function,Force the cpu to be used. 4389,CapturedWrites,tensorflow/tensorflow/python/framework/test_util.py,1612,class,A utility class to load the captured writes made to a stream. 4390,FakeEagerSession,tensorflow/tensorflow/python/framework/test_util.py,1625,class,"Fake session so tests that conditionally use placeholders can use eager. There are a number of tests that conditionally use placeholders for shape inference. The pattern is demonstrated here: ```python with self.cached_session() as sess: if static_shape: y = math_ops.matmul(x, ...) feed_dict = {} else: x_ph = array_ops.placeholder(...) y = math_ops.matmul(x_ph, ...) feed_dict = {x_ph: x} val = sess.run(y, feed_dict=feed_dict) ``` Since the feed_dict is empty when not using placeholders we should be able to call self.evaluate(), however this requires rewriting the test case. This class should be considered a stop-gap solution to get tests running with eager with minimal changes to the actual test." 4391,ErrorLoggingSession,tensorflow/tensorflow/python/framework/test_util.py,1682,class,Wrapper around a Session that logs errors in run(). 4392,disable_cudnn_autotune,tensorflow/tensorflow/python/framework/test_util.py,1697,function,"Disable autotuning during the call to this function. Some tests want to base assertions on a graph being isomorphic with a copy. To ensure this, this decorator disables autotuning. Args: func: Function to run with CuDNN autotuning turned off. Returns: Decorated function." 4393,enable_tf_xla_constant_folding,tensorflow/tensorflow/python/framework/test_util.py,1743,function, 4394,_disable_test,tensorflow/tensorflow/python/framework/test_util.py,1781,function, 4395,disable_xla,tensorflow/tensorflow/python/framework/test_util.py,1802,function,Execute the test method only if xla is not enabled. 4396,disable_mlir_bridge,tensorflow/tensorflow/python/framework/test_util.py,1809,function,Execute the test method only if MLIR bridge is not enabled. 
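A minimal sketch of gating work on `is_gpu_available` via its public alias `tf.test.is_gpu_available`, including the compute-capability filter described above (the capability threshold is illustrative):

```python
import tensorflow as tf

# Gate GPU-specific assertions on both device presence and capability.
if tf.test.is_gpu_available(cuda_only=True,
                            min_cuda_compute_capability=(7, 0)):
  print("CUDA GPU with compute capability >= 7.0 found")
else:
  print("running CPU-only checks")
```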
4397,disable_tfrt,tensorflow/tensorflow/python/framework/test_util.py,1816,function, 4398,for_all_test_methods,tensorflow/tensorflow/python/framework/test_util.py,1845,function,"Generate class-level decorator from given method-level decorator. It is expected for the given decorator to take some arguments and return a method that is then called on the test method to produce a decorated method. Args: decorator: The decorator to apply. *args: Positional arguments **kwargs: Keyword arguments Returns: Function that will decorate a given classes test methods with the decorator." 4399,no_xla_auto_jit,tensorflow/tensorflow/python/framework/test_util.py,1873,function,This test is not intended to be run with XLA auto jit enabled. 4400,xla_allow_fallback,tensorflow/tensorflow/python/framework/test_util.py,1880,function, 4401,EagerSessionWarner,tensorflow/tensorflow/python/framework/test_util.py,1909,class, 4402,TensorFlowTestCase,tensorflow/tensorflow/python/framework/test_util.py,1922,class,Base class for tests that need to test TensorFlow. 4403,create_local_cluster,tensorflow/tensorflow/python/framework/test_util.py,3164,function,"Create and start local servers and return the associated `Server` objects. ""PS"" stands for ""parameter server"": a task responsible for storing and updating the model's parameters. Other tasks send updates to these parameters as they work on optimizing the parameters. This particular division of labor between tasks is not required, but is common for distributed training. Read more at https://www.tensorflow.org/guide/extend/architecture ![components](https://www.tensorflow.org/images/diag1.svg ""components"") Figure illustrates the interaction of these components. ""/job:worker/task:0"" and ""/job:ps/task:0"" are both tasks with worker services. Example: ```python workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2) worker_sessions = [tf.compat.v1.Session(w.target) for w in workers] with tf.device(""/job:ps/task:0""): ... with tf.device(""/job:ps/task:1""): ... with tf.device(""/job:worker/task:0""): ... with tf.device(""/job:worker/task:1""): ... worker_sessions[0].run(...) ``` Args: num_workers: Number of worker servers to start. num_ps: Number of PS servers to start. protocol: Communication protocol. Allowed values are documented in the documentation of `tf.distribute.Server`. worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be used to instantiate multiple devices etc. ps_config: (optional) `tf.ConfigProto` to initialize PS servers. Returns: A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list of `num_workers` objects of type `tf.distribute.Server` (all running locally); and `ps_servers` is a list of `num_ps` objects of similar type. Raises: ImportError: if portpicker module was not found at load time" 4404,get_node_def_from_graph,tensorflow/tensorflow/python/framework/test_util.py,3252,function,"Returns the `NodeDef` instance for given node name in the graph def. This method explores only the NodeDefs in `graph_def.node`. Args: node_name: Name of the NodeDef to search for. graph_def: An instance of `GraphDef` proto. Returns: the `NodeDef` instance whose name field matches the given node_name or None." 4405,set_producer_version,tensorflow/tensorflow/python/framework/test_util.py,3270,function,Sets graph.graph_def_versions.producer to `producer_version`. 
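A minimal sketch of `get_node_def_from_graph`, which scans `graph_def.node` by name and returns None on a miss (the node name "my_const" is illustrative):

```python
import tensorflow as tf
from tensorflow.python.framework import test_util

with tf.Graph().as_default() as g:
  tf.constant(1.0, name="my_const")

# Name lookup over the serialized graph; returns None when absent.
node = test_util.get_node_def_from_graph("my_const", g.as_graph_def())
print(node.op if node is not None else "not found")  # -> Const
```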
4406,TestUtilTest,tensorflow/tensorflow/python/framework/test_util_test.py,53,class, 4407,SkipTestTest,tensorflow/tensorflow/python/framework/test_util_test.py,812,class, 4408,GraphAndEagerNoVariableSharing,tensorflow/tensorflow/python/framework/test_util_test.py,874,class, 4409,GarbageCollectionTest,tensorflow/tensorflow/python/framework/test_util_test.py,888,class, 4410,set_environ,tensorflow/tensorflow/python/framework/tf2_test.py,30,function, 4411,unset_environ,tensorflow/tensorflow/python/framework/tf2_test.py,34,function, 4412,EnablingTF2Behavior,tensorflow/tensorflow/python/framework/tf2_test.py,38,class, 4413,TraceableObject,tensorflow/tensorflow/python/framework/traceable_stack.py,24,class,Wrap an object together with its code definition location. 4414,TraceableStack,tensorflow/tensorflow/python/framework/traceable_stack.py,80,class,A stack of TraceableObjects. 4415,TraceableObjectTest,tensorflow/tensorflow/python/framework/traceable_stack_test.py,30,class, 4416,TraceableStackTest,tensorflow/tensorflow/python/framework/traceable_stack_test.py,77,class, 4417,TypeSpec,tensorflow/tensorflow/python/framework/type_spec.py,49,class,"Specifies a TensorFlow value type. A `tf.TypeSpec` provides metadata describing an object accepted or returned by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and `tf.RaggedTensorSpec`, are used to describe different value types. For example, `tf.function`'s `input_signature` argument accepts a list (or nested structure) of `TypeSpec`s. Creating new subclasses of TypeSpec (outside of TensorFlow core) is not currently supported. In particular, we may make breaking changes to the private methods and properties defined by this base class." 4418,BatchableTypeSpec,tensorflow/tensorflow/python/framework/type_spec.py,459,class,"TypeSpec with a batchable tensor encoding. The batchable tensor encoding is a list of `tf.Tensor`s that supports batching and unbatching. In particular, stacking (or unstacking) values with the same `TypeSpec` must be equivalent to stacking (or unstacking) each of their tensor lists. Unlike the component encoding (returned by `self._to_components`), the batchable tensor encoding may require using encoding/decoding ops. If a subclass's batchable tensor encoding is not simply a flattened version of the component encoding, then the subclass must override `_to_tensor_list`, `_from_tensor_list`, and `_flat_tensor_specs`." 4419,type_spec_from_value,tensorflow/tensorflow/python/framework/type_spec.py,507,function,"Returns a `tf.TypeSpec` that represents the given `value`. Examples: >>> tf.type_spec_from_value(tf.constant([1, 2, 3])) TensorSpec(shape=(3,), dtype=tf.int32, name=None) >>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64)) TensorSpec(shape=(2,), dtype=tf.float64, name=None) >>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]])) RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64) >>> example_input = tf.ragged.constant([[1, 2], [3]]) >>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)]) ... def f(x): ... return tf.reduce_sum(x, axis=1) Args: value: A value that can be accepted or returned by TensorFlow APIs. Accepted types for `value` include `tf.Tensor`, any value that can be converted to `tf.Tensor` using `tf.convert_to_tensor`, and any subclass of `CompositeTensor` (such as `tf.RaggedTensor`). Returns: A `TypeSpec` that is compatible with `value`. Raises: TypeError: If a TypeSpec cannot be built for `value`, because its type is not supported." 
4420,_type_spec_from_value,tensorflow/tensorflow/python/framework/type_spec.py,555,function,Returns a `TypeSpec` that represents the given `value`. 4421,register_type_spec_from_value_converter,tensorflow/tensorflow/python/framework/type_spec.py,590,function,"Registers a function for converting values with a given type to TypeSpecs. If multiple registered `type_object`s match a value, then the most recent registration takes precedence. Custom converters should not be defined for `CompositeTensor`s; use `CompositeTensor._type_spec` instead. Args: type_object: A Python `type` object representing the type of values accepted by `converter_fn`. converter_fn: A function that takes one argument (an instance of the type represented by `type_object`) and returns a `TypeSpec`. allow_subclass: If true, then use `isinstance(value, type_object)` to check for matches. If false, then use `type(value) is type_object`." 4422,TwoTensors,tensorflow/tensorflow/python/framework/type_spec_test.py,35,class,"A simple value type to test TypeSpec. Contains two tensors (x, y) and a string (color). The color value is a stand-in for any extra type metadata we might need to store." 4423,TwoTensorsSpec,tensorflow/tensorflow/python/framework/type_spec_test.py,49,class,A TypeSpec for the TwoTensors value type. 4424,TypeSpecTest,tensorflow/tensorflow/python/framework/type_spec_test.py,85,class, 4425,VersionTest,tensorflow/tensorflow/python/framework/versions_test.py,25,class, 4426,ArithmeticOptimizerTest,tensorflow/tensorflow/python/grappler/arithmetic_optimizer_test.py,28,class, 4427,_input,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,54,function,Generates an input of a given shape. 4428,_weight,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,59,function,Generates a weight of a given shape. 4429,_bias,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,66,function,Generates a bias of a given shape. 4430,_conv2d,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,71,function,Returns a 2d convolution layer with full stride. 4431,_conv3d,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,76,function,Returns a 3d convolution layer with full stride. 4432,_max_pool_2x2,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,81,function,Downsamples a feature map by 2X. 4433,_fused_batchnorm,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,87,function,Batchnorm. 4434,_conv_bn,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,93,function,Conv followed by batchnorm. 4435,_conv3d_bn,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,105,function,Conv3D followed by batchnorm. 4436,_matmul_act,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,118,function,Matmul followed by activation. 4437,_conv_pool,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,127,function,(Conv -> bias -> relu -> max_pool) x2. 4438,_simple_loop,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,141,function,Simple loop whose body is provided by the functor. 4439,_loop_vars_intertwined,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,150,function,Loop whose loop variables are intertwined. 4440,_lstm_cell,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,159,function,Create an LSTM cell. 4441,_recurrent_lstm,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,180,function,Dynamic single-layer LSTM with TensorArray. 
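A minimal sketch of `register_type_spec_from_value_converter` described above; the `Point` class is hypothetical, and the converter simply maps every `Point` to a fixed `tf.TensorSpec`:

```python
import tensorflow as tf
from tensorflow.python.framework import type_spec

class Point:  # hypothetical user-defined value type
  def __init__(self, x, y):
    self.x, self.y = x, y

# Every Point (and, with allow_subclass=True, any subclass instance)
# is described by the same fixed TensorSpec.
type_spec.register_type_spec_from_value_converter(
    Point, lambda p: tf.TensorSpec([2], tf.float32), allow_subclass=True)

print(type_spec.type_spec_from_value(Point(1.0, 2.0)))
# TensorSpec(shape=(2,), dtype=tf.float32, name=None)
```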
4442,_make_node_with_color,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,201,function,Returns a node representative of the specified list type. 4443,_build_simple_loop_graph,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,216,function,Builds a test graph with a simple loop. 4444,_get_config,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,234,function,Returns a ConfigProto with auto mixed precision enabled if appropriate. 4445,_is_cast_to_fp16,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,257,function, 4446,_is_cast_to_bf16,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,261,function, 4447,_is_cast_to_fp32,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,265,function, 4448,_count_casts,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,269,function,Counts the number of casts to f16 and fp32. 4449,_build_node_map,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,290,function, 4450,_example_noninlined_funcdef_shape,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,297,function, 4451,_example_noninlined_funcdef_grad,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,305,function,Gradient of Swish function defined below. 4452,_example_noninlined_funcdef,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,318,function,Computes the Swish activation function: `x * sigmoid(x)`. 4453,AutoMixedPrecisionTest,tensorflow/tensorflow/python/grappler/auto_mixed_precision_test.py,323,class,Tests the Grappler auto mixed precision optimizer. 4454,Cluster,tensorflow/tensorflow/python/grappler/cluster.py,29,class,Grappler Clusters. 4455,Provision,tensorflow/tensorflow/python/grappler/cluster.py,115,function, 4456,ClusterTest,tensorflow/tensorflow/python/grappler/cluster_test.py,32,class, 4457,ConstantFoldingTest,tensorflow/tensorflow/python/grappler/constant_folding_test.py,37,class, 4458,GenerateCostReport,tensorflow/tensorflow/python/grappler/cost_analyzer.py,26,function,"Analyze the cost of each TensorFlow op and node in the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. per_node_report: by default the report contains stats aggregated on a per op type basis, setting per_node_report to True adds results for each individual node to the report. verbose: Prints out the entire operation proto instead of a summary table. cluster: Analyze the costs using the specified cluster, or the local machine if no cluster was specified. Returns: A string of cost report." 4459,GenerateMemoryReport,tensorflow/tensorflow/python/grappler/cost_analyzer.py,52,function,"Analyze the peak memory usage for the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. detailed_report: print the live tensors in addition to the peak memory usage. cluster: Analyze the memory using the specified cluster, or the local machine if no cluster was specified. Returns: A string with the formatted memory usage." 4460,CostAnalysisTest,tensorflow/tensorflow/python/grappler/cost_analyzer_test.py,39,class, 4461,get_metagraph,tensorflow/tensorflow/python/grappler/cost_analyzer_tool.py,40,function,Constructs and returns a MetaGraphDef from the input file. 
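The `_get_config` helper above builds a ConfigProto with the auto mixed precision rewrite enabled when appropriate. A minimal sketch of doing the same by hand through the Grappler RewriterConfig (a sketch under the assumption of a TF 1.x-style session, not the test helper's exact code):

```python
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2

# Ask Grappler to recast eligible float32 ops to float16 where safe.
config = tf.compat.v1.ConfigProto()
config.graph_options.rewrite_options.auto_mixed_precision = (
    rewriter_config_pb2.RewriterConfig.ON)

with tf.compat.v1.Session(config=config) as sess:
  pass  # build and run the graph under test with this config
```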
4462,main,tensorflow/tensorflow/python/grappler/cost_analyzer_tool.py,80,function, 4463,GrapplerTest,tensorflow/tensorflow/python/grappler/datasets_test.py,34,class, 4464,main,tensorflow/tensorflow/python/grappler/graph_analyzer.py,32,function, 4465,Item,tensorflow/tensorflow/python/grappler/item.py,26,class,GrapplerItem. 4466,ItemTest,tensorflow/tensorflow/python/grappler/item_test.py,36,class, 4467,_weight,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,51,function,Generates a weight of a given shape. 4468,_bias,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,56,function,Generates a bias of a given shape. 4469,_conv2d,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,61,function,Returns a 2d convolution layer with full stride. 4470,_max_pool_2x2,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,66,function,Downsamples a feature map by 2X. 4471,_two_layer_model,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,73,function, 4472,_model_with_second_port,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,86,function, 4473,_model_with_branch,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,97,function, 4474,_model_with_vec_and_4d,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,107,function, 4475,_loop,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,116,function, 4476,_loop_with_branch,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,127,function, 4477,_loop_with_vec_and_4d,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,138,function, 4478,_get_config,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,149,function, 4479,_simple_metagraph,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,168,function, 4480,_get_cluster,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,183,function, 4481,_is_transpose,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,194,function, 4482,_is_permute,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,199,function, 4483,LayoutOptimizerTest,tensorflow/tensorflow/python/grappler/layout_optimizer_test.py,206,class,Tests the Grappler layout optimizer. 4484,MemoryOptimizerSwapTest,tensorflow/tensorflow/python/grappler/memory_optimizer_test.py,38,class,Tests the Grappler memory optimizer. 4485,MemoryOptimizerRecomputeTest,tensorflow/tensorflow/python/grappler/memory_optimizer_test.py,107,class,"Tests the Python interface to recomputation rewrites. See core/grappler/optimizers/memory_optimizer_test.cc for functional tests." 4486,GenerateModelReport,tensorflow/tensorflow/python/grappler/model_analyzer.py,24,function,"Report what's known statically about each node in the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. assume_valid_feeds: If True, assume that the shape of the fed nodes is valid debug: Add some information useful for debugging. Returns: A string containing the report." 4487,PyWrapOptimizeGraphTest,tensorflow/tensorflow/python/grappler/model_analyzer_test.py,30,class, 4488,OptimizeGraph,tensorflow/tensorflow/python/grappler/tf_optimizer.py,27,function,"Optimize the provided metagraph. For best results, the signature_def field in `metagraph` should be populated with information about input (feed) and output (fetch) tensors. Args: config_proto: a ConfigProto protobuf. metagraph: a MetagraphDef protobuf. verbose: whether to log optimization results. graph_id: a string identifying this graph. 
cluster: a grappler cluster object representing hardware resources available to run this graph. strip_default_attributes: whether graph node attributes having default values should be removed after all the optimization passes. This option is useful if the resulting graph will be executed by an older process that might not know some of the recently added attributes." 4489,PyWrapOptimizeGraphTest,tensorflow/tensorflow/python/grappler/tf_optimizer_test.py,37,class, 4490,softmax,tensorflow/tensorflow/python/keras/activations.py,46,function,"Softmax converts a real vector to a vector of categorical probabilities. The elements of the output vector are in range (0, 1) and sum to 1. Each vector is handled independently. The `axis` argument sets which axis of the input the function is applied along. Softmax is often used as the activation for the last layer of a classification network because the result could be interpreted as a probability distribution. The softmax of each vector x is computed as `exp(x) / tf.reduce_sum(exp(x))`. The input values are the log-odds of the resulting probability. Arguments: x: Input tensor. axis: Integer, axis along which the softmax normalization is applied. Returns: Tensor, output of softmax transformation (all values are non-negative and sum to 1). Raises: ValueError: In case `dim(x) == 1`." 4491,elu,tensorflow/tensorflow/python/keras/activations.py,88,function,"Exponential Linear Unit. The exponential linear unit (ELU) with `alpha > 0` is: `x` if `x > 0` and `alpha * (exp(x) - 1)` if `x < 0` The ELU hyperparameter `alpha` controls the value to which an ELU saturates for negative net inputs. ELUs diminish the vanishing gradient effect. ELUs have negative values which push the mean of the activations closer to zero. Mean activations that are closer to zero enable faster learning as they bring the gradient closer to the natural gradient. ELUs saturate to a negative value when the argument gets smaller. Saturation means a small derivative which decreases the variation and the information that is propagated to the next layer. Example Usage: >>> import tensorflow as tf >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu', ... input_shape=(28, 28, 1))) >>> model.add(tf.keras.layers.MaxPooling2D((2, 2))) >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu')) >>> model.add(tf.keras.layers.MaxPooling2D((2, 2))) >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu')) Arguments: x: Input tensor. alpha: A scalar, slope of negative section. `alpha` controls the value to which an ELU saturates for negative net inputs. Returns: The exponential linear unit (ELU) activation function: `x` if `x > 0` and `alpha * (exp(x) - 1)` if `x < 0`. Reference: [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)" 4492,selu,tensorflow/tensorflow/python/keras/activations.py,138,function,"Scaled Exponential Linear Unit (SELU). The Scaled Exponential Linear Unit (SELU) activation function is defined as: - `if x > 0: return scale * x` - `if x < 0: return scale * alpha * (exp(x) - 1)` where `alpha` and `scale` are pre-defined constants (`alpha=1.67326324` and `scale=1.05070098`). Basically, the SELU activation function multiplies `scale` (> 1) with the output of the `tf.keras.activations.elu` function to ensure a slope larger than one for positive inputs. 
The values of `alpha` and `scale` are chosen so that the mean and variance of the inputs are preserved between two consecutive layers as long as the weights are initialized correctly (see `tf.keras.initializers.LecunNormal` initializer) and the number of input units is ""large enough"" (see reference paper for more information). Example Usage: >>> num_classes = 10 # 10-class problem >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(64, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(32, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(16, kernel_initializer='lecun_normal', ... activation='selu')) >>> model.add(tf.keras.layers.Dense(num_classes, activation='softmax')) Arguments: x: A tensor or variable to compute the activation function for. Returns: The scaled exponential unit activation: `scale * elu(x, alpha)`. Notes: - To be used together with the `tf.keras.initializers.LecunNormal` initializer. - To be used together with the dropout variant `tf.keras.layers.AlphaDropout` (not regular dropout). References: - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)" 4493,softplus,tensorflow/tensorflow/python/keras/activations.py,192,function,"Softplus activation function, `softplus(x) = log(exp(x) + 1)`. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.softplus(a) >>> b.numpy() array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00, 2.0000000e+01], dtype=float32) Arguments: x: Input tensor. Returns: The softplus activation: `log(exp(x) + 1)`." 4494,softsign,tensorflow/tensorflow/python/keras/activations.py,214,function,"Softsign activation function, `softsign(x) = x / (abs(x) + 1)`. Example Usage: >>> a = tf.constant([-1.0, 0.0, 1.0], dtype = tf.float32) >>> b = tf.keras.activations.softsign(a) >>> b.numpy() array([-0.5, 0. , 0.5], dtype=float32) Arguments: x: Input tensor. Returns: The softsign activation: `x / (abs(x) + 1)`." 4495,swish,tensorflow/tensorflow/python/keras/activations.py,235,function,"Swish activation function, `swish(x) = x * sigmoid(x)`. Swish activation function which returns `x*sigmoid(x)`. It is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks, it is unbounded above and bounded below. Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.swish(a) >>> b.numpy() array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01, 2.0000000e+01], dtype=float32) Arguments: x: Input tensor. Returns: The swish activation applied to `x` (see reference paper for details). Reference: - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)" 4496,relu,tensorflow/tensorflow/python/keras/activations.py,266,function,"Applies the rectified linear unit activation function. With default values, this returns the standard ReLU activation: `max(x, 0)`, the element-wise maximum of 0 and the input tensor. Modifying default parameters allows you to use non-zero thresholds, change the max value of the activation, and to use a non-zero multiple of the input for values below the threshold. For example: >>> foo = tf.constant([-10, -5, 0.0, 5, 10], dtype = tf.float32) >>> tf.keras.activations.relu(foo).numpy() array([ 0., 0., 0., 5., 10.], dtype=float32) >>> tf.keras.activations.relu(foo, alpha=0.5).numpy() array([-5. , -2.5, 0. , 5. , 10. 
], dtype=float32) >>> tf.keras.activations.relu(foo, max_value=5).numpy() array([0., 0., 0., 5., 5.], dtype=float32) >>> tf.keras.activations.relu(foo, threshold=5).numpy() array([-0., -0., 0., 0., 10.], dtype=float32) Arguments: x: Input `tensor` or `variable`. alpha: A `float` that governs the slope for values lower than the threshold. max_value: A `float` that sets the saturation threshold (the largest value the function will return). threshold: A `float` giving the threshold value of the activation function below which values will be damped or set to zero. Returns: A `Tensor` representing the input tensor, transformed by the relu activation function. Tensor will be of the same shape and dtype of input `x`." 4497,tanh,tensorflow/tensorflow/python/keras/activations.py,307,function,"Hyperbolic tangent activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.tanh(a) >>> b.numpy() array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32) Arguments: x: Input tensor. Returns: Tensor of same shape and dtype of input `x`, with tanh activation: `tanh(x) = sinh(x)/cosh(x) = ((exp(x) - exp(-x))/(exp(x) + exp(-x)))`." 4498,sigmoid,tensorflow/tensorflow/python/keras/activations.py,329,function,"Sigmoid activation function, `sigmoid(x) = 1 / (1 + exp(-x))`. Applies the sigmoid activation function. For small values (<-5), `sigmoid` returns a value close to zero, and for large values (>5) the result of the function gets close to 1. Sigmoid is equivalent to a 2-element Softmax, where the second element is assumed to be zero. The sigmoid function always returns a value between 0 and 1. For example: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.sigmoid(a) >>> b.numpy() array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01, 1.0000000e+00], dtype=float32) Arguments: x: Input tensor. Returns: Tensor with the sigmoid activation: `1 / (1 + exp(-x))`." 4499,exponential,tensorflow/tensorflow/python/keras/activations.py,359,function,"Exponential activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.exponential(a) >>> b.numpy() array([0.04978707, 0.36787945, 1., 2.7182817 , 20.085537], dtype=float32) Arguments: x: Input tensor. Returns: Tensor with exponential activation: `exp(x)`." 4500,hard_sigmoid,tensorflow/tensorflow/python/keras/activations.py,380,function,"Hard sigmoid activation function. A faster approximation of the sigmoid activation. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.hard_sigmoid(a) >>> b.numpy() array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32) Arguments: x: Input tensor. Returns: The hard sigmoid activation, defined as: - `if x < -2.5: return 0` - `if x > 2.5: return 1` - `if -2.5 <= x <= 2.5: return 0.2 * x + 0.5`" 4501,linear,tensorflow/tensorflow/python/keras/activations.py,407,function,"Linear activation function (pass-through). For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.linear(a) >>> b.numpy() array([-3., -1., 0., 1., 3.], dtype=float32) Arguments: x: Input tensor. Returns: The input, unmodified." 4502,serialize,tensorflow/tensorflow/python/keras/activations.py,428,function,"Returns the string identifier of an activation function. Arguments: activation : Function object. 
Returns: String denoting the name attribute of the input function. For example: >>> tf.keras.activations.serialize(tf.keras.activations.tanh) 'tanh' >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid) 'sigmoid' >>> tf.keras.activations.serialize('abcd') Traceback (most recent call last): ... ValueError: ('Cannot serialize', 'abcd') Raises: ValueError: The input function is not a valid one." 4503,deserialize,tensorflow/tensorflow/python/keras/activations.py,459,function,"Returns activation function given a string identifier. Arguments: name: The name of the activation function. custom_objects: Optional `{function_name: function_obj}` dictionary listing user-provided activation functions. Returns: Corresponding activation function. For example: >>> tf.keras.activations.deserialize('linear') >>> tf.keras.activations.deserialize('sigmoid') >>> tf.keras.activations.deserialize('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function:abcd Raises: ValueError: `Unknown activation function` if the input string does not denote any defined TensorFlow activation function." 4504,get,tensorflow/tensorflow/python/keras/activations.py,497,function,"Returns the activation function denoted by the input. Arguments: identifier: Function or string. Returns: Function corresponding to the input string or input function. For example: >>> tf.keras.activations.get('softmax') >>> tf.keras.activations.get(tf.keras.activations.softmax) >>> tf.keras.activations.get(None) >>> tf.keras.activations.get(abs) >>> tf.keras.activations.get('abcd') Traceback (most recent call last): ... ValueError: Unknown activation function:abcd Raises: ValueError: Input is an unknown function or string, i.e., the input does not denote any defined function." 4505,_ref_softmax,tensorflow/tensorflow/python/keras/activations_test.py,34,function, 4506,KerasActivationsTest,tensorflow/tensorflow/python/keras/activations_test.py,41,class, 4507,_DummyEagerGraph,tensorflow/tensorflow/python/keras/backend.py,116,class,"_DummyEagerGraph provides a thread local `key` attribute. We can't use threading.local directly, i.e. without subclassing, because gevent monkey patches threading.local and its version does not support weak references." 4508,backend,tensorflow/tensorflow/python/keras/backend.py,167,function,"Publicly accessible method for determining the current backend. Only exists for API compatibility with multi-backend Keras. Returns: The string ""tensorflow""." 4509,cast_to_floatx,tensorflow/tensorflow/python/keras/backend.py,180,function,"Cast a Numpy array to the default Keras float type. Arguments: x: Numpy array or TensorFlow tensor. Returns: The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor if `x` was a tensor), cast to its new type. Example: >>> tf.keras.backend.floatx() 'float32' >>> arr = np.array([1.0, 2.0], dtype='float64') >>> arr.dtype dtype('float64') >>> new_arr = cast_to_floatx(arr) >>> new_arr array([1., 2.], dtype=float32) >>> new_arr.dtype dtype('float32')" 4510,get_uid,tensorflow/tensorflow/python/keras/backend.py,218,function,"Associates a string prefix with an integer counter in a TensorFlow graph. Arguments: prefix: String prefix to index. Returns: Unique integer ID. Example: >>> get_uid('dense') 1 >>> get_uid('dense') 2" 4511,reset_uids,tensorflow/tensorflow/python/keras/backend.py,244,function,"Resets graph identifiers." 4512,clear_session,tensorflow/tensorflow/python/keras/backend.py,252,function,"Resets all state generated by Keras.
Keras manages a global state, which it uses to implement the Functional model-building API and to uniquify autogenerated layer names. If you are creating many models in a loop, this global state will consume an increasing amount of memory over time, and you may want to clear it. Calling `clear_session()` releases the global state: this helps avoid clutter from old models and layers, especially when memory is limited. Example 1: calling `clear_session()` when creating models in a loop ```python for _ in range(100): # Without `clear_session()`, each iteration of this loop will # slightly increase the size of the global state managed by Keras model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)]) for _ in range(100): # With `clear_session()` called at the beginning, # Keras starts with a blank state at each iteration # and memory consumption is constant over time. tf.keras.backend.clear_session() model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)]) ``` Example 2: resetting the layer name generation counter >>> import tensorflow as tf >>> layers = [tf.keras.layers.Dense(10) for _ in range(10)] >>> new_layer = tf.keras.layers.Dense(10) >>> print(new_layer.name) dense_10 >>> tf.keras.backend.set_learning_phase(1) >>> print(tf.keras.backend.learning_phase()) 1 >>> tf.keras.backend.clear_session() >>> new_layer = tf.keras.layers.Dense(10) >>> print(new_layer.name) dense" 4513,manual_variable_initialization,tensorflow/tensorflow/python/keras/backend.py,314,function,"Sets the manual variable initialization flag. This boolean flag determines whether variables should be initialized as they are instantiated (default), or if the user should handle the initialization (e.g. via `tf.compat.v1.initialize_all_variables()`). Arguments: value: Python boolean." 4514,learning_phase,tensorflow/tensorflow/python/keras/backend.py,331,function,"Returns the learning phase flag. The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time. Returns: Learning phase (scalar integer tensor or Python integer)." 4515,global_learning_phase_is_set,tensorflow/tensorflow/python/keras/backend.py,360,function, 4516,_mark_func_graph_as_unsaveable,tensorflow/tensorflow/python/keras/backend.py,364,function,"Mark func graph as unsaveable due to use of symbolic keras learning phase. Functions that capture the symbolic learning phase cannot be exported to SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised if it is exported. Args: graph: Graph or FuncGraph object. learning_phase: Learning phase placeholder or int defined in the graph." 4517,symbolic_learning_phase,tensorflow/tensorflow/python/keras/backend.py,383,function, 4518,_default_learning_phase,tensorflow/tensorflow/python/keras/backend.py,389,function, 4519,set_learning_phase,tensorflow/tensorflow/python/keras/backend.py,402,function,"Sets the learning phase to a fixed value. The backend learning phase affects any code that calls `backend.learning_phase()` In particular, all Keras built-in layers use the learning phase as the default for the `training` arg to `Layer.__call__`. User-written layers and models can achieve the same behavior with code that looks like: ```python def call(self, inputs, training=None): if training is None: training = backend.learning_phase() ``` Arguments: value: Learning phase value, either 0 or 1 (integers). 
0 = test, 1 = train Raises: ValueError: if `value` is neither `0` nor `1`." 4520,deprecated_internal_set_learning_phase,tensorflow/tensorflow/python/keras/backend.py,429,function,"A deprecated internal implementation of set_learning_phase. This method is an internal-only version of `set_learning_phase` that does not raise a deprecation error. It is required because saved_model needs to keep working with user code that uses the deprecated learning phase methods until those APIs are fully removed from the public API. Specifically, SavedModel saving needs to make sure the learning phase is 0 during tracing even if users overwrote it to a different value. But, we don't want to raise deprecation warnings for users when SavedModel saving sets the learning phase just for compatibility with code that relied on explicitly setting the learning phase for other values. Arguments: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Raises: ValueError: if `value` is neither `0` nor `1`." 4521,learning_phase_scope,tensorflow/tensorflow/python/keras/backend.py,467,function,"Provides a scope within which the learning phase is equal to `value`. The learning phase gets restored to its original value upon exiting the scope. Arguments: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if `value` is neither `0` nor `1`." 4522,deprecated_internal_learning_phase_scope,tensorflow/tensorflow/python/keras/backend.py,490,function,"An internal-only version of `learning_phase_scope`. Unlike the public method, this method does not raise a deprecation warning. This is needed because saved model saving needs to set learning phase to maintain compatibility with code that sets/gets the learning phase, but saved model saving itself shouldn't raise a deprecation warning. We can get rid of this method and its usages when the public API is removed. Arguments: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if `value` is neither `0` nor `1`." 4523,eager_learning_phase_scope,tensorflow/tensorflow/python/keras/backend.py,544,function,"Internal scope that sets the learning phase in eager / tf.function only. Arguments: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if `value` is neither `0` nor `1`." 4524,_current_graph,tensorflow/tensorflow/python/keras/backend.py,574,function,"Return the graph members of `op_input_list`, or the current graph." 4525,_get_session,tensorflow/tensorflow/python/keras/backend.py,579,function,Returns the session object for the current thread. 4526,get_session,tensorflow/tensorflow/python/keras/backend.py,605,function,"Returns the TF session to be used by the backend. If a default TensorFlow session is available, we will return it. Else, we will return the global Keras session assuming it matches the current graph. If no global Keras session exists at this point, we will create a new global session. Note that you can manually set the global session via `K.set_session(sess)`. Arguments: op_input_list: An optional sequence of tensors or ops, which will be used to determine the current graph. Otherwise the default graph will be used. Returns: A TensorFlow session." 4527,get_graph,tensorflow/tensorflow/python/keras/backend.py,639,function, 4528,_scratch_graph,tensorflow/tensorflow/python/keras/backend.py,650,function,"Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into a scratch graph in order to create a function. DistributionStrategies, in turn, constructs multiple functions as well as a final combined function. In order for that logic to work correctly, all of the functions need to be created on the same scratch FuncGraph. Args: graph: A graph to be used as the current scratch graph. If not set then a scratch graph will either be retrieved or created: Yields: The current scratch graph." 4529,set_session,tensorflow/tensorflow/python/keras/backend.py,686,function,"Sets the global TensorFlow session. Arguments: session: A TF Session." 4530,get_default_session_config,tensorflow/tensorflow/python/keras/backend.py,696,function, 4531,get_default_graph_uid_map,tensorflow/tensorflow/python/keras/backend.py,708,function, 4532,_TfDeviceCaptureOp,tensorflow/tensorflow/python/keras/backend.py,720,class,Class for capturing the TF device scope. 4533,_get_current_tf_device,tensorflow/tensorflow/python/keras/backend.py,736,function,"Return explicit device of current context, otherwise returns `None`. Returns: If the current device scope is explicitly set, it returns a string with the device (`CPU` or `GPU`). If the scope is not explicitly set, it will return `None`." 4534,_is_current_explicit_device,tensorflow/tensorflow/python/keras/backend.py,753,function,"Check if the current device is explicitly set on the device type specified. Arguments: device_type: A string containing `GPU` or `CPU` (case-insensitive). Returns: A boolean indicating if the current device scope is explicitly set on the device type. Raises: ValueError: If the `device_type` string indicates an unsupported device." 4535,_get_available_gpus,tensorflow/tensorflow/python/keras/backend.py,773,function,"Get a list of available gpu devices (formatted as strings). Returns: A list of available GPU devices." 4536,_has_nchw_support,tensorflow/tensorflow/python/keras/backend.py,789,function,"Check whether the current scope supports NCHW ops. TensorFlow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw" 4537,_constant_to_tensor,tensorflow/tensorflow/python/keras/backend.py,808,function,"Convert the input `x` to a tensor of type `dtype`. This is slightly faster than the _to_tensor function, at the cost of handling fewer cases. Arguments: x: An object to be converted (numpy arrays, floats, ints and lists of them). dtype: The destination type. Returns: A tensor." 4538,_to_tensor,tensorflow/tensorflow/python/keras/backend.py,825,function,"Convert the input `x` to a tensor of type `dtype`. Arguments: x: An object to be converted (numpy array, list, tensors). dtype: The destination type. Returns: A tensor." 4539,is_sparse,tensorflow/tensorflow/python/keras/backend.py,839,function,"Returns whether a tensor is a sparse tensor. Arguments: tensor: A tensor instance. Returns: A boolean. Example: >>> a = tf.keras.backend.placeholder((2, 2), sparse=False) >>> print(tf.keras.backend.is_sparse(a)) False >>> b = tf.keras.backend.placeholder((2, 2), sparse=True) >>> print(tf.keras.backend.is_sparse(b)) True" 4540,to_dense,tensorflow/tensorflow/python/keras/backend.py,867,function,"Converts a sparse tensor into a dense tensor and returns it. Arguments: tensor: A tensor instance (potentially sparse). Returns: A dense tensor. 
Examples: >>> b = tf.keras.backend.placeholder((2, 2), sparse=True) >>> print(tf.keras.backend.is_sparse(b)) True >>> c = tf.keras.backend.to_dense(b) >>> print(tf.keras.backend.is_sparse(c)) False" 4541,name_scope,tensorflow/tensorflow/python/keras/backend.py,894,function,"A context manager for use when defining a Python op. This context manager pushes a name scope, which will make the name of all operations added within it have a prefix. For example, to define a new Python op called `my_op`: def my_op(a): with tf.name_scope(""MyOp"") as scope: a = tf.convert_to_tensor(a, name=""a"") # Define some computation that uses `a`. return foo_op(..., name=scope) When executed, the Tensor `a` will have the name `MyOp/a`. Args: name: The prefix to use on all names created within the name scope. Returns: Name scope context manager." 4542,variable,tensorflow/tensorflow/python/keras/backend.py,925,function,"Instantiates a variable and returns it. Arguments: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val, dtype='float64', ... name='example_var') >>> tf.keras.backend.dtype(kvar) 'float64' >>> print(kvar) " 4543,track_tf_optimizer,tensorflow/tensorflow/python/keras/backend.py,974,function,Tracks the given TF optimizer for initialization of its variables. 4544,track_variable,tensorflow/tensorflow/python/keras/backend.py,982,function,Tracks the given variable for initialization. 4545,unique_object_name,tensorflow/tensorflow/python/keras/backend.py,990,function,"Makes a object name (or arbitrary string) unique within a TensorFlow graph. Arguments: name: String name to make unique. name_uid_map: An optional defaultdict(int) to use when creating unique names. If None (default), uses a per-Graph dictionary. avoid_names: An optional set or dict with names which should not be used. If None (default) does not avoid any names. namespace: Gets a name which is unique within the (graph, namespace). Layers which are not Networks use a blank namespace and so get graph-global names. zero_based: If True, name sequences start with no suffix (e.g. ""dense"", ""dense_1""). If False, naming is one-based (""dense_1"", ""dense_2""). Returns: Unique string name. Example: unique_object_name('dense') # dense_1 unique_object_name('dense') # dense_2" 4546,_get_variables,tensorflow/tensorflow/python/keras/backend.py,1039,function,Returns variables corresponding to the given graph for initialization. 4547,_initialize_variables,tensorflow/tensorflow/python/keras/backend.py,1048,function,Utility to initialize uninitialized variables on the fly. 4548,constant,tensorflow/tensorflow/python/keras/backend.py,1076,function,"Creates a constant tensor. Arguments: value: A constant value (or list) dtype: The type of the elements of the resulting tensor. shape: Optional dimensions of resulting tensor. name: Optional name for the tensor. Returns: A Constant Tensor." 4549,is_keras_tensor,tensorflow/tensorflow/python/keras/backend.py,1095,function,"Returns whether `x` is a Keras tensor. A ""Keras tensor"" is a tensor that was returned by a Keras layer, (`Layer` class) or by `Input`. Arguments: x: A candidate tensor. Returns: A boolean: Whether the argument is a Keras tensor. Raises: ValueError: In case `x` is not a symbolic tensor. 
Examples: >>> np_var = np.array([1, 2]) >>> # A numpy array is not a symbolic tensor. >>> tf.keras.backend.is_keras_tensor(np_var) Traceback (most recent call last): ... ValueError: Unexpectedly found an instance of type ``. Expected a symbolic tensor instance. >>> keras_var = tf.keras.backend.variable(np_var) >>> # A variable created with the keras backend is not a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_var) False >>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> # A placeholder is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_placeholder) True >>> keras_input = tf.keras.layers.Input([10]) >>> # An Input is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_input) True >>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input) >>> # Any Keras layer output is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_layer_output) True" 4550,placeholder,tensorflow/tensorflow/python/keras/backend.py,1149,function,"Instantiates a placeholder tensor and returns it. Arguments: shape: Shape of the placeholder (integer tuple, may include `None` entries). ndim: Number of axes of the tensor. At least one of {`shape`, `ndim`} must be specified. If both are specified, `shape` is used. dtype: Placeholder type. sparse: Boolean, whether the placeholder should have a sparse type. name: Optional name string for the placeholder. ragged: Boolean, whether the placeholder should have a ragged type. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see this [guide](https://www.tensorflow.org/guide/ragged_tensors). Raises: ValueError: If called with eager execution ValueError: If called with sparse = True and ragged = True. Returns: Tensor instance (with Keras metadata included). Examples: >>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> input_ph " 4551,is_placeholder,tensorflow/tensorflow/python/keras/backend.py,1244,function,"Returns whether `x` is a placeholder. Arguments: x: A candidate placeholder. Returns: Boolean." 4552,shape,tensorflow/tensorflow/python/keras/backend.py,1267,function,"Returns the symbolic shape of a tensor or variable. Arguments: x: A tensor or variable. Returns: A symbolic shape (which is itself a tensor). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.shape(kvar) >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> tf.keras.backend.shape(input) " 4553,int_shape,tensorflow/tensorflow/python/keras/backend.py,1291,function,"Returns the shape of tensor or variable as a tuple of int or None entries. Arguments: x: Tensor or variable. Returns: A tuple of integers (or None entries). Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> tf.keras.backend.int_shape(input) (2, 4, 5) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.int_shape(kvar) (2, 2)" 4554,ndim,tensorflow/tensorflow/python/keras/backend.py,1321,function,"Returns the number of axes in a tensor, as an integer. Arguments: x: Tensor or variable. Returns: Integer (scalar), number of axes. 
Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.ndim(input) 3 >>> tf.keras.backend.ndim(kvar) 2" 4555,dtype,tensorflow/tensorflow/python/keras/backend.py,1350,function,"Returns the dtype of a Keras tensor or variable, as a string. Arguments: x: Tensor or variable. Returns: String, dtype of `x`. Examples: >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5))) 'float32' >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5), ... dtype='float32')) 'float32' >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5), ... dtype='float64')) 'float64' >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]])) >>> tf.keras.backend.dtype(kvar) 'float32' >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]), ... dtype='float32') >>> tf.keras.backend.dtype(kvar) 'float32'" 4556,eval,tensorflow/tensorflow/python/keras/backend.py,1382,function,"Evaluates the value of a variable. Arguments: x: A variable. Returns: A Numpy array. Examples: >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]), ... dtype='float32') >>> tf.keras.backend.eval(kvar) array([[1., 2.], [3., 4.]], dtype=float32)" 4557,zeros,tensorflow/tensorflow/python/keras/backend.py,1404,function,"Instantiates an all-zeros variable and returns it. Arguments: shape: Tuple or list of integers, shape of returned Keras variable. dtype: data type of returned Keras variable. name: name of returned Keras variable. Returns: A variable (including Keras metadata), filled with `0.0`. Note that if `shape` was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead. Example: >>> kvar = tf.keras.backend.zeros((3,4)) >>> tf.keras.backend.eval(kvar) array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]], dtype=float32) >>> A = tf.constant([1,2,3]) >>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.] >>> tf.keras.backend.eval(kvar2) array([0., 0., 0.], dtype=float32) >>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32) >>> tf.keras.backend.eval(kvar3) array([0, 0, 0], dtype=int32) >>> kvar4 = tf.keras.backend.zeros([2,3]) >>> tf.keras.backend.eval(kvar4) array([[0., 0., 0.], [0., 0., 0.]], dtype=float32)" 4558,ones,tensorflow/tensorflow/python/keras/backend.py,1449,function,"Instantiates an all-ones variable and returns it. Arguments: shape: Tuple of integers, shape of returned Keras variable. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, filled with `1.0`. Note that if `shape` was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead. Example: >>> kvar = tf.keras.backend.ones((3,4)) >>> tf.keras.backend.eval(kvar) array([[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], dtype=float32)" 4559,eye,tensorflow/tensorflow/python/keras/backend.py,1484,function,"Instantiates an identity matrix and returns it. Arguments: size: Integer, number of rows/columns. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, an identity matrix. Example: >>> kvar = tf.keras.backend.eye(3) >>> tf.keras.backend.eval(kvar) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32)" 4560,zeros_like,tensorflow/tensorflow/python/keras/backend.py,1513,function,"Instantiates an all-zeros variable of the same shape as another tensor.
Arguments: x: Keras variable or Keras tensor. dtype: dtype of returned Keras variable. `None` uses the dtype of `x`. name: name for the variable to create. Returns: A Keras variable with the shape of `x` filled with zeros. Example: from tensorflow.keras import backend as K kvar = K.variable(np.random.random((2,3))) kvar_zeros = K.zeros_like(kvar) K.eval(kvar_zeros) # array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32)" 4561,ones_like,tensorflow/tensorflow/python/keras/backend.py,1541,function,"Instantiates an all-ones variable of the same shape as another tensor. Arguments: x: Keras variable or tensor. dtype: String, dtype of returned Keras variable. None uses the dtype of x. name: String, name for the variable to create. Returns: A Keras variable with the shape of x filled with ones. Example: >>> kvar = tf.keras.backend.variable(np.random.random((2,3))) >>> kvar_ones = tf.keras.backend.ones_like(kvar) >>> tf.keras.backend.eval(kvar_ones) array([[1., 1., 1.], [1., 1., 1.]], dtype=float32)" 4562,identity,tensorflow/tensorflow/python/keras/backend.py,1565,function,"Returns a tensor with the same content as the input tensor. Arguments: x: The input tensor. name: String, name for the variable to create. Returns: A tensor of the same shape, type and content." 4563,random_uniform_variable,tensorflow/tensorflow/python/keras/backend.py,1579,function,"Instantiates a variable with values drawn from a uniform distribution. Arguments: shape: Tuple of integers, shape of returned Keras variable. low: Float, lower boundary of the output interval. high: Float, upper boundary of the output interval. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: >>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3), ... low=0.0, high=1.0) >>> kvar " 4564,random_normal_variable,tensorflow/tensorflow/python/keras/backend.py,1613,function,"Instantiates a variable with values drawn from a normal distribution. Arguments: shape: Tuple of integers, shape of returned Keras variable. mean: Float, mean of the normal distribution. scale: Float, standard deviation of the normal distribution. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3), ... mean=0.0, scale=1.0) >>> kvar " 4565,count_params,tensorflow/tensorflow/python/keras/backend.py,1648,function,"Returns the static number of elements in a variable or tensor. Arguments: x: Variable or tensor. Returns: Integer, the number of scalars in `x`. Example: >>> kvar = tf.keras.backend.zeros((2,3)) >>> tf.keras.backend.count_params(kvar) 6 >>> tf.keras.backend.eval(kvar) array([[0., 0., 0.], [0., 0., 0.]], dtype=float32)" 4566,cast,tensorflow/tensorflow/python/keras/backend.py,1672,function,"Casts a tensor to a different dtype and returns it. You can cast a Keras variable but it still returns a Keras tensor. Arguments: x: Keras tensor (or variable). dtype: String, either (`'float16'`, `'float32'`, or `'float64'`). Returns: Keras tensor with dtype `dtype`. Examples: Cast a float32 variable to a float64 tensor >>> input = tf.keras.backend.ones(shape=(1,3)) >>> print(input) >>> cast_input = tf.keras.backend.cast(input, dtype='float64') >>> print(cast_input) tf.Tensor([[1. 1. 
1.]], shape=(1, 3), dtype=float64)" 4567,update,tensorflow/tensorflow/python/keras/backend.py,1703,function, 4568,update_add,tensorflow/tensorflow/python/keras/backend.py,1708,function,"Update the value of `x` by adding `increment`. Arguments: x: A Variable. increment: A tensor of same shape as `x`. Returns: The variable `x` updated." 4569,update_sub,tensorflow/tensorflow/python/keras/backend.py,1722,function,"Update the value of `x` by subtracting `decrement`. Arguments: x: A Variable. decrement: A tensor of same shape as `x`. Returns: The variable `x` updated." 4570,moving_average_update,tensorflow/tensorflow/python/keras/backend.py,1736,function,"Compute the exponential moving average of a value. The moving average 'x' is updated with 'value' following: ``` x = x * momentum + value * (1 - momentum) ``` For example: >>> x = tf.Variable(0.0) >>> momentum=0.9 >>> moving_average_update(x, value = 2.0, momentum=momentum).numpy() >>> x.numpy() 0.2 The result will be biased towards the initial value of the variable. If the variable was initialized to zero, you can divide by `1 - momentum ** num_updates` to debias it (Section 3 of [Kingma et al., 2015](https://arxiv.org/abs/1412.6980)): >>> num_updates = 1.0 >>> x_zdb = x/(1 - momentum**num_updates) >>> x_zdb.numpy() 2.0 Arguments: x: A Variable, the moving average. value: A tensor with the same shape as `x`, the new value to be averaged in. momentum: The moving average momentum. Returns: The updated variable." 4571,dot,tensorflow/tensorflow/python/keras/backend.py,1783,function,"Multiplies 2 tensors (and/or variables) and returns a tensor. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor, dot product of `x` and `y`. Examples: >>> x = tf.keras.backend.placeholder(shape=(2, 3)) >>> y = tf.keras.backend.placeholder(shape=(3, 4)) >>> xy = tf.keras.backend.dot(x, y) >>> xy >>> x = tf.keras.backend.placeholder(shape=(32, 28, 3)) >>> y = tf.keras.backend.placeholder(shape=(3, 4)) >>> xy = tf.keras.backend.dot(x, y) >>> xy >>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1) >>> y = tf.keras.backend.ones((4, 3, 5)) >>> xy = tf.keras.backend.dot(x, y) >>> tf.keras.backend.int_shape(xy) (2, 4, 5)" 4572,batch_dot,tensorflow/tensorflow/python/keras/backend.py,1844,function,"Batchwise dot product. `batch_dot` is used to compute dot product of `x` and `y` when `x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`. `batch_dot` results in a tensor or variable with less dimensions than the input. If the number of dimensions is reduced to 1, we use `expand_dims` to make sure that ndim is at least 2. Arguments: x: Keras tensor or variable with `ndim >= 2`. y: Keras tensor or variable with `ndim >= 2`. axes: Tuple or list of integers with target dimensions, or single integer. The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` should be equal. Returns: A tensor with shape equal to the concatenation of `x`'s shape (less the dimension that was summed over) and `y`'s shape (less the batch dimension and the dimension that was summed over). If the final rank is 1, we reshape it to `(batch_size, 1)`. Examples: >>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1)) >>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20)) >>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2)) >>> tf.keras.backend.int_shape(xy_batch_dot) (32, 1, 30) Shape inference: Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`. 
If `axes` is (1, 2), to find the output shape of resultant tensor, loop through each dimension in `x`'s shape and `y`'s shape: * `x.shape[0]` : 100 : append to output shape * `x.shape[1]` : 20 : do not append to output shape, dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1) * `y.shape[0]` : 100 : do not append to output shape, always ignore first dimension of `y` * `y.shape[1]` : 30 : append to output shape * `y.shape[2]` : 20 : do not append to output shape, dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2) `output_shape` = `(100, 30)`" 4573,transpose,tensorflow/tensorflow/python/keras/backend.py,2033,function,"Transposes a tensor and returns it. Arguments: x: Tensor or variable. Returns: A tensor. Examples: >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]]) >>> tf.keras.backend.eval(var) array([[1., 2., 3.], [4., 5., 6.]], dtype=float32) >>> var_transposed = tf.keras.backend.transpose(var) >>> tf.keras.backend.eval(var_transposed) array([[1., 4.], [2., 5.], [3., 6.]], dtype=float32) >>> input = tf.keras.backend.placeholder((2, 3)) >>> input >>> input_transposed = tf.keras.backend.transpose(input) >>> input_transposed " 4574,gather,tensorflow/tensorflow/python/keras/backend.py,2065,function,"Retrieves the elements of indices `indices` in the tensor `reference`. Arguments: reference: A tensor. indices: An integer tensor of indices. Returns: A tensor of same type as `reference`. Examples: >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]]) >>> tf.keras.backend.eval(var) array([[1., 2., 3.], [4., 5., 6.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [0]) >>> tf.keras.backend.eval(var_gathered) array([[1., 2., 3.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [1]) >>> tf.keras.backend.eval(var_gathered) array([[4., 5., 6.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [0,1,0]) >>> tf.keras.backend.eval(var_gathered) array([[1., 2., 3.], [4., 5., 6.], [1., 2., 3.]], dtype=float32)" 4575,max,tensorflow/tensorflow/python/keras/backend.py,2101,function,"Maximum value in a tensor. Arguments: x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with maximum values of `x`." 4576,min,tensorflow/tensorflow/python/keras/backend.py,2120,function,"Minimum value in a tensor. Arguments: x: A tensor or variable. axis: An integer, the axis to find minimum values. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with minimum values of `x`." 4577,sum,tensorflow/tensorflow/python/keras/backend.py,2139,function,"Sum of the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to sum over. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with sum of `x`." 4578,prod,tensorflow/tensorflow/python/keras/backend.py,2158,function,"Multiplies the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the product. 
keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the product of elements of `x`." 4579,cumsum,tensorflow/tensorflow/python/keras/backend.py,2177,function,"Cumulative sum of the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the sum. Returns: A tensor of the cumulative sum of values of `x` along `axis`." 4580,cumprod,tensorflow/tensorflow/python/keras/backend.py,2192,function,"Cumulative product of the values in a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the product. Returns: A tensor of the cumulative product of values of `x` along `axis`." 4581,var,tensorflow/tensorflow/python/keras/backend.py,2206,function,"Variance of a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: An integer, the axis to compute the variance. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the variance of elements of `x`." 4582,std,tensorflow/tensorflow/python/keras/backend.py,2227,function,"Standard deviation of a tensor, alongside the specified axis. It is an alias to `tf.math.reduce_std`. Arguments: x: A tensor or variable. It should have numerical dtypes. Boolean type inputs will be converted to float. axis: An integer, the axis to compute the standard deviation. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(x), rank(x))`. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: A tensor with the standard deviation of elements of `x` with same dtype. Boolean type input will be converted to float." 4583,mean,tensorflow/tensorflow/python/keras/backend.py,2254,function,"Mean of a tensor, alongside the specified axis. Arguments: x: A tensor or variable. axis: A list of integers. Axes to compute the mean. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is `True`, the reduced dimensions are retained with length 1. Returns: A tensor with the mean of elements of `x`." 4584,any,tensorflow/tensorflow/python/keras/backend.py,2275,function,"Bitwise reduction (logical OR). Arguments: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether to drop or broadcast the reduction axes. Returns: A uint8 tensor (0s and 1s)." 4585,all,tensorflow/tensorflow/python/keras/backend.py,2292,function,"Bitwise reduction (logical AND). Arguments: x: Tensor or variable. axis: axis along which to perform the reduction. keepdims: whether to drop or broadcast the reduction axes. Returns: A uint8 tensor (0s and 1s)." 4586,argmax,tensorflow/tensorflow/python/keras/backend.py,2309,function,"Returns the index of the maximum value along an axis. Arguments: x: Tensor or variable. axis: axis along which to perform the reduction. Returns: A tensor." 4587,argmin,tensorflow/tensorflow/python/keras/backend.py,2324,function,"Returns the index of the minimum value along an axis. Arguments: x: Tensor or variable.
axis: axis along which to perform the reduction. Returns: A tensor." 4588,square,tensorflow/tensorflow/python/keras/backend.py,2339,function,"Element-wise square. Arguments: x: Tensor or variable. Returns: A tensor." 4589,abs,tensorflow/tensorflow/python/keras/backend.py,2353,function,"Element-wise absolute value. Arguments: x: Tensor or variable. Returns: A tensor." 4590,sqrt,tensorflow/tensorflow/python/keras/backend.py,2367,function,"Element-wise square root. Arguments: x: Tensor or variable. Returns: A tensor." 4591,exp,tensorflow/tensorflow/python/keras/backend.py,2384,function,"Element-wise exponential. Arguments: x: Tensor or variable. Returns: A tensor." 4592,log,tensorflow/tensorflow/python/keras/backend.py,2398,function,"Element-wise log. Arguments: x: Tensor or variable. Returns: A tensor." 4593,logsumexp,tensorflow/tensorflow/python/keras/backend.py,2410,function,"Computes log(sum(exp(elements across dimensions of a tensor))). This function is more numerically stable than log(sum(exp(x))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. Arguments: x: A tensor or variable. axis: An integer, the axis to reduce over. keepdims: A boolean, whether to keep the dimensions or not. If `keepdims` is `False`, the rank of the tensor is reduced by 1. If `keepdims` is `True`, the reduced dimension is retained with length 1. Returns: The reduced tensor." 4594,round,tensorflow/tensorflow/python/keras/backend.py,2433,function,"Element-wise rounding to the closest integer. In case of tie, the rounding mode used is ""half to even"". Arguments: x: Tensor or variable. Returns: A tensor." 4595,sign,tensorflow/tensorflow/python/keras/backend.py,2449,function,"Element-wise sign. Arguments: x: Tensor or variable. Returns: A tensor." 4596,pow,tensorflow/tensorflow/python/keras/backend.py,2463,function,"Element-wise exponentiation. Arguments: x: Tensor or variable. a: Python integer. Returns: A tensor." 4597,clip,tensorflow/tensorflow/python/keras/backend.py,2478,function,"Element-wise value clipping. Arguments: x: Tensor or variable. min_value: Python float, integer, or tensor. max_value: Python float, integer, or tensor. Returns: A tensor." 4598,equal,tensorflow/tensorflow/python/keras/backend.py,2502,function,"Element-wise equality between two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor." 4599,not_equal,tensorflow/tensorflow/python/keras/backend.py,2517,function,"Element-wise inequality between two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor." 4600,greater,tensorflow/tensorflow/python/keras/backend.py,2532,function,"Element-wise truth value of (x > y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor." 4601,greater_equal,tensorflow/tensorflow/python/keras/backend.py,2547,function,"Element-wise truth value of (x >= y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor." 4602,less,tensorflow/tensorflow/python/keras/backend.py,2562,function,"Element-wise truth value of (x < y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor." 4603,less_equal,tensorflow/tensorflow/python/keras/backend.py,2577,function,"Element-wise truth value of (x <= y). Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor." 4604,maximum,tensorflow/tensorflow/python/keras/backend.py,2592,function,"Element-wise maximum of two tensors. 
Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor with the element-wise maximum value(s) of `x` and `y`. Examples: >>> x = tf.Variable([[1, 2], [3, 4]]) >>> y = tf.Variable([[2, 1], [0, -1]]) >>> m = tf.keras.backend.maximum(x, y) >>> m " 4605,minimum,tensorflow/tensorflow/python/keras/backend.py,2617,function,"Element-wise minimum of two tensors. Arguments: x: Tensor or variable. y: Tensor or variable. Returns: A tensor." 4606,sin,tensorflow/tensorflow/python/keras/backend.py,2632,function,"Computes sin of x element-wise. Arguments: x: Tensor or variable. Returns: A tensor." 4607,cos,tensorflow/tensorflow/python/keras/backend.py,2646,function,"Computes cos of x element-wise. Arguments: x: Tensor or variable. Returns: A tensor." 4608,_regular_normalize_batch_in_training,tensorflow/tensorflow/python/keras/backend.py,2658,function,"Non-fused version of `normalize_batch_in_training`. Arguments: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple of length 3, `(normalized_tensor, mean, variance)`." 4609,_broadcast_normalize_batch_in_training,tensorflow/tensorflow/python/keras/backend.py,2681,function,"Non-fused, broadcast version of `normalize_batch_in_training`. Arguments: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple of length 3, `(normalized_tensor, mean, variance)`." 4610,_fused_normalize_batch_in_training,tensorflow/tensorflow/python/keras/backend.py,2724,function,"Fused version of `normalize_batch_in_training`. Arguments: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple of length 3, `(normalized_tensor, mean, variance)`." 4611,normalize_batch_in_training,tensorflow/tensorflow/python/keras/backend.py,2761,function,"Computes mean and std for batch then applies batch_normalization on batch. Arguments: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple of length 3, `(normalized_tensor, mean, variance)`." 4612,batch_normalization,tensorflow/tensorflow/python/keras/backend.py,2792,function,"Applies batch normalization on x given mean, var, beta and gamma. I.e. returns: `output = (x - mean) / sqrt(var + epsilon) * gamma + beta` Arguments: x: Input tensor or variable. mean: Mean of batch. var: Variance of batch. beta: Tensor with which to center the input. gamma: Tensor by which to scale the input. axis: Integer, the axis that should be normalized. (typically the features axis). epsilon: Fuzz factor. Returns: A tensor." 4613,concatenate,tensorflow/tensorflow/python/keras/backend.py,2855,function,"Concatenates a list of tensors alongside the specified axis. Arguments: tensors: list of tensors to concatenate. axis: concatenation axis. Returns: A tensor.
Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]]) >>> tf.keras.backend.concatenate((a, b), axis=-1) " 4614,reshape,tensorflow/tensorflow/python/keras/backend.py,2893,function,"Reshapes a tensor to the specified shape. Arguments: x: Tensor or variable. shape: Target shape tuple. Returns: A tensor. Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) >>> a >>> tf.keras.backend.reshape(a, shape=(2, 6)) " 4615,permute_dimensions,tensorflow/tensorflow/python/keras/backend.py,2923,function,"Permutes axes in a tensor. Arguments: x: Tensor or variable. pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`. Returns: A tensor. Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) >>> a >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0)) " 4616,resize_images,tensorflow/tensorflow/python/keras/backend.py,2955,function,"Resizes the images contained in a 4D tensor. Arguments: x: Tensor or variable to resize. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `""channels_first""`, `""channels_last""`. interpolation: A string, one of `nearest` or `bilinear`. Returns: A tensor. Raises: ValueError: in case of incorrect value for `data_format` or `interpolation`." 4617,resize_volumes,tensorflow/tensorflow/python/keras/backend.py,3019,function,"Resizes the volume contained in a 5D tensor. Arguments: x: Tensor or variable to resize. depth_factor: Positive integer. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `""channels_first""`, `""channels_last""`. Returns: A tensor. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4618,repeat_elements,tensorflow/tensorflow/python/keras/backend.py,3052,function,"Repeats the elements of a tensor along an axis, like `np.repeat`. If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output will have shape `(s1, s2 * rep, s3)`. Arguments: x: Tensor or variable. rep: Python integer, number of times to repeat. axis: Axis along which to repeat. Returns: A tensor. Example: >>> b = tf.constant([1, 2, 3]) >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0) " 4619,repeat,tensorflow/tensorflow/python/keras/backend.py,3114,function,"Repeats a 2D tensor. If `x` has shape (samples, dim) and `n` is `2`, the output will have shape `(samples, 2, dim)`. Arguments: x: Tensor or variable. n: Python integer, number of times to repeat. Returns: A tensor. Example: >>> b = tf.constant([[1, 2], [3, 4]]) >>> b >>> tf.keras.backend.repeat(b, n=2) " 4620,arange,tensorflow/tensorflow/python/keras/backend.py,3150,function,"Creates a 1D tensor containing a sequence of integers. The function arguments use the same convention as Theano's arange: if only one argument is provided, it is in fact the ""stop"" argument and ""start"" is 0. The default type of the returned tensor is `'int32'` to match TensorFlow's default. Arguments: start: Start value. stop: Stop value. step: Difference between two successive values. dtype: Integer dtype to use. Returns: An integer tensor. Example: >>> tf.keras.backend.arange(start=0, stop=10, step=1.5) " 4621,tile,tensorflow/tensorflow/python/keras/backend.py,3189,function,"Creates a tensor by tiling `x` by `n`. Arguments: x: A tensor or variable. n: A list of integers. The length must be the same as the number of dimensions in `x`. Returns: A tiled tensor."
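The repeat/tile entries above are easy to confuse, so here is a minimal sketch of their shape semantics. This example is an editorial illustration, not part of the indexed sources; it assumes TensorFlow 2.x with `tf.keras.backend` available as `K`.

```python
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

x = K.constant([[1., 2.], [3., 4.]])  # shape (2, 2)

# repeat_elements: like np.repeat along a single axis.
print(K.int_shape(K.repeat_elements(x, rep=3, axis=1)))  # (2, 6)

# repeat: inserts a new axis 1 and copies the 2D input n times.
print(K.int_shape(K.repeat(x, n=2)))  # (2, 2, 2)

# tile: `n` must have one entry per dimension of `x`.
print(K.int_shape(K.tile(x, n=[2, 3])))  # (4, 6)

# arange: a single argument is interpreted as `stop`, with start=0.
print(K.eval(K.arange(5)))  # [0 1 2 3 4]
```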
4622,flatten,tensorflow/tensorflow/python/keras/backend.py,3207,function,"Flatten a tensor. Arguments: x: A tensor or variable. Returns: A tensor, reshaped into 1-D. Example: >>> b = tf.constant([[1, 2], [3, 4]]) >>> b >>> tf.keras.backend.flatten(b) " 4623,batch_flatten,tensorflow/tensorflow/python/keras/backend.py,3233,function,"Turn an nD tensor into a 2D tensor with same 0th dimension. In other words, it flattens each data sample of a batch. Arguments: x: A tensor or variable. Returns: A tensor. Examples: Flattening a 3D tensor to 2D by collapsing the last dimension. >>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5)) >>> x_batch_flatten = batch_flatten(x_batch) >>> tf.keras.backend.int_shape(x_batch_flatten) (2, 60)" 4624,expand_dims,tensorflow/tensorflow/python/keras/backend.py,3259,function,"Adds a 1-sized dimension at index ""axis"". Arguments: x: A tensor or variable. axis: Position where to add a new axis. Returns: A tensor with expanded dimensions." 4625,squeeze,tensorflow/tensorflow/python/keras/backend.py,3274,function,"Removes a 1-dimension from the tensor at index ""axis"". Arguments: x: A tensor or variable. axis: Axis to drop. Returns: A tensor with the same data as `x` but reduced dimensions." 4626,temporal_padding,tensorflow/tensorflow/python/keras/backend.py,3289,function,"Pads the middle dimension of a 3D tensor. Arguments: x: Tensor or variable. padding: Tuple of 2 integers, how many zeros to add at the start and end of dim 1. Returns: A padded 3D tensor." 4627,spatial_2d_padding,tensorflow/tensorflow/python/keras/backend.py,3307,function,"Pads the 2nd and 3rd dimensions of a 4D tensor. Arguments: x: Tensor or variable. padding: Tuple of 2 tuples, padding pattern. data_format: One of `channels_last` or `channels_first`. Returns: A padded 4D tensor. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4628,spatial_3d_padding,tensorflow/tensorflow/python/keras/backend.py,3339,function,"Pads 5D tensor with zeros along the depth, height, width dimensions. Pads these dimensions with respectively ""padding[0]"", ""padding[1]"" and ""padding[2]"" zeros left and right. For 'channels_last' data_format, the 2nd, 3rd and 4th dimension will be padded. For 'channels_first' data_format, the 3rd, 4th and 5th dimension will be padded. Arguments: x: Tensor or variable. padding: Tuple of 3 tuples, padding pattern. data_format: One of `channels_last` or `channels_first`. Returns: A padded 5D tensor. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4629,stack,tensorflow/tensorflow/python/keras/backend.py,3384,function,"Stacks a list of rank `R` tensors into a rank `R+1` tensor. Arguments: x: List of tensors. axis: Axis along which to perform stacking. Returns: A tensor. Example: >>> a = tf.constant([[1, 2],[3, 4]]) >>> b = tf.constant([[10, 20],[30, 40]]) >>> tf.keras.backend.stack((a, b)) " 4630,one_hot,tensorflow/tensorflow/python/keras/backend.py,3411,function,"Computes the one-hot representation of an integer tensor. Arguments: indices: nD integer tensor of shape `(batch_size, dim1, dim2, ... dim(n-1))` num_classes: Integer, number of classes to consider. Returns: The (n + 1)D one-hot representation of the input, with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`." 4631,reverse,tensorflow/tensorflow/python/keras/backend.py,3431,function,"Reverse a tensor along the specified axes. Arguments: x: Tensor to reverse. axes: Integer or iterable of integers. Axes to reverse.
Returns: A tensor." 4632,get_value,tensorflow/tensorflow/python/keras/backend.py,3477,function,"Returns the value of a variable. `backend.get_value` is the complement of `backend.set_value`, and provides a generic interface for reading from variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics. {snippet} Arguments: x: input variable. Returns: A Numpy array." 4633,batch_get_value,tensorflow/tensorflow/python/keras/backend.py,3512,function,"Returns the value of more than one tensor variable. Arguments: tensors: list of ops to run. Returns: A list of Numpy arrays. Raises: RuntimeError: If this method is called inside defun." 4634,set_value,tensorflow/tensorflow/python/keras/backend.py,3535,function,"Sets the value of a variable, from a Numpy array. `backend.set_value` is the complement of `backend.get_value`, and provides a generic interface for assigning to variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics. {snippet} Arguments: x: Variable to set to a new value. value: Value to set the tensor to, as a Numpy array (of the same shape)." 4635,batch_set_value,tensorflow/tensorflow/python/keras/backend.py,3574,function,"Sets the values of many tensor variables at once. Arguments: tuples: a list of tuples `(tensor, value)`. `value` should be a Numpy array." 4636,print_tensor,tensorflow/tensorflow/python/keras/backend.py,3617,function,"Prints `message` and the tensor value when evaluated. Note that `print_tensor` returns a new tensor identical to `x` which should be used in the following code. Otherwise the print operation is not taken into account during evaluation. Example: >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> tf.keras.backend.print_tensor(x) Arguments: x: Tensor to print. message: Message to print jointly with the tensor. Returns: The same tensor `x`, unchanged." 4637,GraphExecutionFunction,tensorflow/tensorflow/python/keras/backend.py,3651,class,"Runs a computation graph. It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`. In particular, additional operations can be passed via the `fetches` argument, and additional tensor substitutions via the `feed_dict` argument. Note that given substitutions are merged with substitutions from `inputs`. Even though `feed_dict` is passed once in the constructor (called in `model.compile()`), we can modify the values in the dictionary. Through this feed_dict we can provide additional substitutions besides Keras inputs. Arguments: inputs: Feed placeholders to the computation graph. outputs: Output tensors to fetch. updates: Additional update ops to be run at function call. name: A name to help users identify what this function does. session_kwargs: Arguments to `tf.Session.run()`: `fetches`, `feed_dict`, `options`, `run_metadata`." 4638,eval_in_eager_or_function,tensorflow/tensorflow/python/keras/backend.py,3847,function,"Method to evaluate a tensor in eager or in a tf.function. In the case of a tf.function, it will lift the tensor out of the function and try to evaluate that piece of the graph. Warning: Do not add new usages of this function. TODO(b/150169018): delete this function once _keras_history_helper is no longer needed, after Keras switches to KerasTensors and op layers work via dispatch. Arguments: outputs: tensors to fetch. Returns: The value of the tensors (as numpy arrays)." 4639,function,tensorflow/tensorflow/python/keras/backend.py,3918,function,"Instantiates a Keras function. Arguments: inputs: List of placeholder tensors. outputs: List of output tensors. 
updates: List of update ops. name: String, name of function. **kwargs: Passed to `tf.Session.run`. Returns: Output values as Numpy arrays. Raises: ValueError: if invalid kwargs are passed in or if in eager execution." 4640,gradients,tensorflow/tensorflow/python/keras/backend.py,3965,function,"Returns the gradients of `loss` w.r.t. `variables`. Arguments: loss: Scalar tensor to minimize. variables: List of variables. Returns: A gradients tensor." 4641,stop_gradient,tensorflow/tensorflow/python/keras/backend.py,3981,function,"Returns `variables` but with zero gradient w.r.t. every other variable. Arguments: variables: Tensor or list of tensors to consider constant with respect to any other variable. Returns: A single tensor or a list of tensors (depending on the passed argument) that has no gradient with respect to any other variable." 4642,rnn,tensorflow/tensorflow/python/keras/backend.py,4003,function,"Iterates over the time dimension of a tensor. Arguments: step_function: RNN step function. Args; input; Tensor with shape `(samples, ...)` (no time dimension), representing input for the batch of samples at a certain time step. states; List of tensors. Returns; output; Tensor with shape `(samples, output_dim)` (no time dimension). new_states; List of tensors, same length and shapes as 'states'. The first state in the list must be the output tensor at the previous timestep. inputs: Tensor of temporal data of shape `(samples, time, ...)` (at least 3D), or nested tensors, each of which has shape `(samples, time, ...)`. initial_states: Tensor with shape `(samples, state_size)` (no time dimension), containing the initial values for the states used in the step function. In the case that state_size is in a nested shape, the shape of initial_states will also follow the nested structure. go_backwards: Boolean. If True, do the iteration over the time dimension in reverse order and return the reversed sequence. mask: Binary tensor with shape `(samples, time, 1)`, with a zero for every element that is masked. constants: List of constant values passed at each step. unroll: Whether to unroll the RNN or to use a symbolic `while_loop`. input_length: An integer or a 1-D Tensor, depending on whether the time dimension is fixed-length or not. In case of variable length input, it is used for masking in case there's no mask specified. time_major: Boolean. If true, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. zero_output_for_mask: Boolean. If True, the output for masked timesteps will be zeros, whereas in the False case, output from the previous timestep is returned. Returns: A tuple, `(last_output, outputs, new_states)`. last_output: the latest output of the rnn, of shape `(samples, ...)` outputs: tensor with shape `(samples, time, ...)` where each entry `outputs[s, t]` is the output of the step function at time `t` for sample `s`. new_states: list of tensors, latest states returned by the step function, of shape `(samples, ...)`. Raises: ValueError: if input dimension is less than 3. ValueError: if `unroll` is `True` but input timestep is not a fixed number. ValueError: if `mask` is provided (not `None`) but states is not provided (`len(states)` == 0)." 
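The `rnn` contract above (each step returns `(output, new_states)`, with the output doubling as the first state) is easiest to see in a toy loop. A hedged sketch, assuming TensorFlow 2.x with eager execution; the step function and shapes here are invented for illustration, not taken from the indexed source:

```python
# Hedged sketch of tf.keras.backend.rnn (assumes TF 2.x, eager execution):
# a step function that keeps a running sum over the time dimension.
import tensorflow as tf
from tensorflow.keras import backend as K

def step(cell_inputs, states):
    # cell_inputs: (samples, features); states: [previous output]
    new_state = states[0] + cell_inputs
    # Per the docstring, the output must also be the first new state.
    return new_state, [new_state]

inputs = tf.ones((2, 5, 3))                 # (samples, time, features)
initial_states = [tf.zeros((2, 3))]
last_output, outputs, new_states = K.rnn(step, inputs, initial_states)
print(last_output.numpy())                  # every entry is 5.0 after 5 steps
print(outputs.shape)                        # (2, 5, 3): per-timestep outputs
```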
4643,switch,tensorflow/tensorflow/python/keras/backend.py,4398,function,"Switches between two operations depending on a scalar value. Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. Arguments: condition: tensor (`int` or `bool`). then_expression: either a tensor, or a callable that returns a tensor. else_expression: either a tensor, or a callable that returns a tensor. Returns: The selected tensor. Raises: ValueError: If rank of `condition` is greater than rank of expressions." 4644,in_train_phase,tensorflow/tensorflow/python/keras/backend.py,4462,function,"Selects `x` in train phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Arguments: x: What to return in train phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on the `training` flag. The `training` flag defaults to `K.learning_phase()`." 4645,in_test_phase,tensorflow/tensorflow/python/keras/backend.py,4507,function,"Selects `x` in test phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Arguments: x: What to return in test phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on `K.learning_phase`." 4646,relu,tensorflow/tensorflow/python/keras/backend.py,4532,function,"Rectified linear unit. With default values, it returns element-wise `max(x, 0)`. Otherwise, it follows: `f(x) = max_value` for `x >= max_value`, `f(x) = x` for `threshold <= x < max_value`, `f(x) = alpha * (x - threshold)` otherwise. Arguments: x: A tensor or variable. alpha: A scalar, slope of negative section (default=`0.`). max_value: float. Saturation threshold. threshold: float. Threshold value for thresholded activation. Returns: A tensor." 4647,elu,tensorflow/tensorflow/python/keras/backend.py,4589,function,"Exponential linear unit. Arguments: x: A tensor or variable to compute the activation function for. alpha: A scalar, slope of negative section. Returns: A tensor." 4648,softmax,tensorflow/tensorflow/python/keras/backend.py,4608,function,"Softmax of a tensor. Arguments: x: A tensor or variable. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. Returns: A tensor." 4649,softplus,tensorflow/tensorflow/python/keras/backend.py,4624,function,"Softplus of a tensor. Arguments: x: A tensor or variable. Returns: A tensor." 4650,softsign,tensorflow/tensorflow/python/keras/backend.py,4638,function,"Softsign of a tensor. Arguments: x: A tensor or variable. Returns: A tensor." 4651,categorical_crossentropy,tensorflow/tensorflow/python/keras/backend.py,4652,function,"Categorical crossentropy between an output tensor and a target tensor. Arguments: target: A tensor of the same shape as `output`. output: A tensor resulting from a softmax (unless `from_logits` is True, in which case `output` is expected to be the logits). from_logits: Boolean, whether `output` is the result of a softmax, or is a tensor of logits. axis: Int specifying the channels axis. `axis=-1` corresponds to data format `channels_last`, and `axis=1` corresponds to data format `channels_first`. Returns: Output tensor. 
Raises: ValueError: if `axis` is neither -1 nor one of the axes of `output`. Example: >>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3]) >>> print(a) tf.Tensor( [[1. 0. 0.] [0. 1. 0.] [0. 0. 1.]], shape=(3, 3), dtype=float32) >>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94], shape=[3,3]) >>> print(b) tf.Tensor( [[0.9 0.05 0.05] [0.05 0.89 0.06] [0.05 0.01 0.94]], shape=(3, 3), dtype=float32) >>> loss = tf.keras.backend.categorical_crossentropy(a, b) >>> print(np.around(loss, 5)) [0.10536 0.11653 0.06188] >>> loss = tf.keras.backend.categorical_crossentropy(a, a) >>> print(np.around(loss, 5)) [0. 0. 0.]" 4652,sparse_categorical_crossentropy,tensorflow/tensorflow/python/keras/backend.py,4723,function,"Categorical crossentropy with integer targets. Arguments: target: An integer tensor. output: A tensor resulting from a softmax (unless `from_logits` is True, in which case `output` is expected to be the logits). from_logits: Boolean, whether `output` is the result of a softmax, or is a tensor of logits. axis: Int specifying the channels axis. `axis=-1` corresponds to data format `channels_last`, and `axis=1` corresponds to data format `channels_first`. Returns: Output tensor. Raises: ValueError: if `axis` is neither -1 nor one of the axes of `output`." 4653,binary_crossentropy,tensorflow/tensorflow/python/keras/backend.py,4807,function,"Binary crossentropy between an output tensor and a target tensor. Arguments: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. Returns: A tensor." 4654,sigmoid,tensorflow/tensorflow/python/keras/backend.py,4846,function,"Element-wise sigmoid. Arguments: x: A tensor or variable. Returns: A tensor." 4655,hard_sigmoid,tensorflow/tensorflow/python/keras/backend.py,4860,function,"Segment-wise linear approximation of sigmoid. Faster than sigmoid. Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`. In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`. Arguments: x: A tensor or variable. Returns: A tensor." 4656,tanh,tensorflow/tensorflow/python/keras/backend.py,4883,function,"Element-wise tanh. Arguments: x: A tensor or variable. Returns: A tensor." 4657,dropout,tensorflow/tensorflow/python/keras/backend.py,4897,function,"Sets entries in `x` to zero at random, while scaling the entire tensor. Arguments: x: tensor level: fraction of the entries in the tensor that will be set to 0. noise_shape: shape for randomly generated keep/drop flags, must be broadcastable to the shape of `x` seed: random seed to ensure determinism. Returns: A tensor." 4658,l2_normalize,tensorflow/tensorflow/python/keras/backend.py,4918,function,"Normalizes a tensor wrt the L2 norm along the specified axis. Arguments: x: Tensor or variable. axis: axis along which to perform normalization. Returns: A tensor." 4659,in_top_k,tensorflow/tensorflow/python/keras/backend.py,4933,function,"Returns whether the `targets` are in the top `k` `predictions`. Arguments: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`." 
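As a concrete, hedged illustration of the `in_top_k` semantics just described (assuming TensorFlow 2.x with eager execution; the prediction values are invented for the example):

```python
# Hedged sketch of tf.keras.backend.in_top_k (assumes TF 2.x, eager execution).
import tensorflow as tf
from tensorflow.keras import backend as K

predictions = tf.constant([[0.1, 0.7, 0.2],
                           [0.6, 0.3, 0.1]], dtype=tf.float32)
targets = tf.constant([1, 2], dtype=tf.int32)  # true class index per batch item

print(K.in_top_k(predictions, targets, k=1).numpy())  # [ True False]
print(K.in_top_k(predictions, targets, k=3).numpy())  # [ True  True]
```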
4660,_preprocess_conv1d_input,tensorflow/tensorflow/python/keras/backend.py,4952,function,"Transpose and cast the input before the conv1d. Arguments: x: input tensor. data_format: string, `""channels_last""` or `""channels_first""`. Returns: A tensor." 4661,_preprocess_conv2d_input,tensorflow/tensorflow/python/keras/backend.py,4971,function,"Transpose and cast the input before the conv2d. Arguments: x: input tensor. data_format: string, `""channels_last""` or `""channels_first""`. force_transpose: Boolean. If True, the input will always be transposed from NCHW to NHWC if `data_format` is `""channels_first""`. If False, the transposition only occurs on CPU (GPU ops are assumed to support NCHW). Returns: A tensor." 4662,_preprocess_conv3d_input,tensorflow/tensorflow/python/keras/backend.py,4994,function,"Transpose and cast the input before the conv3d. Arguments: x: input tensor. data_format: string, `""channels_last""` or `""channels_first""`. Returns: A tensor." 4663,_preprocess_padding,tensorflow/tensorflow/python/keras/backend.py,5013,function,"Convert keras' padding to TensorFlow's padding. Arguments: padding: string, one of 'same', 'valid'. Returns: a string, one of 'SAME', 'VALID'. Raises: ValueError: if invalid `padding`" 4664,conv1d,tensorflow/tensorflow/python/keras/backend.py,5036,function,"1D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: stride integer. padding: string, `""same""`, `""causal""` or `""valid""`. data_format: string, one of ""channels_last"", ""channels_first"". dilation_rate: integer dilation rate. Returns: A tensor, result of 1D convolution. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4665,conv2d,tensorflow/tensorflow/python/keras/backend.py,5087,function,"2D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, `""same""` or `""valid""`. data_format: `""channels_last""` or `""channels_first""`. dilation_rate: tuple of 2 integers. Returns: A tensor, result of 2D convolution. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4666,conv2d_transpose,tensorflow/tensorflow/python/keras/backend.py,5131,function,"2D deconvolution (i.e. transposed convolution). Arguments: x: Tensor or variable. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. strides: strides tuple. padding: string, `""same""` or `""valid""`. data_format: string, `""channels_last""` or `""channels_first""`. dilation_rate: Tuple of 2 integers. Returns: A tensor, result of transposed 2D convolution. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4667,separable_conv1d,tensorflow/tensorflow/python/keras/backend.py,5203,function,"1D convolution with separable filters. Arguments: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: stride integer. padding: string, `""same""` or `""valid""`. data_format: string, `""channels_last""` or `""channels_first""`. dilation_rate: integer dilation rate. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4668,separable_conv2d,tensorflow/tensorflow/python/keras/backend.py,5272,function,"2D convolution with separable filters. Arguments: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: strides tuple (length 2). 
padding: string, `""same""` or `""valid""`. data_format: string, `""channels_last""` or `""channels_first""`. dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. ValueError: if `strides` is not a tuple of 2 integers." 4669,depthwise_conv2d,tensorflow/tensorflow/python/keras/backend.py,5330,function,"2D convolution with separable filters. Arguments: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. strides: strides tuple (length 2). padding: string, `""same""` or `""valid""`. data_format: string, `""channels_last""` or `""channels_first""`. dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`." 4670,conv3d,tensorflow/tensorflow/python/keras/backend.py,5380,function,"3D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, `""same""` or `""valid""`. data_format: string, `""channels_last""` or `""channels_first""`. dilation_rate: tuple of 3 integers. Returns: A tensor, result of 3D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`." 4671,conv3d_transpose,tensorflow/tensorflow/python/keras/backend.py,5422,function,"3D deconvolution (i.e. transposed convolution). Arguments: x: input tensor. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. strides: strides tuple. padding: string, ""same"" or ""valid"". data_format: string, `""channels_last""` or `""channels_first""`. Returns: A tensor, result of transposed 3D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`." 4672,pool2d,tensorflow/tensorflow/python/keras/backend.py,5483,function,"2D Pooling. Arguments: x: Tensor or variable. pool_size: tuple of 2 integers. strides: tuple of 2 integers. padding: string, `""same""` or `""valid""`. data_format: string, `""channels_last""` or `""channels_first""`. pool_mode: string, `""max""` or `""avg""`. Returns: A tensor, result of 2D pooling. Raises: ValueError: if `data_format` is neither `""channels_last""` or `""channels_first""`. ValueError: if `pool_size` is not a tuple of 2 integers. ValueError: if `strides` is not a tuple of 2 integers. ValueError: if `pool_mode` is neither `""max""` or `""avg""`." 4673,pool3d,tensorflow/tensorflow/python/keras/backend.py,5543,function,"3D Pooling. Arguments: x: Tensor or variable. pool_size: tuple of 3 integers. strides: tuple of 3 integers. padding: string, `""same""` or `""valid""`. data_format: string, `""channels_last""` or `""channels_first""`. pool_mode: string, `""max""` or `""avg""`. Returns: A tensor, result of 3D pooling. Raises: ValueError: if `data_format` is neither `""channels_last""` or `""channels_first""`. ValueError: if `pool_mode` is neither `""max""` or `""avg""`." 4674,local_conv,tensorflow/tensorflow/python/keras/backend.py,5595,function,"Apply N-D convolution with un-shared weights. Arguments: inputs: (N+2)-D tensor with shape (batch_size, channels_in, d_in1, ..., d_inN) if data_format='channels_first', or (batch_size, d_in1, ..., d_inN, channels_in) if data_format='channels_last'. kernel: the unshared weight for N-D convolution, with shape (output_items, feature_dim, channels_out), where feature_dim = np.prod(kernel_size) * channels_in, output_items = np.prod(output_shape). 
kernel_size: a tuple of N integers, specifying the spatial dimensions of the N-D convolution window. strides: a tuple of N integers, specifying the strides of the convolution along the spatial dimensions. output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial dimensionality of the output. data_format: string, ""channels_first"" or ""channels_last"". Returns: An (N+2)-D tensor with shape: (batch_size, channels_out) + output_shape if data_format='channels_first', or: (batch_size,) + output_shape + (channels_out,) if data_format='channels_last'. Raises: ValueError: if `data_format` is neither `channels_last` nor `channels_first`." 4675,local_conv1d,tensorflow/tensorflow/python/keras/backend.py,5674,function,"Apply 1D conv with un-shared weights. Arguments: inputs: 3D tensor with shape: (batch_size, steps, input_dim) if data_format is ""channels_last"" or (batch_size, input_dim, steps) if data_format is ""channels_first"". kernel: the unshared weight for convolution, with shape (output_length, feature_dim, filters). kernel_size: a tuple of a single integer, specifying the length of the 1D convolution window. strides: a tuple of a single integer, specifying the stride length of the convolution. data_format: the data format, channels_first or channels_last. Returns: A 3D tensor with shape: (batch_size, output_length, filters) if data_format='channels_last' or a 3D tensor with shape: (batch_size, filters, output_length) if data_format='channels_first'." 4676,local_conv2d,tensorflow/tensorflow/python/keras/backend.py,5710,function,"Apply 2D conv with un-shared weights. Arguments: inputs: 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'. kernel: the unshared weight for convolution, with shape (output_items, feature_dim, filters). kernel_size: a tuple of 2 integers, specifying the width and height of the 2D convolution window. strides: a tuple of 2 integers, specifying the strides of the convolution along the width and height. output_shape: a tuple with (output_row, output_col). data_format: the data format, channels_first or channels_last. Returns: A 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'." 4677,bias_add,tensorflow/tensorflow/python/keras/backend.py,5752,function,"Adds a bias vector to a tensor. Arguments: x: Tensor or variable. bias: Bias tensor to add. data_format: string, `""channels_last""` or `""channels_first""`. Returns: Output tensor. Raises: ValueError: In one of the two cases below: 1. invalid `data_format` argument. 2. invalid bias shape. The bias should be either a vector or a tensor with ndim(x) - 1 dimensions" 4678,random_normal,tensorflow/tensorflow/python/keras/backend.py,5797,function,"Returns a tensor with normal distribution of values. It is an alias to `tf.random.normal`. Arguments: shape: A tuple of integers, the shape of tensor to create. mean: A float, the mean value of the normal distribution to draw samples. Defaults to 0.0. stddev: A float, the standard deviation of the normal distribution to draw samples. Defaults to 1.0. dtype: `tf.dtypes.DType`, dtype of returned tensor. Defaults to the Keras backend dtype, which is float32. seed: Integer, random seed. Will use a random numpy integer when not specified. Returns: A tensor with normal distribution of values. 
Example: >>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3), ... mean=0.0, stddev=1.0) >>> random_normal_tensor " 4679,random_uniform,tensorflow/tensorflow/python/keras/backend.py,5834,function,"Returns a tensor with uniform distribution of values. Arguments: shape: A tuple of integers, the shape of tensor to create. minval: A float, lower boundary of the uniform distribution to draw samples. maxval: A float, upper boundary of the uniform distribution to draw samples. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. Example: >>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3), ... minval=0.0, maxval=1.0) >>> random_uniform_tensor " 4680,random_binomial,tensorflow/tensorflow/python/keras/backend.py,5868,function,"Returns a tensor with random binomial distribution of values. DEPRECATED, use `tf.keras.backend.random_bernoulli` instead. The binomial distribution with parameters `n` and `p` is the probability distribution of the number of successes in `n` independent Bernoulli trials, each with success probability `p`. Only `n = 1` is supported for now. Arguments: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of the binomial distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. Example: >>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3), ... p=0.5) >>> random_binomial_tensor " 4681,random_bernoulli,tensorflow/tensorflow/python/keras/backend.py,5905,function,"Returns a tensor with random Bernoulli distribution of values. Arguments: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of the Bernoulli distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor." 4682,truncated_normal,tensorflow/tensorflow/python/keras/backend.py,5922,function,"Returns a tensor with truncated random normal distribution of values. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than two standard deviations from the mean are dropped and re-picked. Arguments: shape: A tuple of integers, the shape of tensor to create. mean: Mean of the values. stddev: Standard deviation of the values. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor." 4683,ctc_label_dense_to_sparse,tensorflow/tensorflow/python/keras/backend.py,5957,function,"Converts CTC labels from dense to sparse. Arguments: labels: dense CTC labels. label_lengths: length of the labels. Returns: A sparse tensor representation of the labels." 4684,ctc_batch_cost,tensorflow/tensorflow/python/keras/backend.py,6004,function,"Runs CTC loss algorithm on each batch element. Arguments: y_true: tensor `(samples, max_string_length)` containing the truth labels. y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_pred`. label_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_true`. Returns: Tensor with shape (samples, 1) containing the CTC loss of each element." 4685,ctc_decode,tensorflow/tensorflow/python/keras/backend.py,6037,function,"Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. 
Arguments: y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, )` containing the sequence length for each batch item in `y_pred`. greedy: perform much faster best-path search if `true`. This does not use a dictionary. beam_width: if `greedy` is `false`: a beam search decoder will be used with a beam of this width. top_paths: if `greedy` is `false`, how many of the most probable paths will be returned. Returns: Tuple: List: if `greedy` is `true`, returns a list of one element that contains the decoded sequence. If `false`, returns the `top_paths` most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as `-1`. Tensor `(top_paths, )` that contains the log probability of each decoded sequence." 4686,map_fn,tensorflow/tensorflow/python/keras/backend.py,6093,function,"Map the function fn over the elements of elems and return the outputs. Arguments: fn: Callable that will be called upon each element in elems elems: tensor name: A string name for the map node in the graph dtype: Output data type. Returns: Tensor with dtype `dtype`." 4687,foldl,tensorflow/tensorflow/python/keras/backend.py,6109,function,"Reduce elems using fn to combine them from left to right. Arguments: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor initializer: The first value used (`elems[0]` in case of None) name: A string name for the foldl node in the graph Returns: Tensor with same type and shape as `initializer`." 4688,foldr,tensorflow/tensorflow/python/keras/backend.py,6126,function,"Reduce elems using fn to combine them from right to left. Arguments: fn: Callable that will be called upon each element in elems and an accumulator, for instance `lambda acc, x: acc + x` elems: tensor initializer: The first value used (`elems[-1]` in case of None) name: A string name for the foldr node in the graph Returns: Same type and shape as initializer" 4689,configure_and_create_distributed_session,tensorflow/tensorflow/python/keras/backend.py,6190,function,Configure session config and create a session with it. 4690,is_tpu_strategy,tensorflow/tensorflow/python/keras/backend.py,6235,function,Whether we're executing under a TPU strategy. 4691,cast_variables_to_tensor,tensorflow/tensorflow/python/keras/backend.py,6241,function, 4692,_is_symbolic_tensor,tensorflow/tensorflow/python/keras/backend.py,6251,function, 4693,convert_inputs_if_ragged,tensorflow/tensorflow/python/keras/backend.py,6255,function,Converts any ragged tensors to dense. 4694,maybe_convert_to_ragged,tensorflow/tensorflow/python/keras/backend.py,6278,function,Converts any ragged input back to its initial structure. 4695,ContextValueCache,tensorflow/tensorflow/python/keras/backend.py,6286,class,"Container that caches (possibly tensor) values based on the context. This class is similar to defaultdict, where values may be produced by the default factory specified during initialization. This class also has a default value for the key (when key is `None`) -- the key is set to the current graph or eager context. The default factories for key and value are only used in `__getitem__` and `setdefault`. The `.get()` behavior remains the same. This object will return the value of the current graph or closest parent graph if the current graph is a function. 
This is to reflect the fact that if a tensor is created in eager/graph, child functions may capture that tensor. The default factory method may accept keyword arguments (unlike defaultdict, which only accepts callables with 0 arguments). To pass keyword arguments to `default_factory`, use the `setdefault` method instead of `__getitem__`. An example of how this class can be used in different contexts: ``` cache = ContextValueCache(int) # Eager mode cache[None] += 2 cache[None] += 4 assert cache[None] == 6 # Graph mode with tf.Graph().as_default() as g: cache[None] += 5 cache[g] += 3 assert cache[g] == 8 ``` Example of a default factory with arguments: ``` cache = ContextValueCache(lambda x: x + 1) g = tf.get_default_graph() # Example with keyword argument. value = cache.setdefault(key=g, kwargs={'x': 3}) assert cache[g] == 4 ```" 4696,epsilon,tensorflow/tensorflow/python/keras/backend_config.py,35,function,"Returns the value of the fuzz factor used in numeric expressions. Returns: A float. Example: >>> tf.keras.backend.epsilon() 1e-07" 4697,set_epsilon,tensorflow/tensorflow/python/keras/backend_config.py,49,function,"Sets the value of the fuzz factor used in numeric expressions. Arguments: value: float. New value of epsilon. Example: >>> tf.keras.backend.epsilon() 1e-07 >>> tf.keras.backend.set_epsilon(1e-5) >>> tf.keras.backend.epsilon() 1e-05 >>> tf.keras.backend.set_epsilon(1e-7)" 4698,floatx,tensorflow/tensorflow/python/keras/backend_config.py,68,function,"Returns the default float type, as a string. E.g. `'float16'`, `'float32'`, `'float64'`. Returns: String, the current default float type. Example: >>> tf.keras.backend.floatx() 'float32'" 4699,set_floatx,tensorflow/tensorflow/python/keras/backend_config.py,84,function,"Sets the default float type. Note: It is not recommended to set this to float16 for training, as this will likely cause numeric stability issues. Instead, mixed precision, which uses a mix of float16 and float32, can be used by calling `tf.keras.mixed_precision.experimental.set_policy('mixed_float16')`. See the [mixed precision guide]( https://www.tensorflow.org/guide/keras/mixed_precision) for details. Arguments: value: String; `'float16'`, `'float32'`, or `'float64'`. Example: >>> tf.keras.backend.floatx() 'float32' >>> tf.keras.backend.set_floatx('float64') >>> tf.keras.backend.floatx() 'float64' >>> tf.keras.backend.set_floatx('float32') Raises: ValueError: In case of invalid value." 4700,image_data_format,tensorflow/tensorflow/python/keras/backend_config.py,116,function,"Returns the default image data format convention. Returns: A string, either `'channels_first'` or `'channels_last'` Example: >>> tf.keras.backend.image_data_format() 'channels_last'" 4701,set_image_data_format,tensorflow/tensorflow/python/keras/backend_config.py,130,function,"Sets the value of the image data format convention. Arguments: data_format: string. `'channels_first'` or `'channels_last'`. Example: >>> tf.keras.backend.image_data_format() 'channels_last' >>> tf.keras.backend.set_image_data_format('channels_first') >>> tf.keras.backend.image_data_format() 'channels_first' >>> tf.keras.backend.set_image_data_format('channels_last') Raises: ValueError: In case of invalid `data_format` value." 
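The backend_config accessors above are process-wide globals; a brief hedged sketch of the round trip they document (assuming TensorFlow 2.x):

```python
# Hedged sketch of the backend_config accessors indexed above (assumes TF 2.x).
# These settings are global, so the sketch restores the defaults afterwards.
import tensorflow as tf

print(tf.keras.backend.epsilon())            # 1e-07, the numeric fuzz factor
print(tf.keras.backend.floatx())             # 'float32', the default float dtype

tf.keras.backend.set_floatx('float64')       # new weights now default to float64
assert tf.keras.backend.floatx() == 'float64'
tf.keras.backend.set_floatx('float32')       # restore the default

print(tf.keras.backend.image_data_format())  # 'channels_last'
```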
4702,BackendConfigTest,tensorflow/tensorflow/python/keras/backend_config_test.py,27,class, 4703,compare_single_input_op_to_numpy,tensorflow/tensorflow/python/keras/backend_test.py,46,function, 4704,compare_two_inputs_op_to_numpy,tensorflow/tensorflow/python/keras/backend_test.py,74,function, 4705,BackendResetTest,tensorflow/tensorflow/python/keras/backend_test.py,103,class, 4706,BackendUtilsTest,tensorflow/tensorflow/python/keras/backend_test.py,146,class, 4707,BackendVariableTest,tensorflow/tensorflow/python/keras/backend_test.py,294,class, 4708,BackendLinearAlgebraTest,tensorflow/tensorflow/python/keras/backend_test.py,357,class, 4709,BackendShapeOpsTest,tensorflow/tensorflow/python/keras/backend_test.py,562,class, 4710,BackendNNOpsTest,tensorflow/tensorflow/python/keras/backend_test.py,748,class, 4711,BackendCrossEntropyLossesTest,tensorflow/tensorflow/python/keras/backend_test.py,1583,class, 4712,TestCTC,tensorflow/tensorflow/python/keras/backend_test.py,1735,class, 4713,TestRandomOps,tensorflow/tensorflow/python/keras/backend_test.py,1841,class, 4714,FunctionTest,tensorflow/tensorflow/python/keras/backend_test.py,1875,class, 4715,BackendGraphTests,tensorflow/tensorflow/python/keras/backend_test.py,1940,class, 4716,ControlOpsTests,tensorflow/tensorflow/python/keras/backend_test.py,2108,class, 4717,ContextValueCacheTest,tensorflow/tensorflow/python/keras/backend_test.py,2142,class, 4718,configure_callbacks,tensorflow/tensorflow/python/keras/callbacks.py,71,function,"Configures callbacks for use in various training loops. Arguments: callbacks: List of Callbacks. model: Model being trained. do_validation: Whether or not validation loop will be run. batch_size: Number of samples per batch. epochs: Number of epochs to train. steps_per_epoch: Number of batches to run per training epoch. samples: Number of training samples. verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger. count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count. mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT. Which loop mode to configure callbacks for. Returns: Instance of CallbackList used to control all Callbacks." 4719,set_callback_parameters,tensorflow/tensorflow/python/keras/callbacks.py,133,function,"Sets callback parameters. Arguments: callback_list: CallbackList instance. model: Model being trained. do_validation: Whether or not validation loop will be run. batch_size: Number of samples per batch. epochs: Number of epochs to train. steps_per_epoch: Number of batches to run per training epoch. samples: Number of training samples. verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger. mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT. Which loop mode to configure callbacks for." 4720,_is_generator_like,tensorflow/tensorflow/python/keras/callbacks.py,181,function,"Checks if data is a generator, Sequence, or Iterator." 4721,make_logs,tensorflow/tensorflow/python/keras/callbacks.py,187,function,Computes logs for sending to `on_batch_end` methods. 4722,CallbackList,tensorflow/tensorflow/python/keras/callbacks.py,199,class,Container abstracting a list of callbacks. 4723,Callback,tensorflow/tensorflow/python/keras/callbacks.py,591,class,"Abstract base class used to build new callbacks. Attributes: params: Dict. Training parameters (e.g. verbosity, batch size, number of epochs...). model: Instance of `keras.models.Model`. Reference of the model being trained. 
The `logs` dictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch (see method-specific docstrings)." 4724,BaseLogger,tensorflow/tensorflow/python/keras/callbacks.py,832,class,"Callback that accumulates epoch averages of metrics. This callback is automatically applied to every Keras model. Arguments: stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is in `on_epoch_end`. All others will be averaged in `on_epoch_end`." 4725,TerminateOnNaN,tensorflow/tensorflow/python/keras/callbacks.py,881,class,"Callback that terminates training when a NaN loss is encountered. " 4726,ProgbarLogger,tensorflow/tensorflow/python/keras/callbacks.py,895,class,"Callback that prints metrics to stdout. Arguments: count_mode: One of `""steps""` or `""samples""`. Whether the progress bar should count samples seen or steps (batches) seen. stateful_metrics: Iterable of string names of metrics that should *not* be averaged over an epoch. Metrics in this list will be logged as-is. All others will be averaged over time (e.g. loss, etc). If not provided, defaults to the `Model`'s metrics. Raises: ValueError: In case of invalid `count_mode`." 4727,History,tensorflow/tensorflow/python/keras/callbacks.py,1054,class,"Callback that records events into a `History` object. This callback is automatically applied to every Keras model. The `History` object gets returned by the `fit` method of models." 4728,ModelCheckpoint,tensorflow/tensorflow/python/keras/callbacks.py,1081,class,"Callback to save the Keras model or model weights at some frequency. `ModelCheckpoint` callback is used in conjunction with training using `model.fit()` to save a model or weights (in a checkpoint file) at some interval, so the model or weights can be loaded later to continue the training from the state saved. A few options this callback provides include: - Whether to only keep the model that has achieved the ""best performance"" so far, or whether to save the model at the end of every epoch regardless of performance. - Definition of 'best'; which quantity to monitor and whether it should be maximized or minimized. - The frequency it should save at. Currently, the callback supports saving at the end of every epoch, or after a fixed number of training batches. - Whether only weights are saved, or the whole model is saved. Example: ```python EPOCHS = 10 checkpoint_filepath = '/tmp/checkpoint' model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_filepath, save_weights_only=True, monitor='val_acc', mode='max', save_best_only=True) # Model weights are saved at the end of every epoch, if it's the best seen # so far. model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback]) # The model weights (that are considered the best) are loaded into the model. model.load_weights(checkpoint_filepath) ``` Arguments: filepath: string or `PathLike`, path to save the model file. `filepath` can contain named formatting options, which will be filled with the value of `epoch` and keys in `logs` (passed in `on_epoch_end`). For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model checkpoints will be saved with the epoch number and the validation loss in the filename. monitor: quantity to monitor. verbose: verbosity mode, 0 or 1. save_best_only: if `save_best_only=True`, the latest best model according to the quantity monitored will not be overwritten. 
If `filepath` doesn't contain formatting options like `{epoch}` then `filepath` will be overwritten by each new better model. mode: one of {auto, min, max}. If `save_best_only=True`, the decision to overwrite the current save file is made based on either the maximization or the minimization of the monitored quantity. For `val_acc`, this should be `max`, for `val_loss` this should be `min`, etc. In `auto` mode, the direction is automatically inferred from the name of the monitored quantity. save_weights_only: if True, then only the model's weights will be saved (`model.save_weights(filepath)`), else the full model is saved (`model.save(filepath)`). save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves the model after each epoch. When using integer, the callback saves the model at end of this many batches. If the `Model` is compiled with `experimental_steps_per_execution=N`, then the saving criteria will be checked every Nth batch. Note that if the saving isn't aligned to epochs, the monitored metric may potentially be less reliable (it could reflect as little as 1 batch, since the metrics get reset every epoch). Defaults to `'epoch'`. options: Optional `tf.train.CheckpointOptions` object if `save_weights_only` is true or optional `tf.saved_model.SavedOptions` object if `save_weights_only` is false. **kwargs: Additional arguments for backwards compatibility. Possible key is `period`." 4729,BackupAndRestore,tensorflow/tensorflow/python/keras/callbacks.py,1484,class,"Callback to back up and restore the training state. `BackupAndRestore` callback is intended to recover from interruptions that happened in the middle of a model.fit execution by backing up the training states in a temporary checkpoint file (based on TF CheckpointManager) at the end of each epoch. If training is restarted before completion, the training state and model are restored to the most recently saved state at the beginning of a new model.fit() run. Note that the user is responsible for bringing jobs back up. This callback is important for the backup and restore mechanism for fault tolerance purposes, and the model to be restored from a previous checkpoint is expected to be the same as the one used to back up. If the user changes arguments passed to compile or fit, the checkpoint saved for fault tolerance can become invalid. Note: 1. This callback is not compatible with disabling eager execution. 2. A checkpoint is saved at the end of each epoch. When restoring, we'll redo any partial work from an unfinished epoch in which the training got restarted (so the work done before an interruption doesn't affect the final model state). 3. This works for both single-worker and multi-worker modes; only MirroredStrategy and MultiWorkerMirroredStrategy are supported for now. Example: >>> class InterruptingCallback(tf.keras.callbacks.Callback): ... def on_epoch_begin(self, epoch, logs=None): ... if epoch == 4: ... raise RuntimeError('Interrupting!') >>> callback = tf.keras.callbacks.experimental.BackupAndRestore( ... backup_dir=""/tmp"") >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> model.compile(tf.keras.optimizers.SGD(), loss='mse') >>> try: ... model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10, ... batch_size=1, callbacks=[callback, InterruptingCallback()], ... verbose=0) ... except: ... pass >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10, ... 
batch_size=1, callbacks=[callback], verbose=0) >>> # Only 6 more epochs are run, since the first training got interrupted at >>> # zero-indexed epoch 4, so the second training will continue from 4 to 9. >>> len(history.history['loss']) 6 Arguments: backup_dir: String, path to save the model file. This is the directory in which the system stores temporary files to recover the model from jobs terminated unexpectedly. The directory cannot be reused elsewhere to store other checkpoints, e.g. by BackupAndRestore callback of another training, or by another callback (ModelCheckpoint) of the same training." 4730,EarlyStopping,tensorflow/tensorflow/python/keras/callbacks.py,1597,class,"Stop training when a monitored metric has stopped improving. Assuming the goal of training is to minimize the loss. With this, the metric to be monitored would be `'loss'`, and mode would be `'min'`. A `model.fit()` training loop will check at end of every epoch whether the loss is no longer decreasing, considering the `min_delta` and `patience` if applicable. Once it's found no longer decreasing, `model.stop_training` is marked True and the training terminates. The quantity to be monitored needs to be available in `logs` dict. To make it so, pass the loss or metrics at `model.compile()`. Arguments: monitor: Quantity to be monitored. min_delta: Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement. patience: Number of epochs with no improvement after which training will be stopped. verbose: verbosity mode. mode: One of `{""auto"", ""min"", ""max""}`. In `min` mode, training will stop when the quantity monitored has stopped decreasing; in `""max""` mode it will stop when the quantity monitored has stopped increasing; in `""auto""` mode, the direction is automatically inferred from the name of the monitored quantity. baseline: Baseline value for the monitored quantity. Training will stop if the model doesn't show improvement over the baseline. restore_best_weights: Whether to restore model weights from the epoch with the best value of the monitored quantity. If False, the model weights obtained at the last step of training are used. Example: >>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3) >>> # This callback will stop the training when there is no improvement in >>> # the validation loss for three consecutive epochs. >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> model.compile(tf.keras.optimizers.SGD(), loss='mse') >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... epochs=10, batch_size=1, callbacks=[callback], ... verbose=0) >>> len(history.history['loss']) # Only 4 epochs are run. 4" 4731,RemoteMonitor,tensorflow/tensorflow/python/keras/callbacks.py,1732,class,"Callback used to stream events to a server. Requires the `requests` library. Events are sent to `root + '/publish/epoch/end/'` by default. Calls are HTTP POST, with a `data` argument which is a JSON-encoded dictionary of event data. If `send_as_json=True`, the content type of the request will be `""application/json""`. Otherwise the serialized JSON will be sent within a form. Arguments: root: String; root url of the target server. path: String; path relative to `root` to which the events will be sent. field: String; JSON field under which the data will be stored. The field is used only if the payload is sent within a form (i.e. send_as_json is set to False). 
headers: Dictionary; optional custom HTTP headers. send_as_json: Boolean; whether the request should be sent as `""application/json""`." 4732,LearningRateScheduler,tensorflow/tensorflow/python/keras/callbacks.py,1795,class,"Learning rate scheduler. At the beginning of every epoch, this callback gets the updated learning rate value from `schedule` function provided at `__init__`, with the current epoch and current learning rate, and applies the updated learning rate on the optimizer. Arguments: schedule: a function that takes an epoch index (integer, indexed from 0) and current learning rate (float) as inputs and returns a new learning rate as output (float). verbose: int. 0: quiet, 1: update messages. Example: >>> # This function keeps the initial learning rate for the first ten epochs >>> # and decreases it exponentially after that. >>> def scheduler(epoch, lr): ... if epoch < 10: ... return lr ... else: ... return lr * tf.math.exp(-0.1) >>> >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> model.compile(tf.keras.optimizers.SGD(), loss='mse') >>> round(model.optimizer.lr.numpy(), 5) 0.01 >>> callback = tf.keras.callbacks.LearningRateScheduler(scheduler) >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... epochs=15, callbacks=[callback], verbose=0) >>> round(model.optimizer.lr.numpy(), 5) 0.00607" 4733,TensorBoard,tensorflow/tensorflow/python/keras/callbacks.py,1861,class,"Enable visualizations for TensorBoard. TensorBoard is a visualization tool provided with TensorFlow. This callback logs events for TensorBoard, including: * Metrics summary plots * Training graph visualization * Activation histograms * Sampled profiling If you have installed TensorFlow with pip, you should be able to launch TensorBoard from the command line: ``` tensorboard --logdir=path_to_your_logs ``` You can find more information about TensorBoard [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard). Example (Basic): ```python tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=""./logs"") model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback]) # run the tensorboard command to view the visualizations. ``` Example (Profile): ```python # profile a single batch, e.g. the 5th batch. tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='./logs', profile_batch=5) model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback]) # Now run the tensorboard command to view the visualizations (profile plugin). # profile a range of batches, e.g. from 10 to 20. tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='./logs', profile_batch='10,20') model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback]) # Now run the tensorboard command to view the visualizations (profile plugin). ``` Arguments: log_dir: the path of the directory where to save the log files to be parsed by TensorBoard. histogram_freq: frequency (in epochs) at which to compute activation and weight histograms for the layers of the model. If set to 0, histograms won't be computed. Validation data (or split) must be specified for histogram visualizations. write_graph: whether to visualize the graph in TensorBoard. The log file can become quite large when write_graph is set to True. write_images: whether to write model weights to visualize as image in TensorBoard. update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, writes the losses and metrics to TensorBoard after each batch. The same applies for `'epoch'`. 
If using an integer, let's say `1000`, the callback will write the metrics and losses to TensorBoard every 1000 batches. Note that writing too frequently to TensorBoard can slow down your training. profile_batch: Profile the batch(es) to sample compute characteristics. profile_batch must be a non-negative integer or a tuple of integers. A pair of positive integers signifies a range of batches to profile. By default, it will profile the second batch. Set profile_batch=0 to disable profiling. embeddings_freq: frequency (in epochs) at which embedding layers will be visualized. If set to 0, embeddings won't be visualized. embeddings_metadata: a dictionary which maps layer name to a file name in which metadata for this embedding layer is saved. See the [details]( https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional) about the metadata file format. If the same metadata file is used for all embedding layers, a single string can be passed. Raises: ValueError: If histogram_freq is set and no validation data is provided." 4734,ReduceLROnPlateau,tensorflow/tensorflow/python/keras/callbacks.py,2324,class,"Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. This callback monitors a quantity and if no improvement is seen for a 'patience' number of epochs, the learning rate is reduced. Example: ```python reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.001) model.fit(X_train, Y_train, callbacks=[reduce_lr]) ``` Arguments: monitor: quantity to be monitored. factor: factor by which the learning rate will be reduced. `new_lr = lr * factor`. patience: number of epochs with no improvement after which learning rate will be reduced. verbose: int. 0: quiet, 1: update messages. mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode, the learning rate will be reduced when the quantity monitored has stopped decreasing; in `'max'` mode it will be reduced when the quantity monitored has stopped increasing; in `'auto'` mode, the direction is automatically inferred from the name of the monitored quantity. min_delta: threshold for measuring the new optimum, to only focus on significant changes. cooldown: number of epochs to wait before resuming normal operation after lr has been reduced. min_lr: lower bound on the learning rate." 4735,CSVLogger,tensorflow/tensorflow/python/keras/callbacks.py,2448,class,"Callback that streams epoch results to a CSV file. Supports all values that can be represented as a string, including 1D iterables such as `np.ndarray`. Example: ```python csv_logger = CSVLogger('training.log') model.fit(X_train, Y_train, callbacks=[csv_logger]) ``` Arguments: filename: Filename of the CSV file, e.g. `'run/log.csv'`. separator: String used to separate elements in the CSV file. append: Boolean. True: append if file exists (useful for continuing training). False: overwrite existing file." 4736,LambdaCallback,tensorflow/tensorflow/python/keras/callbacks.py,2541,class,"Callback for creating simple, custom callbacks on-the-fly. This callback is constructed with anonymous functions that will be called at the appropriate time. 
Note that the callbacks expect positional arguments, as follows: - `on_epoch_begin` and `on_epoch_end` expect two positional arguments: `epoch`, `logs` - `on_batch_begin` and `on_batch_end` expect two positional arguments: `batch`, `logs` - `on_train_begin` and `on_train_end` expect one positional argument: `logs` Arguments: on_epoch_begin: called at the beginning of every epoch. on_epoch_end: called at the end of every epoch. on_batch_begin: called at the beginning of every batch. on_batch_end: called at the end of every batch. on_train_begin: called at the beginning of model training. on_train_end: called at the end of model training. Example: ```python # Print the batch number at the beginning of every batch. batch_print_callback = LambdaCallback( on_batch_begin=lambda batch,logs: print(batch)) # Stream the epoch loss to a file in JSON format. The file content # is not well-formed JSON but rather has a JSON object per line. import json json_log = open('loss_log.json', mode='wt', buffering=1) json_logging_callback = LambdaCallback( on_epoch_end=lambda epoch, logs: json_log.write( json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'), on_train_end=lambda logs: json_log.close() ) # Terminate some processes after having finished model training. processes = ... cleanup_callback = LambdaCallback( on_train_end=lambda logs: [ p.terminate() for p in processes if p.is_alive()]) model.fit(..., callbacks=[batch_print_callback, json_logging_callback, cleanup_callback]) ```" 4737,Counter,tensorflow/tensorflow/python/keras/callbacks_test.py,77,class,"Counts the number of times each callback method was run. Attributes: method_counts: dict. Contains the counts of time each callback method was run." 4738,_get_numpy,tensorflow/tensorflow/python/keras/callbacks_test.py,107,function, 4739,_get_sequence,tensorflow/tensorflow/python/keras/callbacks_test.py,111,function, 4740,CallbackCountsTest,tensorflow/tensorflow/python/keras/callbacks_test.py,126,class, 4741,KerasCallbacksTest,tensorflow/tensorflow/python/keras/callbacks_test.py,250,class, 4742,_SummaryFile,tensorflow/tensorflow/python/keras/callbacks_test.py,1758,class,"A record of summary tags and the files to which they were written. Fields `scalars`, `images`, `histograms`, and `tensors` are sets containing `_ObservedSummary` values." 4743,list_summaries,tensorflow/tensorflow/python/keras/callbacks_test.py,1773,function,"Read all summaries under the logdir into a `_SummaryFile`. Args: logdir: A path to a directory that contains zero or more event files, either as direct children or in transitive subdirectories. Summaries in these events must only contain old-style scalars, images, and histograms. Non-summary events, like `graph_def`s, are ignored. Returns: A `_SummaryFile` object reflecting all summaries written to any event files in the logdir or any of its descendant directories. Raises: ValueError: If an event file contains an summary of unexpected kind." 4744,TestTensorBoardV2,tensorflow/tensorflow/python/keras/callbacks_test.py,1829,class, 4745,TestTensorBoardV2NonParameterizedTest,tensorflow/tensorflow/python/keras/callbacks_test.py,2182,class, 4746,MostRecentlyModifiedFileMatchingPatternTest,tensorflow/tensorflow/python/keras/callbacks_test.py,2462,class, 4747,TensorBoard,tensorflow/tensorflow/python/keras/callbacks_v1.py,42,class,"Enable visualizations for TensorBoard. TensorBoard is a visualization tool provided with TensorFlow. 
This callback logs events for TensorBoard, including: * Metrics summary plots * Training graph visualization * Activation histograms * Sampled profiling If you have installed TensorFlow with pip, you should be able to launch TensorBoard from the command line: ```sh tensorboard --logdir=path_to_your_logs ``` You can find more information about TensorBoard [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard). Arguments: log_dir: the path of the directory in which to save the log files to be parsed by TensorBoard. histogram_freq: frequency (in epochs) at which to compute activation and weight histograms for the layers of the model. If set to 0, histograms won't be computed. Validation data (or split) must be specified for histogram visualizations. write_graph: whether to visualize the graph in TensorBoard. The log file can become quite large when write_graph is set to True. write_grads: whether to visualize gradient histograms in TensorBoard. `histogram_freq` must be greater than 0. batch_size: size of batch of inputs to feed to the network for histograms computation. write_images: whether to write model weights to visualize as images in TensorBoard. embeddings_freq: frequency (in epochs) at which selected embedding layers will be saved. If set to 0, embeddings won't be computed. Data to be visualized in TensorBoard's Embedding tab must be passed as `embeddings_data`. embeddings_layer_names: a list of names of layers to keep an eye on. If None or an empty list, all the embedding layers will be watched. embeddings_metadata: a dictionary which maps layer name to a file name in which metadata for this embedding layer is saved. [Here are details]( https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional) about the metadata file format. If the same metadata file is used for all embedding layers, a single string can be passed. embeddings_data: data to be embedded at layers specified in `embeddings_layer_names`. Numpy array (if the model has a single input) or list of Numpy arrays (if the model has multiple inputs). Learn more about embeddings [in this guide]( https://www.tensorflow.org/programmers_guide/embedding). update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`, writes the losses and metrics to TensorBoard after each batch. The same applies for `'epoch'`. If using an integer, let's say `1000`, the callback will write the metrics and losses to TensorBoard every 1000 samples. Note that writing too frequently to TensorBoard can slow down your training. profile_batch: Profile the batch to sample compute characteristics. By default, it will profile the second batch. Set profile_batch=0 to disable profiling. Raises: ValueError: If histogram_freq is set and no validation data is provided. @compatibility(eager) Using the `TensorBoard` callback will work when eager execution is enabled, with the restriction that outputting histogram summaries of weights and gradients is not supported. Consequently, `histogram_freq` will be ignored. @end_compatibility" 4748,TestTensorBoardV1,tensorflow/tensorflow/python/keras/callbacks_v1_test.py,51,class, 4749,keras_mode_combinations,tensorflow/tensorflow/python/keras/combinations.py,31,function,"Returns the default test combinations for tf.keras tests. Note that if tf2 is enabled, then the v1 session test will be skipped. Args: mode: List of modes to run the tests. The valid options are 'graph' and 'eager'. Defaults to ['graph', 'eager'] if not specified. 
If an empty list is provided, then the test will run under the context based on tf's version, e.g. graph for v1 and eager for v2. run_eagerly: List of `run_eagerly` values to be run with the tests. Defaults to [True, False] if not specified. Note that for `graph` mode, the run_eagerly value will only be False. Returns: A list containing all the combinations to be used to generate test cases." 4750,keras_model_type_combinations,tensorflow/tensorflow/python/keras/combinations.py,60,function, 4751,keras_tensor_combinations,tensorflow/tensorflow/python/keras/combinations.py,64,function, 4752,KerasModeCombination,tensorflow/tensorflow/python/keras/combinations.py,68,class,"Combination for Keras test mode. It by default includes v1_session, v2_eager and v2_tf_function." 4753,KerasModelTypeCombination,tensorflow/tensorflow/python/keras/combinations.py,86,class,"Combination for Keras model types when doing model test. It by default includes 'functional', 'subclass', 'sequential'. Various methods in `testing_utils` to get models will auto-generate a model of the currently active Keras model type. This allows unittests to confirm the equivalence between different Keras models." 4754,KerasTensorCombination,tensorflow/tensorflow/python/keras/combinations.py,107,class,"Combination for whether KerasTensors are being used or not. It by default includes `True` and `False`: running Keras's functional API with KerasTensors as the inputs, and without." 4755,CombinationsTest,tensorflow/tensorflow/python/keras/combinations_test.py,33,class, 4756,Constraint,tensorflow/tensorflow/python/keras/constraints.py,36,class, 4757,MaxNorm,tensorflow/tensorflow/python/keras/constraints.py,46,class,"MaxNorm weight constraint. Constrains the weights incident to each hidden unit to have a norm less than or equal to a desired value. Also available via the shortcut function `tf.keras.constraints.max_norm`. Arguments: max_value: the maximum norm value for the incoming weights. axis: integer, axis along which to calculate weight norms. For instance, in a `Dense` layer the weight matrix has shape `(input_dim, output_dim)`, set `axis` to `0` to constrain each weight vector of length `(input_dim,)`. In a `Conv2D` layer with `data_format=""channels_last""`, the weight tensor has shape `(rows, cols, input_depth, output_depth)`, set `axis` to `[0, 1, 2]` to constrain the weights of each filter tensor of size `(rows, cols, input_depth)`." 4758,NonNeg,tensorflow/tensorflow/python/keras/constraints.py,87,class,"Constrains the weights to be non-negative. Also available via the shortcut function `tf.keras.constraints.non_neg`." 4759,UnitNorm,tensorflow/tensorflow/python/keras/constraints.py,98,class,"Constrains the weights incident to each hidden unit to have unit norm. Also available via the shortcut function `tf.keras.constraints.unit_norm`. Arguments: axis: integer, axis along which to calculate weight norms. For instance, in a `Dense` layer the weight matrix has shape `(input_dim, output_dim)`, set `axis` to `0` to constrain each weight vector of length `(input_dim,)`. In a `Conv2D` layer with `data_format=""channels_last""`, the weight tensor has shape `(rows, cols, input_depth, output_depth)`, set `axis` to `[0, 1, 2]` to constrain the weights of each filter tensor of size `(rows, cols, input_depth)`." 4760,MinMaxNorm,tensorflow/tensorflow/python/keras/constraints.py,133,class,"MinMaxNorm weight constraint. Constrains the weights incident to each hidden unit to have the norm between a lower bound and an upper bound. 
Also available via the shortcut function `tf.keras.constraints.min_max_norm`. Arguments: min_value: the minimum norm for the incoming weights. max_value: the maximum norm for the incoming weights. rate: rate for enforcing the constraint: weights will be rescaled to yield `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`. Effectively, this means that rate=1.0 stands for strict enforcement of the constraint, while rate<1.0 means that weights will be rescaled at each step to slowly move towards a value inside the desired interval. axis: integer, axis along which to calculate weight norms. For instance, in a `Dense` layer the weight matrix has shape `(input_dim, output_dim)`, set `axis` to `0` to constrain each weight vector of length `(input_dim,)`. In a `Conv2D` layer with `data_format=""channels_last""`, the weight tensor has shape `(rows, cols, input_depth, output_depth)`, set `axis` to `[0, 1, 2]` to constrain the weights of each filter tensor of size `(rows, cols, input_depth)`." 4761,RadialConstraint,tensorflow/tensorflow/python/keras/constraints.py,191,class,"Constrains `Conv2D` kernel weights to be the same for each radius. Also available via the shortcut function `tf.keras.constraints.radial_constraint`. For example, the desired output for the following 4-by-4 kernel: ``` kernel = [[v_00, v_01, v_02, v_03], [v_10, v_11, v_12, v_13], [v_20, v_21, v_22, v_23], [v_30, v_31, v_32, v_33]] ``` is this: ``` kernel = [[v_11, v_11, v_11, v_11], [v_11, v_33, v_33, v_11], [v_11, v_33, v_33, v_11], [v_11, v_11, v_11, v_11]] ``` This constraint can be applied to any `Conv2D` layer version, including `Conv2DTranspose` and `SeparableConv2D`, and with either `""channels_last""` or `""channels_first""` data format. The method assumes the weight tensor is of shape `(rows, cols, input_depth, output_depth)`." 4762,serialize,tensorflow/tensorflow/python/keras/constraints.py,286,function, 4763,deserialize,tensorflow/tensorflow/python/keras/constraints.py,291,function, 4764,get,tensorflow/tensorflow/python/keras/constraints.py,300,function, 4765,get_test_values,tensorflow/tensorflow/python/keras/constraints_test.py,31,function, 4766,get_example_array,tensorflow/tensorflow/python/keras/constraints_test.py,35,function, 4767,get_example_kernel,tensorflow/tensorflow/python/keras/constraints_test.py,42,function, 4768,KerasConstraintsTest,tensorflow/tensorflow/python/keras/constraints_test.py,49,class, 4769,KerasInitializersTest,tensorflow/tensorflow/python/keras/initializers_test.py,36,class, 4770,TestCase,tensorflow/tensorflow/python/keras/keras_parameterized.py,42,class, 4771,run_with_all_saved_model_formats,tensorflow/tensorflow/python/keras/keras_parameterized.py,49,function,"Execute the decorated test with all Keras saved model formats. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once for each Keras saved model format. The Keras saved model formats include: 1. HDF5: 'h5' 2. SavedModel: 'tf' Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. Various methods in `testing_utils` to get file path for saved models will auto-generate a string of the two saved model formats. This allows unittests to confirm the equivalence between the two Keras saved model formats. 
For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_with_all_saved_model_formats def test_foo(self): save_format = testing_utils.get_save_format() saved_model_dir = '/tmp/saved_model/' model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.Dense(3)) model.compile(loss='mse', optimizer='sgd', metrics=['acc']) keras.models.save_model(model, saved_model_dir, save_format=save_format) model = keras.models.load_model(saved_model_dir) if __name__ == ""__main__"": tf.test.main() ``` This test tries to save and load the model in both the 'h5' and 'tf' formats. We can also annotate the whole class if we want this to apply to all tests in the class: ```python @testing_utils.run_with_all_saved_model_formats class MyTests(testing_utils.KerasTestCase): def test_foo(self): save_format = testing_utils.get_save_format() saved_model_dir = '/tmp/saved_model/' model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.Dense(3)) model.compile(loss='mse', optimizer='sgd', metrics=['acc']) keras.models.save_model(model, saved_model_dir, save_format=save_format) model = tf.keras.models.load_model(saved_model_dir) if __name__ == ""__main__"": tf.test.main() ``` Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. exclude_formats: A collection of Keras saved model formats to not run. (May also be a single format not wrapped in a collection). Defaults to None. Returns: Returns a decorator that will run the decorated test method multiple times: once for each desired Keras saved model format. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency." 4772,_test_h5_saved_model_format,tensorflow/tensorflow/python/keras/keras_parameterized.py,160,function, 4773,_test_tf_saved_model_format,tensorflow/tensorflow/python/keras/keras_parameterized.py,165,function, 4774,run_with_all_model_types,tensorflow/tensorflow/python/keras/keras_parameterized.py,172,function,"Execute the decorated test with all Keras model types. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once for each Keras model type. The Keras model types are: ['functional', 'subclass', 'sequential'] Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. Various methods in `testing_utils` to get models will auto-generate a model of the currently active Keras model type. This allows unittests to confirm the equivalence between different Keras models. 
For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_with_all_model_types( exclude_models = ['sequential']) def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == ""__main__"": tf.test.main() ``` This test tries building a small mlp as both a functional model and as a subclass model. We can also annotate the whole class if we want this to apply to all tests in the class: ```python @testing_utils.run_with_all_model_types(exclude_models = ['sequential']) class MyTests(testing_utils.KerasTestCase): def test_foo(self): model = testing_utils.get_small_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile(optimizer, loss, metrics=metrics) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == ""__main__"": tf.test.main() ``` Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. exclude_models: A collection of Keras model types to not run. (May also be a single model type not wrapped in a collection). Defaults to None. Returns: Returns a decorator that will run the decorated test method multiple times: once for each desired Keras model type. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency." 4775,_test_functional_model_type,tensorflow/tensorflow/python/keras/keras_parameterized.py,288,function, 4776,_test_subclass_model_type,tensorflow/tensorflow/python/keras/keras_parameterized.py,293,function, 4777,_test_sequential_model_type,tensorflow/tensorflow/python/keras/keras_parameterized.py,298,function, 4778,run_all_keras_modes,tensorflow/tensorflow/python/keras/keras_parameterized.py,303,function,"Execute the decorated test with all keras execution modes. This decorator is intended to be applied either to individual test methods in a `keras_parameterized.TestCase` class, or directly to a test class that extends it. Doing so will cause the contents of the individual test method (or all test methods in the class) to be executed multiple times - once executing in legacy graph mode, once running eagerly and with `should_run_eagerly` returning True, and once running eagerly with `should_run_eagerly` returning False. If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and the test will only run twice. Note: if stacking this decorator with absl.testing's parameterized decorators, those should be at the bottom of the stack. 
For example, consider the following unittest: ```python class MyTests(testing_utils.KerasTestCase): @testing_utils.run_all_keras_modes def test_foo(self): model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3) optimizer = RMSPropOptimizer(learning_rate=0.001) loss = 'mse' metrics = ['mae'] model.compile( optimizer, loss, metrics=metrics, run_eagerly=testing_utils.should_run_eagerly()) inputs = np.zeros((10, 3)) targets = np.zeros((10, 4)) dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)) dataset = dataset.repeat(100) dataset = dataset.batch(10) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1) if __name__ == ""__main__"": tf.test.main() ``` This test will try compiling & fitting the small functional mlp using all three Keras execution modes. Args: test_or_class: test method or class to be annotated. If None, this method returns a decorator that can be applied to a test method or test class. If it is not None this returns the decorator applied to the test or class. config: An optional config_pb2.ConfigProto to use to configure the session when executing graphs. always_skip_v1: If True, does not try running the legacy graph mode even when Tensorflow v2 behavior is not enabled. always_skip_eager: If True, does not execute the decorated test with eager execution modes. **kwargs: Additional kwargs for configuring tests for in-progress Keras behaviors/refactorings that we haven't fully rolled out yet. Returns: Returns a decorator that will run the decorated test method multiple times. Raises: ImportError: If abseil parameterized is not installed or not included as a target dependency." 4779,_v1_session_test,tensorflow/tensorflow/python/keras/keras_parameterized.py,413,function, 4780,_v2_eager_test,tensorflow/tensorflow/python/keras/keras_parameterized.py,420,function, 4781,_v2_function_test,tensorflow/tensorflow/python/keras/keras_parameterized.py,426,function, 4782,_v2_function_and_kerastensors_test,tensorflow/tensorflow/python/keras/keras_parameterized.py,432,function, 4783,_test_or_class_decorator,tensorflow/tensorflow/python/keras/keras_parameterized.py,439,function,"Decorate a test or class with a decorator intended for one method. If the test_or_class is a class: This will apply the decorator to all test methods in the class. If the test_or_class is an iterable of already-parameterized test cases: This will apply the decorator to all the cases, and then flatten the resulting cross-product of test cases. This allows stacking the Keras parameterized decorators w/ each other, and to apply them to test methods that have already been marked with an absl parameterized decorator. Otherwise, treat the object as a single method and apply the decorator directly. Args: test_or_class: A test method (that may have already been decorated with a parameterized decorator) or a test class that extends keras_parameterized.TestCase. single_method_decorator: A parameterized decorator intended for a single test method. Returns: The decorated result." 4784,KerasParameterizedTest,tensorflow/tensorflow/python/keras/keras_parameterized_test.py,34,class, 4785,Loss,tensorflow/tensorflow/python/keras/losses.py,46,class,"Loss base class. To be implemented by subclasses: * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`. 
Example subclass implementation: ```python class MeanSquaredError(Loss): def call(self, y_true, y_pred): y_pred = tf.convert_to_tensor(y_pred) y_true = tf.cast(y_true, y_pred.dtype) return tf.reduce_mean(tf.math.square(y_pred - y_true), axis=-1) ``` When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction types, and reduce losses explicitly in your training loop. Using 'AUTO' or 'SUM_OVER_BATCH_SIZE' will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details on this. You can implement 'SUM_OVER_BATCH_SIZE' using the global batch size like: ```python with strategy.scope(): loss_obj = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) .... loss = (tf.reduce_sum(loss_obj(labels, predictions)) * (1. / global_batch_size)) ```" 4786,LossFunctionWrapper,tensorflow/tensorflow/python/keras/losses.py,209,class,Wraps a loss function in the `Loss` class. 4787,MeanSquaredError,tensorflow/tensorflow/python/keras/losses.py,263,class,"Computes the mean of squares of errors between labels and predictions. `loss = square(y_true - y_pred)` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> mse = tf.keras.losses.MeanSquaredError() >>> mse(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 0.25 >>> # Using 'sum' reduction type. >>> mse = tf.keras.losses.MeanSquaredError( ... reduction=tf.keras.losses.Reduction.SUM) >>> mse(y_true, y_pred).numpy() 1.0 >>> # Using 'none' reduction type. >>> mse = tf.keras.losses.MeanSquaredError( ... reduction=tf.keras.losses.Reduction.NONE) >>> mse(y_true, y_pred).numpy() array([0.5, 0.5], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError()) ```" 4788,MeanAbsoluteError,tensorflow/tensorflow/python/keras/losses.py,322,class,"Computes the mean of absolute difference between labels and predictions. `loss = abs(y_true - y_pred)` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> mae = tf.keras.losses.MeanAbsoluteError() >>> mae(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 0.25 >>> # Using 'sum' reduction type. >>> mae = tf.keras.losses.MeanAbsoluteError( ... reduction=tf.keras.losses.Reduction.SUM) >>> mae(y_true, y_pred).numpy() 1.0 >>> # Using 'none' reduction type. >>> mae = tf.keras.losses.MeanAbsoluteError( ... reduction=tf.keras.losses.Reduction.NONE) >>> mae(y_true, y_pred).numpy() array([0.5, 0.5], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError()) ```" 4789,MeanAbsolutePercentageError,tensorflow/tensorflow/python/keras/losses.py,381,class,"Computes the mean absolute percentage error between `y_true` and `y_pred`. `loss = 100 * abs(y_true - y_pred) / y_true` Standalone usage: >>> y_true = [[2., 1.], [2., 3.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> mape = tf.keras.losses.MeanAbsolutePercentageError() >>> mape(y_true, y_pred).numpy() 50. >>> # Calling with 'sample_weight'. 
>>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 20. >>> # Using 'sum' reduction type. >>> mape = tf.keras.losses.MeanAbsolutePercentageError( ... reduction=tf.keras.losses.Reduction.SUM) >>> mape(y_true, y_pred).numpy() 100. >>> # Using 'none' reduction type. >>> mape = tf.keras.losses.MeanAbsolutePercentageError( ... reduction=tf.keras.losses.Reduction.NONE) >>> mape(y_true, y_pred).numpy() array([25., 75.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsolutePercentageError()) ```" 4790,MeanSquaredLogarithmicError,tensorflow/tensorflow/python/keras/losses.py,442,class,"Computes the mean squared logarithmic error between `y_true` and `y_pred`. `loss = square(log(y_true + 1.) - log(y_pred + 1.))` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [1., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> msle = tf.keras.losses.MeanSquaredLogarithmicError() >>> msle(y_true, y_pred).numpy() 0.240 >>> # Calling with 'sample_weight'. >>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy() 0.120 >>> # Using 'sum' reduction type. >>> msle = tf.keras.losses.MeanSquaredLogarithmicError( ... reduction=tf.keras.losses.Reduction.SUM) >>> msle(y_true, y_pred).numpy() 0.480 >>> # Using 'none' reduction type. >>> msle = tf.keras.losses.MeanSquaredLogarithmicError( ... reduction=tf.keras.losses.Reduction.NONE) >>> msle(y_true, y_pred).numpy() array([0.240, 0.240], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError()) ```" 4791,BinaryCrossentropy,tensorflow/tensorflow/python/keras/losses.py,503,class,"Computes the cross-entropy loss between true labels and predicted labels. Use this cross-entropy loss when there are only two label classes (assumed to be 0 and 1). For each example, there should be a single floating-point value per prediction. In the snippet below, each of the four examples has only a single floating-point value, and both `y_pred` and `y_true` have the shape `[batch_size]`. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> bce = tf.keras.losses.BinaryCrossentropy() >>> bce(y_true, y_pred).numpy() 0.815 >>> # Calling with 'sample_weight'. >>> bce(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.458 >>> # Using 'sum' reduction type. >>> bce = tf.keras.losses.BinaryCrossentropy( ... reduction=tf.keras.losses.Reduction.SUM) >>> bce(y_true, y_pred).numpy() 1.630 >>> # Using 'none' reduction type. >>> bce = tf.keras.losses.BinaryCrossentropy( ... reduction=tf.keras.losses.Reduction.NONE) >>> bce(y_true, y_pred).numpy() array([0.916 , 0.714], dtype=float32) Usage with the `tf.keras` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.BinaryCrossentropy()) ```" 4792,CategoricalCrossentropy,tensorflow/tensorflow/python/keras/losses.py,583,class,"Computes the crossentropy loss between the labels and predictions. Use this crossentropy loss function when there are two or more label classes. We expect labels to be provided in a `one_hot` representation. If you want to provide labels as integers, please use `SparseCategoricalCrossentropy` loss. There should be `# classes` floating point values per feature. In the snippet below, there are `# classes` floating point values per example. The shapes of both `y_pred` and `y_true` are `[batch_size, num_classes]`. 
Standalone usage: >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> cce = tf.keras.losses.CategoricalCrossentropy() >>> cce(y_true, y_pred).numpy() 1.177 >>> # Calling with 'sample_weight'. >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy() 0.814 >>> # Using 'sum' reduction type. >>> cce = tf.keras.losses.CategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.SUM) >>> cce(y_true, y_pred).numpy() 2.354 >>> # Using 'none' reduction type. >>> cce = tf.keras.losses.CategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.NONE) >>> cce(y_true, y_pred).numpy() array([0.0513, 2.303], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalCrossentropy()) ```" 4793,SparseCategoricalCrossentropy,tensorflow/tensorflow/python/keras/losses.py,662,class,"Computes the crossentropy loss between the labels and predictions. Use this crossentropy loss function when there are two or more label classes. We expect labels to be provided as integers. If you want to provide labels using `one-hot` representation, please use `CategoricalCrossentropy` loss. There should be `# classes` floating point values per feature for `y_pred` and a single floating point value per feature for `y_true`. In the snippet below, there is a single floating point value per example for `y_true` and `# classes` floating point values per example for `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is `[batch_size, num_classes]`. Standalone usage: >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> scce = tf.keras.losses.SparseCategoricalCrossentropy() >>> scce(y_true, y_pred).numpy() 1.177 >>> # Calling with 'sample_weight'. >>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy() 0.814 >>> # Using 'sum' reduction type. >>> scce = tf.keras.losses.SparseCategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.SUM) >>> scce(y_true, y_pred).numpy() 2.354 >>> # Using 'none' reduction type. >>> scce = tf.keras.losses.SparseCategoricalCrossentropy( ... reduction=tf.keras.losses.Reduction.NONE) >>> scce(y_true, y_pred).numpy() array([0.0513, 2.303], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy()) ```" 4794,Hinge,tensorflow/tensorflow/python/keras/losses.py,739,class,"Computes the hinge loss between `y_true` and `y_pred`. `loss = maximum(1 - y_true * y_pred, 0)` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.Hinge() >>> h(y_true, y_pred).numpy() 1.3 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.55 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Hinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.6 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Hinge( ... 
reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.1, 1.5], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Hinge()) ```" 4795,SquaredHinge,tensorflow/tensorflow/python/keras/losses.py,798,class,"Computes the squared hinge loss between `y_true` and `y_pred`. `loss = square(maximum(1 - y_true * y_pred, 0))` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.SquaredHinge() >>> h(y_true, y_pred).numpy() 1.86 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.73 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 3.72 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.46, 2.26], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge()) ```" 4796,CategoricalHinge,tensorflow/tensorflow/python/keras/losses.py,860,class,"Computes the categorical hinge loss between `y_true` and `y_pred`. `loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.CategoricalHinge() >>> h(y_true, y_pred).numpy() 1.4 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.6 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.8 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.2, 1.6], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge()) ```" 4797,Poisson,tensorflow/tensorflow/python/keras/losses.py,920,class,"Computes the Poisson loss between `y_true` and `y_pred`. `loss = y_pred - y_true * log(y_pred)` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> p = tf.keras.losses.Poisson() >>> p(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.4 >>> # Using 'sum' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.SUM) >>> p(y_true, y_pred).numpy() 0.999 >>> # Using 'none' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.NONE) >>> p(y_true, y_pred).numpy() array([0.999, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson()) ```" 4798,LogCosh,tensorflow/tensorflow/python/keras/losses.py,976,class,"Computes the logarithm of the hyperbolic cosine of the prediction error. `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error `y_pred - y_true`. 
Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> l = tf.keras.losses.LogCosh() >>> l(y_true, y_pred).numpy() 0.108 >>> # Calling with 'sample_weight'. >>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.087 >>> # Using 'sum' reduction type. >>> l = tf.keras.losses.LogCosh( ... reduction=tf.keras.losses.Reduction.SUM) >>> l(y_true, y_pred).numpy() 0.217 >>> # Using 'none' reduction type. >>> l = tf.keras.losses.LogCosh( ... reduction=tf.keras.losses.Reduction.NONE) >>> l(y_true, y_pred).numpy() array([0.217, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh()) ```" 4799,KLDivergence,tensorflow/tensorflow/python/keras/losses.py,1033,class,"Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> kl = tf.keras.losses.KLDivergence() >>> kl(y_true, y_pred).numpy() 0.458 >>> # Calling with 'sample_weight'. >>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.366 >>> # Using 'sum' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... reduction=tf.keras.losses.Reduction.SUM) >>> kl(y_true, y_pred).numpy() 0.916 >>> # Using 'none' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... reduction=tf.keras.losses.Reduction.NONE) >>> kl(y_true, y_pred).numpy() array([0.916, -3.08e-06], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence()) ```" 4800,Huber,tensorflow/tensorflow/python/keras/losses.py,1094,class,"Computes the Huber loss between `y_true` and `y_pred`. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.Huber() >>> h(y_true, y_pred).numpy() 0.155 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.09 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Huber( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 0.31 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Huber( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([0.18, 0.13], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Huber()) ```" 4801,mean_squared_error,tensorflow/tensorflow/python/keras/losses.py,1168,function,"Computes the mean squared error between labels and predictions. After computing the squared distance between the inputs, the mean value over the last dimension is returned. `loss = mean(square(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. 
shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`." 4802,mean_absolute_error,tensorflow/tensorflow/python/keras/losses.py,1204,function,"Computes the mean absolute error between labels and predictions. `loss = mean(abs(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`." 4803,mean_absolute_percentage_error,tensorflow/tensorflow/python/keras/losses.py,1237,function,"Computes the mean absolute percentage error between `y_true` and `y_pred`. `loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)` Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`." 4804,mean_squared_logarithmic_error,tensorflow/tensorflow/python/keras/losses.py,1274,function,"Computes the mean squared logarithmic error between `y_true` and `y_pred`. `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = np.maximum(y_true, 1e-7) >>> y_pred = np.maximum(y_pred, 1e-7) >>> assert np.array_equal( ... loss.numpy(), ... np.mean( ... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`." 4805,_maybe_convert_labels,tensorflow/tensorflow/python/keras/losses.py,1306,function,Converts binary labels into -1/1. 4806,squared_hinge,tensorflow/tensorflow/python/keras/losses.py,1323,function,"Computes the squared hinge loss between `y_true` and `y_pred`. `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`." 
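As an editorial aside (not part of the indexed source), the documented `squared_hinge` formula is easy to verify by hand with fixed values instead of the random data used in the doctests above; the sample values below are made up for illustration:
```python
# Minimal sketch: verify tf.keras.losses.squared_hinge against the documented
# formula `mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`.
import numpy as np
import tensorflow as tf

y_true = np.array([[-1., 1.], [1., -1.]], dtype=np.float32)  # labels in {-1, 1}
y_pred = np.array([[0.3, 0.7], [0.5, -0.4]], dtype=np.float32)

manual = np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1)
keras_loss = tf.keras.losses.squared_hinge(y_true, y_pred).numpy()
assert np.allclose(keras_loss, manual)  # one loss value per sample, shape (2,)
```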
4807,hinge,tensorflow/tensorflow/python/keras/losses.py,1356,function,"Computes the hinge loss between `y_true` and `y_pred`. `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided they will be converted to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Hinge loss values. shape = `[batch_size, d0, .. dN-1]`." 4808,categorical_hinge,tensorflow/tensorflow/python/keras/losses.py,1388,function,"Computes the categorical hinge loss between `y_true` and `y_pred`. `loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = np.random.randint(0, 3, size=(2,)) >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> pos = np.sum(y_true * y_pred, axis=-1) >>> neg = np.amax((1. - y_true) * y_pred, axis=-1) >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.)) Args: y_true: The ground truth values. `y_true` values are expected to be 0 or 1. y_pred: The predicted values. Returns: Categorical hinge loss values." 4809,huber,tensorflow/tensorflow/python/keras/losses.py,1422,function,"Computes Huber loss value. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. delta: A float, the point where the Huber loss function changes from a quadratic to linear. Returns: Tensor with one scalar loss entry per sample." 4810,log_cosh,tensorflow/tensorflow/python/keras/losses.py,1457,function,"Logarithm of the hyperbolic cosine of the prediction error. `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.logcosh(y_true, y_pred) >>> assert loss.shape == (2,) >>> x = y_pred - y_true >>> assert np.allclose( ... loss.numpy(), ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - np.log(2.), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Logcosh error values. shape = `[batch_size, d0, .. dN-1]`." 4811,categorical_crossentropy,tensorflow/tensorflow/python/keras/losses.py,1496,function,"Computes the categorical crossentropy loss. Standalone usage: >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) Args: y_true: Tensor of one-hot true targets. 
y_pred: Tensor of predicted targets. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. Returns: Categorical crossentropy loss value." 4812,sparse_categorical_crossentropy,tensorflow/tensorflow/python/keras/losses.py,1537,function,"Computes the sparse categorical crossentropy loss. Standalone usage: >>> y_true = [1, 2] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) Args: y_true: Ground truth values. y_pred: The predicted values. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. axis: (Optional) Defaults to -1. The dimension along which the entropy is computed. Returns: Sparse categorical crossentropy loss value." 4813,binary_crossentropy,tensorflow/tensorflow/python/keras/losses.py,1569,function,"Computes the binary crossentropy loss. Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.916 , 0.714], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. from_logits: Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a probability distribution. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. Returns: Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`." 4814,kl_divergence,tensorflow/tensorflow/python/keras/losses.py,1613,function,"Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1) >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1) >>> assert np.array_equal( ... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1)) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. Returns: A `Tensor` with loss. Raises: TypeError: If `y_true` cannot be cast to the `y_pred.dtype`." 4815,poisson,tensorflow/tensorflow/python/keras/losses.py,1650,function,"Computes the Poisson loss between y_true and y_pred. The Poisson loss is the mean of the elements of the `Tensor` `y_pred - y_true * log(y_pred)`. Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.poisson(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_pred = y_pred + 1e-7 >>> assert np.allclose( ... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Poisson loss value. shape = `[batch_size, d0, .. dN-1]`. Raises: InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes." 
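As another editorial aside (not part of the indexed source), the Poisson loss formula documented above can be checked directly against NumPy; the sample values below are made up for illustration:
```python
# Minimal sketch: verify tf.keras.losses.poisson against the documented
# formula `mean(y_pred - y_true * log(y_pred), axis=-1)`.
import numpy as np
import tensorflow as tf

y_true = np.array([[1., 0.], [2., 3.]], dtype=np.float32)  # count-like targets
y_pred = np.array([[0.5, 0.1], [1.5, 2.5]], dtype=np.float32)  # positive rates

manual = np.mean(y_pred - y_true * np.log(y_pred), axis=-1)
loss = tf.keras.losses.poisson(y_true, y_pred).numpy()
# Keras adds a tiny epsilon inside the log for numerical stability,
# so compare with a loose tolerance.
assert np.allclose(loss, manual, atol=1e-5)
```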
4816,cosine_similarity,tensorflow/tensorflow/python/keras/losses.py,1692,function,"Computes the cosine similarity between labels and predictions. Note that it is a number between -1 and 1: 0 indicates orthogonality, values closer to -1 indicate greater similarity, and values closer to 1 indicate greater dissimilarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and targets. `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))` Standalone usage: >>> y_true = [[0., 1.], [1., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]] >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1) >>> loss.numpy() array([-0., -0.999, 0.999], dtype=float32) Args: y_true: Tensor of true targets. y_pred: Tensor of predicted targets. axis: Axis along which to determine similarity. Returns: Cosine similarity tensor." 4817,CosineSimilarity,tensorflow/tensorflow/python/keras/losses.py,1728,class,"Computes the cosine similarity between labels and predictions. Note that it is a negative quantity between -1 and 0, where 0 indicates orthogonality and values closer to -1 indicate greater similarity. This makes it usable as a loss function in a setting where you try to maximize the proximity between predictions and targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity will be 0 regardless of the proximity between predictions and targets. `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))` Standalone usage: >>> y_true = [[0., 1.], [1., 1.]] >>> y_pred = [[1., 0.], [1., 1.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1) >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]] >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]] >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] >>> # loss = -mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)) >>> # = -((0. + 0.) + (0.5 + 0.5)) / 2 >>> cosine_loss(y_true, y_pred).numpy() -0.5 >>> # Calling with 'sample_weight'. >>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() -0.0999 >>> # Using 'sum' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1, ... reduction=tf.keras.losses.Reduction.SUM) >>> cosine_loss(y_true, y_pred).numpy() -0.999 >>> # Using 'none' reduction type. >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1, ... reduction=tf.keras.losses.Reduction.NONE) >>> cosine_loss(y_true, y_pred).numpy() array([-0., -0.999], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CosineSimilarity(axis=1)) ``` Args: axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial] (https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op." 
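A final editorial aside (not part of the indexed source): the cosine similarity loss is the negative dot product of the L2-normalized vectors, so identical directions give -1 and orthogonal vectors give 0. A minimal sketch with made-up sample values:
```python
# Minimal sketch: verify tf.keras.losses.cosine_similarity against
# `-sum(l2_norm(y_true) * l2_norm(y_pred), axis)`.
import numpy as np
import tensorflow as tf

y_true = np.array([[0., 1.], [1., 1.]], dtype=np.float32)
y_pred = np.array([[0., 2.], [1., 0.]], dtype=np.float32)

loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1).numpy()
# First pair points the same way (loss -1.0); second pair overlaps
# partially (loss about -0.707).
manual = -np.sum(
    y_true / np.linalg.norm(y_true, axis=1, keepdims=True) *
    y_pred / np.linalg.norm(y_pred, axis=1, keepdims=True), axis=1)
assert np.allclose(loss, manual, atol=1e-6)
```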
4818,is_categorical_crossentropy,tensorflow/tensorflow/python/keras/losses.py,1811,function, 4819,serialize,tensorflow/tensorflow/python/keras/losses.py,1822,function,"Serializes loss function or `Loss` instance. Arguments: loss: A Keras `Loss` instance or a loss function. Returns: Loss configuration dictionary." 4820,deserialize,tensorflow/tensorflow/python/keras/losses.py,1835,function,"Deserializes a serialized loss class/function instance. Arguments: name: Loss configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras `Loss` instance or a loss function." 4821,get,tensorflow/tensorflow/python/keras/losses.py,1854,function,"Retrieves a Keras loss as a `function`/`Loss` class instance. The `identifier` may be the string name of a loss function or `Loss` class. >>> loss = tf.keras.losses.get(""categorical_crossentropy"") >>> type(loss) <class 'function'> >>> loss = tf.keras.losses.get(""CategoricalCrossentropy"") >>> type(loss) <class '...CategoricalCrossentropy'> You can also specify `config` of the loss to this function by passing a dict containing `class_name` and `config` as an identifier. Also note that the `class_name` must map to a `Loss` class. >>> identifier = {""class_name"": ""CategoricalCrossentropy"", ... ""config"": {""from_logits"": True}} >>> loss = tf.keras.losses.get(identifier) >>> type(loss) <class '...CategoricalCrossentropy'> Arguments: identifier: A loss identifier. One of None or string name of a loss function/class or loss configuration dictionary or a loss function or a loss class instance. Returns: A Keras loss as a `function`/`Loss` class instance. Raises: ValueError: If `identifier` cannot be interpreted." 4822,KerasLossesTest,tensorflow/tensorflow/python/keras/losses_test.py,46,class, 4823,MeanSquaredErrorTest,tensorflow/tensorflow/python/keras/losses_test.py,245,class, 4824,MeanAbsoluteErrorTest,tensorflow/tensorflow/python/keras/losses_test.py,337,class, 4825,MeanAbsolutePercentageErrorTest,tensorflow/tensorflow/python/keras/losses_test.py,429,class, 4826,MeanSquaredLogarithmicErrorTest,tensorflow/tensorflow/python/keras/losses_test.py,505,class, 4827,CosineSimilarityTest,tensorflow/tensorflow/python/keras/losses_test.py,562,class, 4828,BinaryCrossentropyTest,tensorflow/tensorflow/python/keras/losses_test.py,647,class, 4829,CategoricalCrossentropyTest,tensorflow/tensorflow/python/keras/losses_test.py,813,class, 4830,SparseCategoricalCrossentropyTest,tensorflow/tensorflow/python/keras/losses_test.py,920,class, 4831,HingeTest,tensorflow/tensorflow/python/keras/losses_test.py,1003,class, 4832,SquaredHingeTest,tensorflow/tensorflow/python/keras/losses_test.py,1103,class, 4833,CategoricalHingeTest,tensorflow/tensorflow/python/keras/losses_test.py,1212,class, 4834,LogCoshTest,tensorflow/tensorflow/python/keras/losses_test.py,1278,class, 4835,PoissonTest,tensorflow/tensorflow/python/keras/losses_test.py,1359,class, 4836,KLDivergenceTest,tensorflow/tensorflow/python/keras/losses_test.py,1440,class, 4837,HuberLossTest,tensorflow/tensorflow/python/keras/losses_test.py,1520,class, 4838,BinaryTruePositivesViaControlFlow,tensorflow/tensorflow/python/keras/losses_test.py,1631,class, 4839,CustomLossTest,tensorflow/tensorflow/python/keras/losses_test.py,1649,class, 4840,Metric,tensorflow/tensorflow/python/keras/metrics.py,82,class,"Encapsulates metric logic and state. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. **kwargs: Additional layer keyword arguments. 
Standalone usage: ```python m = SomeMetric(...) for input in ...: m.update_state(input) print('Final result: ', m.result().numpy()) ``` Usage with `compile()` API: ```python model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01), loss=tf.keras.losses.CategoricalCrossentropy(), metrics=[tf.keras.metrics.CategoricalAccuracy()]) data = np.random.random((1000, 32)) labels = np.random.random((1000, 10)) dataset = tf.data.Dataset.from_tensor_slices((data, labels)) dataset = dataset.batch(32) model.fit(dataset, epochs=10) ``` To be implemented by subclasses: * `__init__()`: All state variables should be created in this method by calling `self.add_weight()` like: `self.var = self.add_weight(...)` * `update_state()`: Has all updates to the state variables like: self.var.assign_add(...). * `result()`: Computes and returns a value for the metric from the state variables. Example subclass implementation: ```python class BinaryTruePositives(tf.keras.metrics.Metric): def __init__(self, name='binary_true_positives', **kwargs): super(BinaryTruePositives, self).__init__(name=name, **kwargs) self.true_positives = self.add_weight(name='tp', initializer='zeros') def update_state(self, y_true, y_pred, sample_weight=None): y_true = tf.cast(y_true, tf.bool) y_pred = tf.cast(y_pred, tf.bool) values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True)) values = tf.cast(values, self.dtype) if sample_weight is not None: sample_weight = tf.cast(sample_weight, self.dtype) sample_weight = tf.broadcast_to(sample_weight, values.shape) values = tf.multiply(values, sample_weight) self.true_positives.assign_add(tf.reduce_sum(values)) def result(self): return self.true_positives ```" 4841,Reduce,tensorflow/tensorflow/python/keras/metrics.py,323,class,"Encapsulates metrics that perform a reduce operation on the values. Args: reduction: a `tf.keras.metrics.Reduction` enum value. name: string name of the metric instance. dtype: (Optional) data type of the metric result." 4842,Sum,tensorflow/tensorflow/python/keras/metrics.py,414,class,"Computes the (weighted) sum of the given values. For example, if values is [1, 3, 5, 7] then the sum is 16. If the weights were specified as [1, 1, 0, 0] then the sum would be 4. This metric creates one variable, `total`, that is used to compute the sum of `values`. This is ultimately returned as `sum`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Sum() >>> m.update_state([1, 3, 5, 7]) >>> m.result().numpy() 16.0 Usage with `compile()` API: ```python model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs)) model.compile(optimizer='sgd', loss='mse') ```" 4843,Mean,tensorflow/tensorflow/python/keras/metrics.py,451,class,"Computes the (weighted) mean of the given values. For example, if values is [1, 3, 5, 7] then the mean is 4. If the weights were specified as [1, 1, 0, 0] then the mean would be 2. This metric creates two variables, `total` and `count` that are used to compute the average of `values`. This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. 
Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Mean() >>> m.update_state([1, 3, 5, 7]) >>> m.result().numpy() 4.0 >>> m.reset_states() >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0]) >>> m.result().numpy() 2.0 Usage with `compile()` API: ```python model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs)) model.compile(optimizer='sgd', loss='mse') ```" 4844,MeanRelativeError,tensorflow/tensorflow/python/keras/metrics.py,493,class,"Computes the mean relative error by normalizing with the given values. This metric creates two local variables, `total` and `count` that are used to compute the mean relative error. This is weighted by `sample_weight`, and it is ultimately returned as `mean_relative_error`: an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: normalizer: The normalizer values with same shape as predictions. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3]) >>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8]) >>> # metric = mean(|y_pred - y_true| / normalizer) >>> # = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3]) >>> # = 5/4 = 1.25 >>> m.result().numpy() 1.25 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])]) ```" 4845,MeanMetricWrapper,tensorflow/tensorflow/python/keras/metrics.py,572,class,"Wraps a stateless metric function with the Mean metric. Args: fn: The metric function to wrap, with signature `fn(y_true, y_pred, **kwargs)`. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. **kwargs: The keyword arguments that are passed on to `fn`." 4846,Accuracy,tensorflow/tensorflow/python/keras/metrics.py,646,class,"Calculates how often predictions equal labels. This metric creates two local variables, `total` and `count` that are used to compute the frequency with which `y_pred` matches `y_true`. This frequency is ultimately returned as `binary accuracy`: an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Accuracy() >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]]) >>> m.result().numpy() 0.75 >>> m.reset_states() >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]], ... sample_weight=[1, 1, 0, 0]) >>> m.result().numpy() 0.5 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Accuracy()]) ```" 4847,BinaryAccuracy,tensorflow/tensorflow/python/keras/metrics.py,688,class,"Calculates how often predictions match binary labels. This metric creates two local variables, `total` and `count` that are used to compute the frequency with which `y_pred` matches `y_true`. This frequency is ultimately returned as `binary accuracy`: an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values.
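A sketch of the `MeanMetricWrapper` pattern just described: a stateless `fn(y_true, y_pred)` becomes a streaming metric via the `Mean` reduction. The wrapper is exported as `tf.keras.metrics.MeanMetricWrapper` only in newer TF releases; in the tree indexed here it lives in `tensorflow.python.keras.metrics`, so treat the import path as an assumption:

```python
import tensorflow as tf

def squared_error(y_true, y_pred):
  # Stateless per-example metric function, signature fn(y_true, y_pred).
  return tf.square(y_pred - y_true)

# Streams per-example values through a (weighted) Mean reduction.
m = tf.keras.metrics.MeanMetricWrapper(fn=squared_error, name="mse_stream")
m.update_state([0., 0., 1., 1.], [1., 1., 1., 0.])
print(m.result().numpy())  # (1 + 1 + 0 + 1) / 4 = 0.75
```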
Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Standalone usage: >>> m = tf.keras.metrics.BinaryAccuracy() >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]]) >>> m.result().numpy() 0.75 >>> m.reset_states() >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]], ... sample_weight=[1, 0, 0, 1]) >>> m.result().numpy() 0.5 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.BinaryAccuracy()]) ```" 4848,CategoricalAccuracy,tensorflow/tensorflow/python/keras/metrics.py,733,class,"Calculates how often predictions match one-hot labels. You can provide logits of classes as `y_pred`, since argmax of logits and probabilities is the same. This metric creates two local variables, `total` and `count` that are used to compute the frequency with which `y_pred` matches `y_true`. This frequency is ultimately returned as `categorical accuracy`: an idempotent operation that simply divides `total` by `count`. `y_pred` and `y_true` should be passed in as vectors of probabilities, rather than as labels. If necessary, use `tf.one_hot` to expand `y_true` as a vector. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.CategoricalAccuracy() >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], ... [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8], ... [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.CategoricalAccuracy()]) ```" 4849,SparseCategoricalAccuracy,tensorflow/tensorflow/python/keras/metrics.py,785,class,"Calculates how often predictions match integer labels. ```python acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1))) ``` You can provide logits of classes as `y_pred`, since argmax of logits and probabilities is the same. This metric creates two local variables, `total` and `count` that are used to compute the frequency with which `y_pred` matches `y_true`. This frequency is ultimately returned as `sparse categorical accuracy`: an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SparseCategoricalAccuracy() >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) ```" 4850,TopKCategoricalAccuracy,tensorflow/tensorflow/python/keras/metrics.py,836,class,"Computes how often targets are in the top `K` predictions. Args: k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1) >>> m.update_state([[0, 0, 1], [0, 1, 0]], ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([[0, 0, 1], [0, 1, 0]], ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.TopKCategoricalAccuracy()]) ```" 4851,SparseTopKCategoricalAccuracy,tensorflow/tensorflow/python/keras/metrics.py,875,class,"Computes how often integer targets are in the top `K` predictions. Args: k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1) >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()]) ```" 4852,_ConfusionMatrixConditionCount,tensorflow/tensorflow/python/keras/metrics.py,912,class,"Calculates the number of the given confusion matrix condition. Args: confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix` conditions. thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result." 4853,FalsePositives,tensorflow/tensorflow/python/keras/metrics.py,980,class,"Calculates the number of false positives. If `sample_weight` is given, calculates the sum of the weights of false positives. This metric creates one local variable, `accumulator` that is used to keep track of the number of false positives. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.FalsePositives() >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1]) >>> m.result().numpy() 2.0 >>> m.reset_states() >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0]) >>> m.result().numpy() 1.0 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.FalsePositives()]) ```" 4854,FalseNegatives,tensorflow/tensorflow/python/keras/metrics.py,1029,class,"Calculates the number of false negatives. If `sample_weight` is given, calculates the sum of the weights of false negatives. This metric creates one local variable, `accumulator` that is used to keep track of the number of false negatives. 
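Per the `_ConfusionMatrixConditionCount`/`FalsePositives` entries above, a list of thresholds yields one accumulator value per threshold. A small sketch (assuming TensorFlow 2.x as `tf`; the inputs are illustrative):

```python
import tensorflow as tf

m = tf.keras.metrics.FalsePositives(thresholds=[0.3, 0.6, 0.9])
m.update_state([0, 0, 1, 1], [0.4, 0.7, 0.5, 0.95])
# On the two negative labels: 0.4 and 0.7 exceed 0.3, only 0.7 exceeds 0.6,
# and nothing exceeds 0.9 -- so one count per threshold.
print(m.result().numpy())  # [2., 1., 0.]
```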
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.FalseNegatives() >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0]) >>> m.result().numpy() 2.0 >>> m.reset_states() >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0]) >>> m.result().numpy() 1.0 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.FalseNegatives()]) ```" 4855,TrueNegatives,tensorflow/tensorflow/python/keras/metrics.py,1078,class,"Calculates the number of true negatives. If `sample_weight` is given, calculates the sum of the weights of true negatives. This metric creates one local variable, `accumulator` that is used to keep track of the number of true negatives. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.TrueNegatives() >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0]) >>> m.result().numpy() 2.0 >>> m.reset_states() >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0]) >>> m.result().numpy() 1.0 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.TrueNegatives()]) ```" 4856,TruePositives,tensorflow/tensorflow/python/keras/metrics.py,1127,class,"Calculates the number of true positives. If `sample_weight` is given, calculates the sum of the weights of true positives. This metric creates one local variable, `true_positives` that is used to keep track of the number of true positives. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: thresholds: (Optional) Defaults to 0.5. A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.TruePositives() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) >>> m.result().numpy() 2.0 >>> m.reset_states() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0]) >>> m.result().numpy() 1.0 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.TruePositives()]) ```" 4857,Precision,tensorflow/tensorflow/python/keras/metrics.py,1176,class,"Computes the precision of the predictions with respect to the labels. 
The metric creates two local variables, `true_positives` and `false_positives` that are used to compute the precision. This value is ultimately returned as `precision`, an idempotent operation that simply divides `true_positives` by the sum of `true_positives` and `false_positives`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. If `top_k` is set, we'll calculate precision as how often on average a class among the top-k classes with the highest predicted values of a batch entry is correct and can be found in the label for that entry. If `class_id` is specified, we calculate precision by considering only the entries in the batch for which `class_id` is above the threshold and/or in the top-k highest predictions, and computing the fraction of them for which `class_id` is indeed a correct label. Args: thresholds: (Optional) A float value or a python list/tuple of float threshold values in [0, 1]. A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. If neither thresholds nor top_k are set, the default is to calculate precision with `thresholds=0.5`. top_k: (Optional) Unset by default. An int value specifying the top-k predictions to consider when calculating precision. class_id: (Optional) Integer class ID for which we want binary metrics. This must be in the half-open interval `[0, num_classes)`, where `num_classes` is the last dimension of predictions. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Precision() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) >>> m.result().numpy() 0.6666667 >>> m.reset_states() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0]) >>> m.result().numpy() 1.0 >>> # With top_k=2, it will calculate precision over y_true[:2] and y_pred[:2] >>> m = tf.keras.metrics.Precision(top_k=2) >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) >>> m.result().numpy() 0.0 >>> # With top_k=4, it will calculate precision over y_true[:4] and y_pred[:4] >>> m = tf.keras.metrics.Precision(top_k=4) >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1]) >>> m.result().numpy() 0.5 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Precision()]) ```" 4858,Recall,tensorflow/tensorflow/python/keras/metrics.py,1314,class,"Computes the recall of the predictions with respect to the labels. This metric creates two local variables, `true_positives` and `false_negatives`, that are used to compute the recall. This value is ultimately returned as `recall`, an idempotent operation that simply divides `true_positives` by the sum of `true_positives` and `false_negatives`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. If `top_k` is set, recall will be computed as how often on average a class among the labels of a batch entry is in the top-k predictions. If `class_id` is specified, we calculate recall by considering only the entries in the batch for which `class_id` is in the label, and computing the fraction of them for which `class_id` is above the threshold and/or in the top-k predictions. Args: thresholds: (Optional) A float value or a python list/tuple of float threshold values in [0, 1]. 
A threshold is compared with prediction values to determine the truth value of predictions (i.e., above the threshold is `true`, below is `false`). One metric value is generated for each threshold value. If neither thresholds nor top_k are set, the default is to calculate recall with `thresholds=0.5`. top_k: (Optional) Unset by default. An int value specifying the top-k predictions to consider when calculating recall. class_id: (Optional) Integer class ID for which we want binary metrics. This must be in the half-open interval `[0, num_classes)`, where `num_classes` is the last dimension of predictions. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Recall() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1]) >>> m.result().numpy() 0.6666667 >>> m.reset_states() >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0]) >>> m.result().numpy() 1.0 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Recall()]) ```" 4859,SensitivitySpecificityBase,tensorflow/tensorflow/python/keras/metrics.py,1439,class,"Abstract base class for computing sensitivity and specificity. For additional information about specificity and sensitivity, see [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity)." 4860,SensitivityAtSpecificity,tensorflow/tensorflow/python/keras/metrics.py,1531,class,"Computes best sensitivity where specificity is >= specified value. `Sensitivity` measures the proportion of actual positives that are correctly identified as such (tp / (tp + fn)). `Specificity` measures the proportion of actual negatives that are correctly identified as such (tn / (tn + fp)). This metric creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the sensitivity at the given specificity. The threshold for the given specificity value is computed and used to evaluate the corresponding sensitivity. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. For additional information about specificity and sensitivity, see [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity). Args: specificity: A scalar value in range `[0, 1]`. num_thresholds: (Optional) Defaults to 200. The number of thresholds to use for matching the given specificity. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SensitivityAtSpecificity(0.5) >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8], ... sample_weight=[1, 1, 2, 2, 1]) >>> m.result().numpy() 0.333333 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SensitivityAtSpecificity()]) ```" 4861,SpecificityAtSensitivity,tensorflow/tensorflow/python/keras/metrics.py,1608,class,"Computes best specificity where sensitivity is >= specified value. `Sensitivity` measures the proportion of actual positives that are correctly identified as such (tp / (tp + fn)). `Specificity` measures the proportion of actual negatives that are correctly identified as such (tn / (tn + fp)).
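The threshold search described in the `SensitivityAtSpecificity` entry can be illustrated in plain NumPy. This mirrors the doctest above (result 0.5 at target specificity 0.5), not TensorFlow's actual interpolated implementation:

```python
import numpy as np

def sensitivity_at_specificity(y_true, scores, target, num_thresholds=200):
  """Best sensitivity among thresholds whose specificity >= target."""
  y_true = np.asarray(y_true, dtype=bool)
  scores = np.asarray(scores, dtype=float)
  best = 0.0
  for t in np.linspace(0.0, 1.0, num_thresholds):
    pred = scores > t
    tp = np.sum(pred & y_true)
    fn = np.sum(~pred & y_true)
    tn = np.sum(~pred & ~y_true)
    fp = np.sum(pred & ~y_true)
    sensitivity = tp / max(tp + fn, 1)
    specificity = tn / max(tn + fp, 1)
    if specificity >= target:
      best = max(best, sensitivity)
  return best

print(sensitivity_at_specificity([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8], 0.5))  # 0.5
```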
This metric creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the specificity at the given sensitivity. The threshold for the given sensitivity value is computed and used to evaluate the corresponding specificity. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. For additional information about specificity and sensitivity, see [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity). Args: sensitivity: A scalar value in range `[0, 1]`. num_thresholds: (Optional) Defaults to 200. The number of thresholds to use for matching the given sensitivity. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SpecificityAtSensitivity(0.5) >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) >>> m.result().numpy() 0.66666667 >>> m.reset_states() >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8], ... sample_weight=[1, 1, 2, 2, 2]) >>> m.result().numpy() 0.5 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SpecificityAtSensitivity()]) ```" 4862,PrecisionAtRecall,tensorflow/tensorflow/python/keras/metrics.py,1683,class,"Computes best precision where recall is >= specified value. This metric creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the precision at the given recall. The threshold for the given recall value is computed and used to evaluate the corresponding precision. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: recall: A scalar value in range `[0, 1]`. num_thresholds: (Optional) Defaults to 200. The number of thresholds to use for matching the given recall. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.PrecisionAtRecall(0.5) >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8], ... sample_weight=[2, 2, 2, 1, 1]) >>> m.result().numpy() 0.33333333 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.PrecisionAtRecall(recall=0.8)]) ```" 4863,RecallAtPrecision,tensorflow/tensorflow/python/keras/metrics.py,1750,class,"Computes best recall where precision is >= specified value. For a given score-label-distribution the required precision might not be achievable, in this case 0.0 is returned as recall. This metric creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the recall at the given precision. The threshold for the given precision value is computed and used to evaluate the corresponding recall. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: precision: A scalar value in range `[0, 1]`. num_thresholds: (Optional) Defaults to 200. The number of thresholds to use for matching the given precision. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. 
Standalone usage: >>> m = tf.keras.metrics.RecallAtPrecision(0.8) >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9], ... sample_weight=[1, 0, 0, 1]) >>> m.result().numpy() 1.0 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.RecallAtPrecision(precision=0.8)]) ```" 4864,AUC,tensorflow/tensorflow/python/keras/metrics.py,1821,class,"Computes the approximate AUC (Area under the curve) via a Riemann sum. This metric creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the AUC. To discretize the AUC curve, a linearly spaced set of thresholds is used to compute pairs of recall and precision values. The area under the ROC-curve is therefore computed using the height of the recall values by the false positive rate, while the area under the PR-curve is computed using the height of the precision values by the recall. This value is ultimately returned as `auc`, an idempotent operation that computes the area under a discretized curve of precision versus recall values (computed using the aforementioned variables). The `num_thresholds` variable controls the degree of discretization with larger numbers of thresholds more closely approximating the true AUC. The quality of the approximation may vary dramatically depending on `num_thresholds`. The `thresholds` parameter can be used to manually specify thresholds which split the predictions more evenly. For best results, `predictions` should be distributed approximately uniformly in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC approximation may be poor if this is not the case. Setting `summation_method` to 'minoring' or 'majoring' can help quantify the error in the approximation by providing a lower or upper bound estimate of the AUC. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: num_thresholds: (Optional) Defaults to 200. The number of thresholds to use when discretizing the roc curve. Values must be > 1. curve: (Optional) Specifies the name of the curve to be computed, 'ROC' [default] or 'PR' for the Precision-Recall-curve. summation_method: (Optional) Specifies the [Riemann summation method]( https://en.wikipedia.org/wiki/Riemann_sum) used. 'interpolation' (default) applies mid-point summation scheme for `ROC`. For PR-AUC, interpolates (true/false) positives but not the ratio that is precision (see Davis & Goadrich 2006 for details); 'minoring' applies left summation for increasing intervals and right summation for decreasing intervals; 'majoring' does the opposite. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. thresholds: (Optional) A list of floating point values to use as the thresholds for discretizing the curve. If set, the `num_thresholds` parameter is ignored. Values should be in [0, 1]. Endpoint thresholds equal to {-epsilon, 1+epsilon} for a small positive epsilon value will be automatically included with these to correctly handle predictions equal to exactly 0 or 1. multi_label: boolean indicating whether multilabel data should be treated as such, wherein AUC is computed separately for each label and then averaged across labels, or (when False) if the data should be flattened into a single label before AUC computation.
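The Riemann-sum discretization described above can be reproduced by hand. The following NumPy sketch mirrors the arithmetic in the `AUC` doctest below (num_thresholds=3, ROC curve, 'interpolation' summation); it is illustrative, not the TF implementation:

```python
import numpy as np

y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.0, 0.5, 0.3, 0.9])
thresholds = [0 - 1e-7, 0.5, 1 + 1e-7]        # num_thresholds=3

tpr, fpr = [], []
for t in thresholds:
  pred = y_pred > t
  tp = np.sum(pred & (y_true == 1)); fn = np.sum(~pred & (y_true == 1))
  fp = np.sum(pred & (y_true == 0)); tn = np.sum(~pred & (y_true == 0))
  tpr.append(tp / (tp + fn)); fpr.append(fp / (fp + tn))

# Trapezoidal ('interpolation') summation over the false-positive-rate axis.
auc = sum((fpr[i] - fpr[i + 1]) * (tpr[i] + tpr[i + 1]) / 2
          for i in range(len(thresholds) - 1))
print(auc)  # 0.75, matching tf.keras.metrics.AUC(num_thresholds=3)
```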
In the latter case, when multilabel data is passed to AUC, each label-prediction pair is treated as an individual data point. Should be set to False for multi-class data. label_weights: (optional) list, array, or tensor of non-negative weights used to compute AUCs for multilabel data. When `multi_label` is True, the weights are applied to the individual label AUCs when they are averaged to produce the multi-label AUC. When it's False, they are used to weight the individual label predictions in computing the confusion matrix on the flattened data. Note that this is unlike class_weights in that class_weights weights the example depending on the value of its label, whereas label_weights depends only on the index of that label before flattening; therefore `label_weights` should not be used for multi-class data. Standalone usage: >>> m = tf.keras.metrics.AUC(num_thresholds=3) >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9]) >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7] >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2] >>> # recall = [1, 0.5, 0], fp_rate = [1, 0, 0] >>> # auc = ((((1+0.5)/2)*(1-0))+ (((0.5+0)/2)*(0-0))) = 0.75 >>> m.result().numpy() 0.75 >>> m.reset_states() >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9], ... sample_weight=[1, 0, 0, 1]) >>> m.result().numpy() 1.0 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.AUC()]) ```" 4865,CosineSimilarity,tensorflow/tensorflow/python/keras/metrics.py,2256,class,"Computes the cosine similarity between the labels and predictions. `cosine similarity = (a . b) / ||a|| ||b||` See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity). This metric keeps the average cosine similarity between `predictions` and `labels` over a stream of data. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. Standalone usage: >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]] >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]] >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)) >>> # = ((0. + 0.) + (0.5 + 0.5)) / 2 >>> m = tf.keras.metrics.CosineSimilarity(axis=1) >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]]) >>> m.result().numpy() 0.49999997 >>> m.reset_states() >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]], ... sample_weight=[0.3, 0.7]) >>> m.result().numpy() 0.6999999 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.CosineSimilarity(axis=1)]) ```" 4866,MeanAbsoluteError,tensorflow/tensorflow/python/keras/metrics.py,2306,class,"Computes the mean absolute error between the labels and predictions. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanAbsoluteError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.25 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ...
sample_weight=[1, 0]) >>> m.result().numpy() 0.5 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanAbsoluteError()]) ```" 4867,MeanAbsolutePercentageError,tensorflow/tensorflow/python/keras/metrics.py,2342,class,"Computes the mean absolute percentage error between `y_true` and `y_pred`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanAbsolutePercentageError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 250000000.0 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 500000000.0 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanAbsolutePercentageError()]) ```" 4868,MeanSquaredError,tensorflow/tensorflow/python/keras/metrics.py,2378,class,"Computes the mean squared error between `y_true` and `y_pred`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanSquaredError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.25 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.5 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanSquaredError()]) ```" 4869,MeanSquaredLogarithmicError,tensorflow/tensorflow/python/keras/metrics.py,2414,class,"Computes the mean squared logarithmic error between `y_true` and `y_pred`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanSquaredLogarithmicError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.12011322 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.24022643 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()]) ```" 4870,Hinge,tensorflow/tensorflow/python/keras/metrics.py,2450,class,"Computes the hinge metric between `y_true` and `y_pred`. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Hinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.3 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 1.1 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()]) ```" 4871,SquaredHinge,tensorflow/tensorflow/python/keras/metrics.py,2485,class,"Computes the squared hinge metric between `y_true` and `y_pred`. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. 
Standalone usage: >>> m = tf.keras.metrics.SquaredHinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.86 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 1.46 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SquaredHinge()]) ```" 4872,CategoricalHinge,tensorflow/tensorflow/python/keras/metrics.py,2523,class,"Computes the categorical hinge metric between `y_true` and `y_pred`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.CategoricalHinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.4000001 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 1.2 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.CategoricalHinge()]) ```" 4873,RootMeanSquaredError,tensorflow/tensorflow/python/keras/metrics.py,2558,class,"Computes root mean squared error metric between `y_true` and `y_pred`. Standalone usage: >>> m = tf.keras.metrics.RootMeanSquaredError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.70710677 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.RootMeanSquaredError()]) ```" 4874,LogCoshError,tensorflow/tensorflow/python/keras/metrics.py,2613,class,"Computes the logarithm of the hyperbolic cosine of the prediction error. `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true) Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.LogCoshError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.10844523 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.21689045 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.LogCoshError()]) ```" 4875,Poisson,tensorflow/tensorflow/python/keras/metrics.py,2649,class,"Computes the Poisson metric between `y_true` and `y_pred`. `metric = y_pred - y_true * log(y_pred)` Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Poisson() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.49999997 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.99999994 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Poisson()]) ```" 4876,KLDivergence,tensorflow/tensorflow/python/keras/metrics.py,2685,class,"Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`. `metric = y_true * log(y_true / y_pred)` Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. 
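The `logcosh`, Poisson, and KL formulas above can be checked by hand. A NumPy sketch for the KL case, mirroring the doctest that follows (the backend clips both arguments to [epsilon, 1] before the log; the epsilon value here is an assumption matching Keras' default):

```python
import numpy as np

EPSILON = 1e-7
y_true = np.clip(np.array([[0., 1.], [0., 0.]]), EPSILON, 1.)
y_pred = np.clip(np.array([[0.6, 0.4], [0.4, 0.6]]), EPSILON, 1.)

# metric = y_true * log(y_true / y_pred), summed over the last axis,
# then averaged over the batch.
per_sample = np.sum(y_true * np.log(y_true / y_pred), axis=-1)
print(per_sample.mean())  # ~0.458, matching m.result() in the doctest below
```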
Standalone usage: >>> m = tf.keras.metrics.KLDivergence() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 0.45814306 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.9162892 Usage with `compile()` API: ```python model.compile(optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.KLDivergence()]) ```" 4877,MeanIoU,tensorflow/tensorflow/python/keras/metrics.py,2722,class,"Computes the mean Intersection-Over-Union metric. Mean Intersection-Over-Union is a common evaluation metric for semantic image segmentation, which first computes the IOU for each semantic class and then computes the average over classes. IOU is defined as follows: IOU = true_positive / (true_positive + false_positive + false_negative). The predictions are accumulated in a confusion matrix, weighted by `sample_weight` and the metric is then calculated from it. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> # cm = [[1, 1], >>> # [1, 1]] >>> # sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1] >>> # iou = true_positives / (sum_row + sum_col - true_positives) >>> # result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2 = 0.33 >>> m = tf.keras.metrics.MeanIoU(num_classes=2) >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1]) >>> m.result().numpy() 0.33333334 >>> m.reset_states() >>> m.update_state([0, 0, 1, 1], [0, 1, 0, 1], ... sample_weight=[0.3, 0.3, 0.3, 0.1]) >>> m.result().numpy() 0.23809525 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.MeanIoU(num_classes=2)]) ```" 4878,MeanTensor,tensorflow/tensorflow/python/keras/metrics.py,2854,class,"Computes the element-wise (weighted) mean of the given tensors. `MeanTensor` returns a tensor with the same shape as the input tensors. The mean value is updated by keeping local variables `total` and `count`. The `total` tracks the sum of the weighted values, and `count` stores the sum of the weighted counts. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.MeanTensor() >>> m.update_state([0, 1, 2, 3]) >>> m.update_state([4, 5, 6, 7]) >>> m.result().numpy() array([2., 3., 4., 5.], dtype=float32) >>> m.update_state([12, 10, 8, 6], sample_weight= [0, 0.2, 0.5, 1]) >>> m.result().numpy() array([2. , 3.6363635, 4.8 , 5.3333335], dtype=float32)" 4879,BinaryCrossentropy,tensorflow/tensorflow/python/keras/metrics.py,2965,class,"Computes the crossentropy metric between the labels and predictions. This is the crossentropy metric class to be used when there are only two label classes (0 and 1). Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution. label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are smoothed, meaning the confidence on label values is relaxed. e.g.
`label_smoothing=0.2` means that we will use a value of `0.1` for label `0` and `0.9` for label `1`. Standalone usage: >>> m = tf.keras.metrics.BinaryCrossentropy() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 0.81492424 >>> m.reset_states() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.9162905 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.BinaryCrossentropy()]) ```" 4880,CategoricalCrossentropy,tensorflow/tensorflow/python/keras/metrics.py,3018,class,"Computes the crossentropy metric between the labels and predictions. This is the crossentropy metric class to be used when there are multiple label classes (2 or more). Here we assume that labels are given as a `one_hot` representation. e.g., when label values are [2, 0, 1], `y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution. label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are smoothed, meaning the confidence on label values is relaxed. e.g. `label_smoothing=0.2` means that we will use a value of `0.1` for label `0` and `0.9` for label `1`. Standalone usage: >>> # EPSILON = 1e-7, y = y_true, y` = y_pred >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON) >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] >>> # xent = -sum(y * log(y'), axis = -1) >>> # = -((log 0.95), (log 0.1)) >>> # = [0.051, 2.302] >>> # Reduced xent = (0.051 + 2.302) / 2 >>> m = tf.keras.metrics.CategoricalCrossentropy() >>> m.update_state([[0, 1, 0], [0, 0, 1]], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> m.result().numpy() 1.1769392 >>> m.reset_states() >>> m.update_state([[0, 1, 0], [0, 0, 1]], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], ... sample_weight=tf.constant([0.3, 0.7])) >>> m.result().numpy() 1.6271976 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.CategoricalCrossentropy()]) ```" 4881,SparseCategoricalCrossentropy,tensorflow/tensorflow/python/keras/metrics.py,3082,class,"Computes the crossentropy metric between the labels and predictions. Use this crossentropy metric when there are two or more label classes. We expect labels to be provided as integers. If you want to provide labels using `one-hot` representation, please use `CategoricalCrossentropy` metric. There should be `# classes` floating point values per feature for `y_pred` and a single floating point value per feature for `y_true`. In the snippet below, there is a single floating point value per example for `y_true` and `# classes` floating point values per example for `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is `[batch_size, num_classes]`. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. from_logits: (Optional) Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution. axis: (Optional) Defaults to -1. The dimension along which the metric is computed.
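What `label_smoothing` does to the targets, per the descriptions above (for `K` classes the smoothed floor is `label_smoothing / K`; a sketch, not the TF internals):

```python
import numpy as np

def smooth_labels(y_true, label_smoothing, num_classes=2):
  # y * (1 - ls) + ls / K: confidence is pulled away from 0 and 1.
  return (np.asarray(y_true, dtype=float) * (1.0 - label_smoothing)
          + label_smoothing / num_classes)

print(smooth_labels([0., 1.], 0.2))  # [0.1, 0.9], as stated above
```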
Standalone usage: >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]] >>> # logits = log(y_pred) >>> # softmax = exp(logits) / sum(exp(logits), axis=-1) >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]] >>> # xent = -sum(y * log(softmax), 1) >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181], >>> # [-2.3026, -0.2231, -2.3026]] >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]] >>> # xent = [0.0513, 2.3026] >>> # Reduced xent = (0.0513 + 2.3026) / 2 >>> m = tf.keras.metrics.SparseCategoricalCrossentropy() >>> m.update_state([1, 2], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> m.result().numpy() 1.1769392 >>> m.reset_states() >>> m.update_state([1, 2], ... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]], ... sample_weight=tf.constant([0.3, 0.7])) >>> m.result().numpy() 1.6271976 Usage with `compile()` API: ```python model.compile( optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()]) ```" 4882,SumOverBatchSize,tensorflow/tensorflow/python/keras/metrics.py,3152,class,"Computes the weighted sum over batch size of the given values. For example, if values is [1, 3, 5, 7] then the metric value is 4. If the weights were specified as [1, 1, 0, 0] then the value would be 1. This metric creates two variables, `total` and `count` that are used to compute the average of `values`. This average is ultimately returned as sum over batch size which is an idempotent operation that simply divides `total` by `count`. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values." 4883,SumOverBatchSizeMetricWrapper,tensorflow/tensorflow/python/keras/metrics.py,3174,class,Wraps a function with the `SumOverBatchSizeMetricWrapper` metric. 4884,accuracy,tensorflow/tensorflow/python/keras/metrics.py,3210,function, 4885,binary_accuracy,tensorflow/tensorflow/python/keras/metrics.py,3222,function,"Calculates how often predictions match binary labels. Standalone usage: >>> y_true = [[1], [1], [0], [0]] >>> y_pred = [[1], [1], [0], [0]] >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred) >>> assert m.shape == (4,) >>> m.numpy() array([1., 1., 1., 1.], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Returns: Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`" 4886,categorical_accuracy,tensorflow/tensorflow/python/keras/metrics.py,3250,function,"Calculates how often predictions match one-hot labels. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as `y_pred`, since argmax of logits and probabilities is the same. Args: y_true: One-hot ground truth values. y_pred: The prediction values. Returns: Categorical accuracy values." 4887,sparse_categorical_accuracy,tensorflow/tensorflow/python/keras/metrics.py,3279,function,"Calculates how often predictions match integer labels. Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as `y_pred`, since argmax of logits and probabilities is the same.
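The accuracy functions above reduce to simple array comparisons; a NumPy sketch of `sparse_categorical_accuracy` using the doctest's inputs:

```python
import numpy as np

y_true = np.array([2, 1])
y_pred = np.array([[0.1, 0.9, 0.8], [0.05, 0.95, 0.0]])
# Compare the argmax of the predicted distribution with the integer label.
acc = (np.argmax(y_pred, axis=-1) == y_true).astype(np.float32)
print(acc)  # [0., 1.], matching tf.keras.metrics.sparse_categorical_accuracy
```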
Args: y_true: Integer ground truth values. y_pred: The prediction values. Returns: Sparse categorical accuracy values." 4888,top_k_categorical_accuracy,tensorflow/tensorflow/python/keras/metrics.py,3320,function,"Computes how often targets are in the top `K` predictions. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: The ground truth values. y_pred: The prediction values. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Top K categorical accuracy value." 4889,sparse_top_k_categorical_accuracy,tensorflow/tensorflow/python/keras/metrics.py,3346,function,"Computes how often integer targets are in the top `K` predictions. Standalone usage: >>> y_true = [2, 1] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy( ... y_true, y_pred, k=3) >>> assert m.shape == (2,) >>> m.numpy() array([1., 1.], dtype=float32) Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. Returns: Sparse top K categorical accuracy value." 4890,cosine_proximity,tensorflow/tensorflow/python/keras/metrics.py,3380,function,"Computes the cosine similarity between labels and predictions. Args: y_true: The ground truth values. y_pred: The prediction values. axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. Returns: Cosine similarity value." 4891,clone_metric,tensorflow/tensorflow/python/keras/metrics.py,3407,function,"Returns a clone of the metric if stateful, otherwise returns it as is." 4892,clone_metrics,tensorflow/tensorflow/python/keras/metrics.py,3415,function,Clones the given metric list/dict. 4893,serialize,tensorflow/tensorflow/python/keras/metrics.py,3421,function,"Serializes metric function or `Metric` instance. Arguments: metric: A Keras `Metric` instance or a metric function. Returns: Metric configuration dictionary." 4894,deserialize,tensorflow/tensorflow/python/keras/metrics.py,3434,function,"Deserializes a serialized metric class/function instance. Arguments: config: Metric configuration. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras `Metric` instance or a metric function." 4895,get,tensorflow/tensorflow/python/keras/metrics.py,3453,function,"Retrieves a Keras metric as a `function`/`Metric` class instance. The `identifier` may be the string name of a metric function or class. >>> metric = tf.keras.metrics.get(""categorical_crossentropy"") >>> type(metric) >>> metric = tf.keras.metrics.get(""CategoricalCrossentropy"") >>> type(metric) You can also specify `config` of the metric to this function by passing dict containing `class_name` and `config` as an identifier. Also note that the `class_name` must map to a `Metric` class >>> identifier = {""class_name"": ""CategoricalCrossentropy"", ... ""config"": {""from_logits"": True}} >>> metric = tf.keras.metrics.get(identifier) >>> type(metric) Arguments: identifier: A metric identifier. One of None or string name of a metric function/class or metric configuration dictionary or a metric function or a metric class instance Returns: A Keras metric as a `function`/ `Metric` class instance. 
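A NumPy check of the cosine similarity formula from the `cosine_proximity` entry above, `(a . b) / (||a|| ||b||)` along `axis` (illustrative only):

```python
import numpy as np

def cosine_similarity(y_true, y_pred, axis=-1):
  y_true = np.asarray(y_true, dtype=float)
  y_pred = np.asarray(y_pred, dtype=float)
  dot = np.sum(y_true * y_pred, axis=axis)
  norms = np.linalg.norm(y_true, axis=axis) * np.linalg.norm(y_pred, axis=axis)
  return dot / norms

# Same rows as the CosineSimilarity doctest earlier; their mean is ~0.5.
print(cosine_similarity([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]]))  # [0. 1.]
```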
Raises: ValueError: If `identifier` cannot be interpreted." 4896,is_built_in,tensorflow/tensorflow/python/keras/metrics.py,3497,function, 4897,FalsePositivesTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,41,class, 4898,FalseNegativesTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,121,class, 4899,TrueNegativesTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,189,class, 4900,TruePositivesTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,257,class, 4901,PrecisionTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,324,class, 4902,RecallTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,529,class, 4903,SensitivityAtSpecificityTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,733,class, 4904,SpecificityAtSensitivityTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,830,class, 4905,PrecisionAtRecallTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,926,class, 4906,RecallAtPrecisionTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,1025,class, 4907,AUCTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,1140,class, 4908,MultiAUCTest,tensorflow/tensorflow/python/keras/metrics_confusion_matrix_test.py,1414,class, 4909,get_multi_io_model,tensorflow/tensorflow/python/keras/metrics_correctness_test.py,34,function, 4910,custom_generator_multi_io,tensorflow/tensorflow/python/keras/metrics_correctness_test.py,48,function, 4911,TestMetricsCorrectnessMultiIO,tensorflow/tensorflow/python/keras/metrics_correctness_test.py,71,class, 4912,TestMetricsCorrectnessSingleIO,tensorflow/tensorflow/python/keras/metrics_correctness_test.py,348,class, 4913,TestOutputLossMetrics,tensorflow/tensorflow/python/keras/metrics_correctness_test.py,564,class, 4914,KerasFunctionalMetricsTest,tensorflow/tensorflow/python/keras/metrics_functional_test.py,29,class, 4915,KerasSumTest,tensorflow/tensorflow/python/keras/metrics_test.py,53,class, 4916,MeanTest,tensorflow/tensorflow/python/keras/metrics_test.py,170,class, 4917,KerasAccuracyTest,tensorflow/tensorflow/python/keras/metrics_test.py,363,class, 4918,CosineSimilarityTest,tensorflow/tensorflow/python/keras/metrics_test.py,607,class, 4919,MeanAbsoluteErrorTest,tensorflow/tensorflow/python/keras/metrics_test.py,668,class, 4920,MeanAbsolutePercentageErrorTest,tensorflow/tensorflow/python/keras/metrics_test.py,706,class, 4921,MeanSquaredErrorTest,tensorflow/tensorflow/python/keras/metrics_test.py,746,class, 4922,MeanSquaredLogarithmicErrorTest,tensorflow/tensorflow/python/keras/metrics_test.py,784,class, 4923,HingeTest,tensorflow/tensorflow/python/keras/metrics_test.py,824,class, 4924,SquaredHingeTest,tensorflow/tensorflow/python/keras/metrics_test.py,879,class, 4925,CategoricalHingeTest,tensorflow/tensorflow/python/keras/metrics_test.py,940,class, 4926,RootMeanSquaredErrorTest,tensorflow/tensorflow/python/keras/metrics_test.py,980,class, 4927,TopKCategoricalAccuracyTest,tensorflow/tensorflow/python/keras/metrics_test.py,1014,class, 4928,SparseTopKCategoricalAccuracyTest,tensorflow/tensorflow/python/keras/metrics_test.py,1061,class, 4929,LogCoshErrorTest,tensorflow/tensorflow/python/keras/metrics_test.py,1108,class, 4930,PoissonTest,tensorflow/tensorflow/python/keras/metrics_test.py,1151,class, 4931,KLDivergenceTest,tensorflow/tensorflow/python/keras/metrics_test.py,1197,class, 
4932,MeanRelativeErrorTest,tensorflow/tensorflow/python/keras/metrics_test.py,1244,class, 4933,MeanIoUTest,tensorflow/tensorflow/python/keras/metrics_test.py,1300,class, 4934,MeanTensorTest,tensorflow/tensorflow/python/keras/metrics_test.py,1382,class, 4935,BinaryCrossentropyTest,tensorflow/tensorflow/python/keras/metrics_test.py,1535,class, 4936,CategoricalCrossentropyTest,tensorflow/tensorflow/python/keras/metrics_test.py,1655,class, 4937,SparseCategoricalCrossentropyTest,tensorflow/tensorflow/python/keras/metrics_test.py,1781,class, 4938,BinaryTruePositives,tensorflow/tensorflow/python/keras/metrics_test.py,1931,class, 4939,BinaryTruePositivesViaControlFlow,tensorflow/tensorflow/python/keras/metrics_test.py,1955,class, 4940,CustomMetricsTest,tensorflow/tensorflow/python/keras/metrics_test.py,1980,class, 4941,_get_model,tensorflow/tensorflow/python/keras/metrics_test.py,2066,function, 4942,ResetStatesTest,tensorflow/tensorflow/python/keras/metrics_test.py,2082,class, 4943,share_weights,tensorflow/tensorflow/python/keras/models.py,56,function, 4944,_clone_layer,tensorflow/tensorflow/python/keras/models.py,60,function, 4945,_insert_ancillary_layers,tensorflow/tensorflow/python/keras/models.py,64,function,Inserts ancillary layers into the model with the proper order. 4946,_make_new_nodes,tensorflow/tensorflow/python/keras/models.py,77,function,"Uses the layers in `layer_map` to make new nodes based on `nodes_by_depth`. Args: nodes_by_depth: Provides structure information to create new nodes. layer_fn: Function to clone layers. layer_map: Map from layers in `model` to new layers. tensor_map: Map from tensors in `model` to newly compute tensors. Returns: A set of new nodes. `layer_map` and `tensor_map` are updated." 4947,_clone_functional_model,tensorflow/tensorflow/python/keras/models.py,133,function,"Clone a functional `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Input layers are always cloned. Arguments: model: Instance of `Model`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. layer_fn: callable to be applied on non-input layers in the model. By default it clones the layer. Another example is to preserve the layer to share the weights. This is required when we create a per-replica copy of the model with distribution strategy; we want the weights to be shared but still feed inputs separately so we create new input layers. Returns: An instance of `Model` reproducing the behavior of the original model, on top of new inputs tensors, using newly instantiated weights. Raises: ValueError: in case of invalid `model` argument value or `layer_fn` argument value." 4948,_clone_layers_and_model_config,tensorflow/tensorflow/python/keras/models.py,219,function,"Clones all layers, and returns the model config without serializing layers. This function ensures that only the node graph is retrieved when getting the model config. The `layer_fn` used to clone layers might not rely on `layer.get_config()`, so some custom layers do not define `get_config`. Trying to retrieve the config results in errors. Args: model: A Functional model. input_layers: Dictionary mapping input layers in `model` to new input layers layer_fn: Function used to clone all non-input layers. Returns: Model config object, and a dictionary of newly created layers." 
4949,_remove_ancillary_layers,tensorflow/tensorflow/python/keras/models.py,252,function,"Removes and returns any ancillary layers from `layers` based on `model`. Ancillary layers are part of the model topology but not used to compute the model outputs, e.g., layers from `add_loss` and `add_metric`. Args: model: A Keras Model. layer_map: A map from layers in the `model` to those in `layers`. layers: A list of all layers. Returns: Two lists of layers: (1) `layers` with the ancillary layers removed, and (2) the ancillary layers." 4950,_clone_sequential_model,tensorflow/tensorflow/python/keras/models.py,281,function,"Clone a `Sequential` model instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments: model: Instance of `Sequential`. input_tensors: optional list of input tensors to build the model upon. If not provided, placeholders will be created. layer_fn: callable to be applied on non-input layers in the model. By default it clones the layer. Another example is to preserve the layer so that its weights are shared. This is required when we create a per-replica copy of the model with distribution strategy; we want the weights to be shared but still feed inputs separately, so we create new input layers. Returns: An instance of `Sequential` reproducing the behavior of the original model, on top of new input tensors, using newly instantiated weights. Raises: ValueError: in case of invalid `model` argument value or `layer_fn` argument value." 4951,clone_model,tensorflow/tensorflow/python/keras/models.py,387,function,"Clone any `Model` instance. Model cloning is similar to calling a model on new inputs, except that it creates new layers (and thus new weights) instead of sharing the weights of the existing layers. Arguments: model: Instance of `Model` (could be a functional model or a Sequential model). input_tensors: optional list of input tensors or InputLayer objects to build the model upon. If not provided, placeholders will be created. clone_function: Callable to be used to clone each layer in the target model (except `InputLayer` instances). It takes as argument the layer instance to be cloned, and returns the corresponding layer instance to be used in the model copy. If unspecified, this callable defaults to the following serialization/deserialization function: `lambda layer: layer.__class__.from_config(layer.get_config())`. By passing a custom callable, you can customize your copy of the model, e.g. by wrapping certain layers of interest (you might want to replace all `LSTM` instances with equivalent `Bidirectional(LSTM(...))` instances, for example). Returns: An instance of `Model` reproducing the behavior of the original model, on top of new input tensors, using newly instantiated weights. The cloned model might behave differently from the original model if a custom clone_function modifies the layer. Raises: ValueError: in case of invalid `model` argument value." 4952,_in_place_subclassed_model_reset,tensorflow/tensorflow/python/keras/models.py,433,function,"Substitute for model cloning that works for subclassed models. Subclassed models cannot be cloned because their topology is not serializable. To ""instantiate"" an identical model in a new TF graph, we reuse the original model object, but we clear its state. 
After calling this function on a model instance, you can use the model instance as if it were a model clone (in particular you can use it in a new graph). This method clears the state of the input model. It is thus destructive. However, the original state can be restored fully by calling `_in_place_subclassed_model_state_restoration`. Args: model: Instance of a Keras model created via subclassing. Raises: ValueError: In case the model uses a subclassed model as an inner layer." 4953,_reset_build_compile_trackers,tensorflow/tensorflow/python/keras/models.py,533,function,"Reset state trackers for model. Note that we do not actually zero out attributes such as optimizer, but instead rely on the expectation that all of the attrs will be over-written on calling build/compile/etc. This is somewhat fragile, insofar as we check elsewhere for the presence of these attributes as evidence of having been built/compiled/etc. Pending a better way to do this, we reset key attributes here to allow building and compiling. Args: model: the model that is being reset." 4954,in_place_subclassed_model_state_restoration,tensorflow/tensorflow/python/keras/models.py,557,function,"Restores the original state of a model after it was ""reset"". This undoes the action of `_in_place_subclassed_model_reset`, which is called in `clone_and_build_model` if `in_place_reset` is set to True. Args: model: Instance of a Keras model created via subclassing, on which `_in_place_subclassed_model_reset` was previously called." 4955,clone_and_build_model,tensorflow/tensorflow/python/keras/models.py,589,function,"Clone a `Model` and build/compile it with the same settings used before. This function can be run in the same graph or in a separate graph from the model. When using a separate graph, `in_place_reset` must be `False`. Note that, currently, the clone produced from this function may not work with TPU DistributionStrategy. Try at your own risk. Args: model: `tf.keras.Model` object. Can be Functional, Sequential, or sub-classed. input_tensors: Optional list or dictionary of input tensors to build the model upon. If not provided, placeholders will be created. target_tensors: Optional list of target tensors for compiling the model. If not provided, placeholders will be created. custom_objects: Optional dictionary mapping string names to custom classes or functions. compile_clone: Boolean, whether to compile model clone (default `True`). in_place_reset: Boolean, whether to reset the model in place. Only used if the model is a subclassed model. In the case of a subclassed model, this argument must be set to `True` (default `False`). To restore the original model, use the function `in_place_subclassed_model_state_restoration(model)`. optimizer_iterations: An iterations variable that will be incremented by the optimizer if the clone is compiled. This argument is used when a Keras model is cloned into an Estimator model function, because Estimators create their own global step variable. optimizer_config: Optimizer config dictionary or list of dictionary returned from `get_config()`. This argument should be defined if `clone_and_build_model` is called in a different graph or session from the original model, and the optimizer is an instance of `OptimizerV2`. Returns: Clone of the model. Raises: ValueError: Cloning fails in the following cases - cloning a subclassed model with `in_place_reset` set to False. - compiling the clone when the original model has not been compiled." 
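As a usage illustration for `clone_model` above: the docstring's own suggestion of swapping every `LSTM` for an equivalent `Bidirectional(LSTM(...))` via `clone_function` can be sketched as follows (the model definition here is a hypothetical example, not taken from the source):

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(16, input_shape=(10, 8)),
    tf.keras.layers.Dense(1),
])

def clone_function(layer):
  # Wrap LSTMs as suggested in the clone_model docstring; clone all
  # other layers with the documented default (config round-trip).
  if isinstance(layer, tf.keras.layers.LSTM):
    return tf.keras.layers.Bidirectional(
        tf.keras.layers.LSTM.from_config(layer.get_config()))
  return layer.__class__.from_config(layer.get_config())

# New layers, new weights; the original model is left untouched.
clone = tf.keras.models.clone_model(model, clone_function=clone_function)
```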
4956,TestModel,tensorflow/tensorflow/python/keras/models_test.py,43,class,A model subclass. 4957,_get_layers,tensorflow/tensorflow/python/keras/models_test.py,56,function, 4958,_get_model,tensorflow/tensorflow/python/keras/models_test.py,73,function, 4959,TestModelCloning,tensorflow/tensorflow/python/keras/models_test.py,79,class, 4960,_has_placeholder,tensorflow/tensorflow/python/keras/models_test.py,307,function, 4961,CheckpointingTests,tensorflow/tensorflow/python/keras/models_test.py,312,class, 4962,TestModelBackend,tensorflow/tensorflow/python/keras/models_test.py,338,class, 4963,TestCloneAndBuildModel,tensorflow/tensorflow/python/keras/models_test.py,356,class, 4964,Optimizer,tensorflow/tensorflow/python/keras/optimizers.py,47,class,"Abstract optimizer base class. Note: this is the parent class of all optimizers, not an actual optimizer that can be used for training models. All Keras optimizers support the following keyword arguments: clipnorm: float >= 0. Gradients will be clipped when their L2 norm exceeds this value. clipvalue: float >= 0. Gradients will be clipped when their absolute value exceeds this value." 4965,SGD,tensorflow/tensorflow/python/keras/optimizers.py,174,class,"Stochastic gradient descent optimizer. Includes support for momentum, learning rate decay, and Nesterov momentum. Arguments: lr: float >= 0. Learning rate. momentum: float >= 0. Parameter that accelerates SGD in the relevant direction and dampens oscillations. decay: float >= 0. Learning rate decay over each update. nesterov: boolean. Whether to apply Nesterov momentum." 4966,RMSprop,tensorflow/tensorflow/python/keras/optimizers.py,243,class,"RMSProp optimizer. It is recommended to leave the parameters of this optimizer at their default values (except the learning rate, which can be freely tuned). Arguments: lr: float >= 0. Learning rate. rho: float >= 0. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`. decay: float >= 0. Learning rate decay over each update." 4967,Adagrad,tensorflow/tensorflow/python/keras/optimizers.py,310,class,"Adagrad optimizer. Adagrad is an optimizer with parameter-specific learning rates, which are adapted relative to how frequently a parameter gets updated during training. The more updates a parameter receives, the smaller the updates. It is recommended to leave the parameters of this optimizer at their default values. # Arguments lr: float >= 0. Initial learning rate. epsilon: float >= 0. If `None`, defaults to `K.epsilon()`. decay: float >= 0. Learning rate decay over each update. # References - [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)" 4968,Adadelta,tensorflow/tensorflow/python/keras/optimizers.py,383,class,"Adadelta optimizer. Adadelta is a more robust extension of Adagrad that adapts learning rates based on a moving window of gradient updates, instead of accumulating all past gradients. This way, Adadelta continues learning even when many updates have been done. Compared to Adagrad, in the original version of Adadelta you don't have to set an initial learning rate. In this version, initial learning rate and decay factor can be set, as in most other Keras optimizers. It is recommended to leave the parameters of this optimizer at their default values. # Arguments lr: float >= 0. Initial learning rate, defaults to 1. It is recommended to leave it at the default value. rho: float >= 0. 
Adadelta decay factor, corresponding to the fraction of gradient to keep at each time step. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`. decay: float >= 0. Initial learning rate decay. # References - [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)" 4969,Adam,tensorflow/tensorflow/python/keras/optimizers.py,472,class,"Adam optimizer. Default parameters follow those provided in the original paper. Arguments: lr: float >= 0. Learning rate. beta_1: float, 0 < beta < 1. Generally close to 1. beta_2: float, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`. decay: float >= 0. Learning rate decay over each update. amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm from the paper ""On the Convergence of Adam and Beyond""." 4970,Adamax,tensorflow/tensorflow/python/keras/optimizers.py,570,class,"Adamax optimizer from Section 7 of the Adam paper. It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. Arguments: lr: float >= 0. Learning rate. beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`. decay: float >= 0. Learning rate decay over each update." 4971,Nadam,tensorflow/tensorflow/python/keras/optimizers.py,658,class,"Nesterov Adam optimizer. Much like Adam is essentially RMSprop with momentum, Nadam is Adam with Nesterov momentum. Default parameters follow those provided in the paper. It is recommended to leave the parameters of this optimizer at their default values. Arguments: lr: float >= 0. Learning rate. beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1. epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`." 4972,TFOptimizer,tensorflow/tensorflow/python/keras/optimizers.py,756,class,Wrapper class for native TensorFlow optimizers. 4973,serialize,tensorflow/tensorflow/python/keras/optimizers.py,831,function, 4974,deserialize,tensorflow/tensorflow/python/keras/optimizers.py,836,function,"Inverse of the `serialize` function. Arguments: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance." 4975,get,tensorflow/tensorflow/python/keras/optimizers.py,873,function,"Retrieves a Keras Optimizer instance. Arguments: identifier: Optimizer identifier, one of - String: name of an optimizer - Dictionary: configuration dictionary. - Keras Optimizer instance (it will be returned unchanged). - TensorFlow Optimizer instance (it will be wrapped as a Keras Optimizer). Returns: A Keras Optimizer instance. Raises: ValueError: If `identifier` cannot be interpreted." 4976,_get_model,tensorflow/tensorflow/python/keras/optimizers_test.py,37,function, 4977,KerasOptimizersTest,tensorflow/tensorflow/python/keras/optimizers_test.py,47,class, 4978,_check_penalty_number,tensorflow/tensorflow/python/keras/regularizers.py,33,function,"Check penalty number availability; raise ValueError if failed." 4979,_none_to_default,tensorflow/tensorflow/python/keras/regularizers.py,46,function, 4980,Regularizer,tensorflow/tensorflow/python/keras/regularizers.py,51,class,"Regularizer base class. Regularizers allow you to apply penalties on layer parameters or layer activity during optimization. These penalties are summed into the loss function that the network optimizes. 
Regularization penalties are applied on a per-layer basis. The exact API will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D` and `Conv3D`) have a unified API. These layers expose 3 keyword arguments: - `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel - `bias_regularizer`: Regularizer to apply a penalty on the layer's bias - `activity_regularizer`: Regularizer to apply a penalty on the layer's output All layers (including custom layers) expose `activity_regularizer` as a settable property, whether or not it is in the constructor arguments. The value returned by the `activity_regularizer` is divided by the input batch size so that the relative weighting between the weight regularizers and the activity regularizers does not change with the batch size. You can access a layer's regularization penalties by calling `layer.losses` after calling the layer on inputs. ## Example >>> layer = tf.keras.layers.Dense( ... 5, input_dim=5, ... kernel_initializer='ones', ... kernel_regularizer=tf.keras.regularizers.L1(0.01), ... activity_regularizer=tf.keras.regularizers.L2(0.01)) >>> tensor = tf.ones(shape=(5, 5)) * 2.0 >>> out = layer(tensor) >>> # The kernel regularization term is 0.25 >>> # The activity regularization term (after dividing by the batch size) is 5 >>> tf.math.reduce_sum(layer.losses) <tf.Tensor: shape=(), dtype=float32, numpy=5.25> ## Available penalties ```python tf.keras.regularizers.L1(0.3) # L1 Regularization Penalty tf.keras.regularizers.L2(0.1) # L2 Regularization Penalty tf.keras.regularizers.L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties ``` ## Directly calling a regularizer Compute a regularization loss on a tensor by directly calling a regularizer as if it is a one-argument function. E.g. >>> regularizer = tf.keras.regularizers.L2(2.) >>> tensor = tf.ones(shape=(5, 5)) >>> regularizer(tensor) <tf.Tensor: shape=(), dtype=float32, numpy=50.0> ## Developing new regularizers Any function that takes in a weight matrix and returns a scalar tensor can be used as a regularizer, e.g.: >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1') ... def l1_reg(weight_matrix): ... return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix)) ... >>> layer = tf.keras.layers.Dense(5, input_dim=5, ... kernel_initializer='ones', kernel_regularizer=l1_reg) >>> tensor = tf.ones(shape=(5, 5)) >>> out = layer(tensor) >>> layer.losses [<tf.Tensor: shape=(), dtype=float32, numpy=0.25>] Alternatively, you can write your custom regularizers in an object-oriented way by extending this regularizer base class, e.g.: >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2') ... class L2Regularizer(tf.keras.regularizers.Regularizer): ... def __init__(self, l2=0.): # pylint: disable=redefined-outer-name ... self.l2 = l2 ... ... def __call__(self, x): ... return self.l2 * tf.math.reduce_sum(tf.math.square(x)) ... ... def get_config(self): ... return {'l2': float(self.l2)} ... >>> layer = tf.keras.layers.Dense( ... 5, input_dim=5, kernel_initializer='ones', ... kernel_regularizer=L2Regularizer(l2=0.5)) >>> tensor = tf.ones(shape=(5, 5)) >>> out = layer(tensor) >>> layer.losses [<tf.Tensor: shape=(), dtype=float32, numpy=12.5>] ### A note on serialization and deserialization: Registering the regularizers as serializable is optional if you are just training and executing models, exporting to and from SavedModels, or saving and loading weight checkpoints. Registration is required for Keras `model_to_estimator`, saving and loading models to HDF5 formats, Keras model cloning, some visualization utilities, and exporting models to and from JSON. 
If using this functionality, you must make sure any Python process running your model has also defined and registered your custom regularizer. `tf.keras.utils.register_keras_serializable` is only available in TF 2.1 and beyond. In earlier versions of TensorFlow you must pass your custom regularizer to the `custom_objects` argument of methods that expect custom regularizers to be registered as serializable." 4981,L1L2,tensorflow/tensorflow/python/keras/regularizers.py,216,class,"A regularizer that applies both L1 and L2 regularization penalties. The L1 regularization penalty is computed as: `loss = l1 * reduce_sum(abs(x))` The L2 regularization penalty is computed as: `loss = l2 * reduce_sum(square(x))` L1L2 may be passed to a layer as a string identifier: >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2') In this case, the default values used are `l1=0.01` and `l2=0.01`. Attributes: l1: Float; L1 regularization factor. l2: Float; L2 regularization factor." 4982,L1,tensorflow/tensorflow/python/keras/regularizers.py,261,class,"A regularizer that applies an L1 regularization penalty. The L1 regularization penalty is computed as: `loss = l1 * reduce_sum(abs(x))` L1 may be passed to a layer as a string identifier: >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1') In this case, the default value used is `l1=0.01`. Attributes: l1: Float; L1 regularization factor." 4983,L2,tensorflow/tensorflow/python/keras/regularizers.py,295,class,"A regularizer that applies an L2 regularization penalty. The L2 regularization penalty is computed as: `loss = l2 * reduce_sum(square(x))` L2 may be passed to a layer as a string identifier: >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2') In this case, the default value used is `l2=0.01`. Attributes: l2: Float; L2 regularization factor." 4984,l1_l2,tensorflow/tensorflow/python/keras/regularizers.py,329,function,"Create a regularizer that applies both L1 and L2 penalties. The L1 regularization penalty is computed as: `loss = l1 * reduce_sum(abs(x))` The L2 regularization penalty is computed as: `loss = l2 * reduce_sum(square(x))` Arguments: l1: Float; L1 regularization factor. l2: Float; L2 regularization factor. Returns: An L1L2 Regularizer with the given regularization factors." 4985,serialize,tensorflow/tensorflow/python/keras/regularizers.py,354,function, 4986,deserialize,tensorflow/tensorflow/python/keras/regularizers.py,359,function, 4987,get,tensorflow/tensorflow/python/keras/regularizers.py,372,function,Retrieve a regularizer instance from a config or identifier. 4988,KerasRegularizersTest,tensorflow/tensorflow/python/keras/regularizers_test.py,38,class, 4989,string_test,tensorflow/tensorflow/python/keras/testing_utils.py,51,function, 4990,numeric_test,tensorflow/tensorflow/python/keras/testing_utils.py,55,function, 4991,get_test_data,tensorflow/tensorflow/python/keras/testing_utils.py,59,function,"Generates test data to train a model on. Arguments: train_samples: Integer, how many training samples to generate. test_samples: Integer, how many test samples to generate. input_shape: Tuple of integers, shape of the inputs. num_classes: Integer, number of classes for the data and targets. random_seed: Integer, random seed used by numpy to generate data. Returns: A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`." 4992,layer_test,tensorflow/tensorflow/python/keras/testing_utils.py,89,function,"Test routine for a layer with a single input and single output. Arguments: layer_cls: Layer class object. 
kwargs: Optional dictionary of keyword arguments for instantiating the layer. input_shape: Input shape tuple. input_dtype: Data type of the input data. input_data: Numpy array of input data. expected_output: Numpy array of the expected output. expected_output_dtype: Data type expected for the output. expected_output_shape: Shape tuple for the expected shape of the output. validate_training: Whether to attempt to validate training on this layer. This might be set to False for non-differentiable layers that output string or integer values. adapt_data: Optional data for an 'adapt' call. If None, adapt() will not be tested for this layer. This is only relevant for PreprocessingLayers. custom_objects: Optional dictionary mapping name strings to custom objects in the layer class. This is helpful for testing custom layers. test_harness: The TensorFlow test, if any, that this function is being called in. Returns: The output data (Numpy array) returned by the layer, for additional checks to be done by the calling code. Raises: ValueError: if `input_shape is None`." 4993,model_type_scope,tensorflow/tensorflow/python/keras/testing_utils.py,307,function,"Provides a scope within which the model type to test is equal to `value`. The model type gets restored to its original value upon exiting the scope. Arguments: value: model type value Yields: The provided value." 4994,run_eagerly_scope,tensorflow/tensorflow/python/keras/testing_utils.py,328,function,"Provides a scope within which we compile models to run eagerly or not. The boolean gets restored to its original value upon exiting the scope. Arguments: value: Bool specifying if we should run models eagerly in the active test. Should be True or False. Yields: The provided value." 4995,use_keras_tensors_scope,tensorflow/tensorflow/python/keras/testing_utils.py,350,function,"Provides a scope within which we use KerasTensors in the func. API or not. The boolean gets restored to its original value upon exiting the scope. Arguments: value: Bool specifying if we should build functional models using KerasTensors in the active test. Should be True or False. Yields: The provided value." 4996,should_run_eagerly,tensorflow/tensorflow/python/keras/testing_utils.py,372,function,Returns whether the models we are testing should be run eagerly. 4997,saved_model_format_scope,tensorflow/tensorflow/python/keras/testing_utils.py,383,function,"Provides a scope within which the saved model format to test is `value`. The saved model format gets restored to its original value upon exiting the scope. Arguments: value: saved model format value Yields: The provided value." 4998,get_save_format,tensorflow/tensorflow/python/keras/testing_utils.py,404,function, 4999,get_model_type,tensorflow/tensorflow/python/keras/testing_utils.py,413,function,Gets the model type that should be tested. 5000,get_small_sequential_mlp,tensorflow/tensorflow/python/keras/testing_utils.py,423,function, 5001,get_small_functional_mlp,tensorflow/tensorflow/python/keras/testing_utils.py,434,function, 5002,SmallSubclassMLP,tensorflow/tensorflow/python/keras/testing_utils.py,442,class,A small MLP implemented as a subclass model. 5003,_SmallSubclassMLPCustomBuild,tensorflow/tensorflow/python/keras/testing_utils.py,467,class,A small MLP subclass model that uses a custom build method. 
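The regularizer entries above (`Regularizer`, `L1L2`, `l1_l2`, and friends) are one-argument callables returning a scalar penalty. A small sketch using only the public `tf.keras.regularizers` API; the penalty arithmetic follows the formulas quoted in the docstrings:

```python
import tensorflow as tf

# String identifiers use the documented defaults (l1=0.01, l2=0.01).
dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2')

# Equivalent explicit construction via the l1_l2 factory.
reg = tf.keras.regularizers.l1_l2(l1=0.01, l2=0.01)

# A regularizer is a one-argument callable returning a scalar:
# l1 * sum(|x|) + l2 * sum(x^2).
x = tf.ones((5, 5))
penalty = reg(x)  # 0.01 * 25 + 0.01 * 25 = 0.5
```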
5004,get_small_subclass_mlp,tensorflow/tensorflow/python/keras/testing_utils.py,487,function, 5005,get_small_subclass_mlp_with_custom_build,tensorflow/tensorflow/python/keras/testing_utils.py,491,function, 5006,get_small_mlp,tensorflow/tensorflow/python/keras/testing_utils.py,495,function,Get a small mlp of the model type specified by `get_model_type`. 5007,_SubclassModel,tensorflow/tensorflow/python/keras/testing_utils.py,509,class,A Keras subclass model. 5008,_SubclassModelCustomBuild,tensorflow/tensorflow/python/keras/testing_utils.py,545,class,A Keras subclass model that uses a custom build method. 5009,get_model_from_layers,tensorflow/tensorflow/python/keras/testing_utils.py,566,function,"Builds a model from a sequence of layers. Args: model_layers: The layers used to build the network. input_shape: Shape tuple of the input or 'TensorShape' instance. input_dtype: Datatype of the input. name: Name for the model. input_ragged: Boolean, whether the input data is a ragged tensor. input_sparse: Boolean, whether the input data is a sparse tensor. Returns: A Keras model." 5010,Bias,tensorflow/tensorflow/python/keras/testing_utils.py,631,class, 5011,_MultiIOSubclassModel,tensorflow/tensorflow/python/keras/testing_utils.py,640,class,Multi IO Keras subclass model. 5012,_MultiIOSubclassModelCustomBuild,tensorflow/tensorflow/python/keras/testing_utils.py,676,class,Multi IO Keras subclass model that uses a custom build method. 5013,get_multi_io_model,tensorflow/tensorflow/python/keras/testing_utils.py,724,function,"Builds a multi-io model that contains two branches. The produced model will be of the type specified by `get_model_type`. To build a two-input, two-output model: Specify a list of layers for branch a and branch b, but do not specify any shared input branch or shared output branch. The resulting model will apply each branch to a different input, to produce two outputs. The first value in branch_a must be the Keras 'Input' layer for branch a, and the first value in branch_b must be the Keras 'Input' layer for branch b. example usage: ``` branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()] branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()] model = get_multi_io_model(branch_a, branch_b) ``` To build a two-input, one-output model: Specify a list of layers for branch a and branch b, and specify a shared output branch. The resulting model will apply each branch to a different input. It will then apply the shared output branch to a tuple containing the intermediate outputs of each branch, to produce a single output. The first layer in the shared_output_branch must be able to merge a tuple of two tensors. The first value in branch_a must be the Keras 'Input' layer for branch a, and the first value in branch_b must be the Keras 'Input' layer for branch b. example usage: ``` input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()] input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()] shared_output_branch = [Concatenate(), Dense(), Dense()] model = get_multi_io_model(input_branch_a, input_branch_b, shared_output_branch=shared_output_branch) ``` To build a one-input, two-output model: Specify a list of layers for branch a and branch b, and specify a shared input branch. The resulting model will take one input, and apply the shared input branch to it. It will then respectively apply each branch to that intermediate result in parallel, to produce two outputs. The first value in the shared_input_branch must be the Keras 'Input' layer for the whole model. 
Branch a and branch b should not contain any Input layers. example usage: ``` shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()] output_branch_a = [Dense(), Dense()] output_branch_b = [Dense(), Dense()] model = get_multi_io_model(output_branch_a, output_branch_b, shared_input_branch=shared_input_branch) ``` Args: branch_a: A sequence of layers for branch a of the model. branch_b: A sequence of layers for branch b of the model. shared_input_branch: An optional sequence of layers to apply to a single input, before applying both branches to that intermediate result. If set, the model will take only one input instead of two. Defaults to None. shared_output_branch: An optional sequence of layers to merge the intermediate results produced by branch a and branch b. If set, the model will produce only one output instead of two. Defaults to None. Returns: A multi-io model of the type specified by `get_model_type`, specified by the different branches." 5014,get_v2_optimizer,tensorflow/tensorflow/python/keras/testing_utils.py,866,function,"Get the v2 optimizer requested. This is only necessary until v2 optimizers are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized Keras v2 optimizer. Raises: ValueError: if an unknown name was passed." 5015,get_expected_metric_variable_names,tensorflow/tensorflow/python/keras/testing_utils.py,891,function,Returns expected metric variable names given names and prefix/suffix. 5016,enable_v2_dtype_behavior,tensorflow/tensorflow/python/keras/testing_utils.py,900,function,Decorator for enabling the layer V2 dtype behavior on a test. 5017,disable_v2_dtype_behavior,tensorflow/tensorflow/python/keras/testing_utils.py,905,function,Decorator for disabling the layer V2 dtype behavior on a test. 5018,_set_v2_dtype_behavior,tensorflow/tensorflow/python/keras/testing_utils.py,910,function,Returns version of 'fn' that runs with v2 dtype behavior on or off. 5019,device,tensorflow/tensorflow/python/keras/testing_utils.py,925,function,Uses GPU when requested and available. 5020,use_gpu,tensorflow/tensorflow/python/keras/testing_utils.py,936,function,Uses GPU when requested and available. 5021,_get_elephant,tensorflow/tensorflow/python/keras/applications/applications_load_weight_test.py,78,function, 5022,ApplicationsLoadWeightTest,tensorflow/tensorflow/python/keras/applications/applications_load_weight_test.py,91,class, 5023,ApplicationsTest,tensorflow/tensorflow/python/keras/applications/applications_test.py,74,class, 5024,_get_output_shape,tensorflow/tensorflow/python/keras/applications/applications_test.py,135,function, 5025,dense_block,tensorflow/tensorflow/python/keras/applications/densenet.py,57,function,"A dense block. Arguments: x: input tensor. blocks: integer, the number of building blocks. name: string, block label. Returns: Output tensor for the block." 5026,transition_block,tensorflow/tensorflow/python/keras/applications/densenet.py,73,function,"A transition block. Arguments: x: input tensor. reduction: float, compression rate at transition layers. name: string, block label. Returns: output tensor for the block." 5027,conv_block,tensorflow/tensorflow/python/keras/applications/densenet.py,99,function,"A building block for a dense block. Arguments: x: input tensor. 
growth_rate: float, growth rate at dense layers. name: string, block label. Returns: Output tensor for the block." 5028,DenseNet,tensorflow/tensorflow/python/keras/applications/densenet.py,129,function,"Instantiates the DenseNet architecture. Reference: - [Densely Connected Convolutional Networks]( https://arxiv.org/abs/1608.06993) (CVPR 2017) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.densenet.preprocess_input` for an example. Arguments: blocks: numbers of building blocks for the four dense blocks. include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `'channels_last'` data format) or `(3, 224, 224)` (with `'channels_first'` data format)). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. pooling: optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5029,DenseNet121,tensorflow/tensorflow/python/keras/applications/densenet.py,322,function,Instantiates the DenseNet121 architecture. 5030,DenseNet169,tensorflow/tensorflow/python/keras/applications/densenet.py,335,function,Instantiates the DenseNet169 architecture. 5031,DenseNet201,tensorflow/tensorflow/python/keras/applications/densenet.py,348,function,Instantiates the DenseNet201 architecture. 5032,preprocess_input,tensorflow/tensorflow/python/keras/applications/densenet.py,360,function, 5033,decode_predictions,tensorflow/tensorflow/python/keras/applications/densenet.py,366,function, 5034,EfficientNet,tensorflow/tensorflow/python/keras/applications/efficientnet.py,194,function,"Instantiates the EfficientNet architecture using given scaling coefficients. Reference: - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks]( https://arxiv.org/abs/1905.11946) (ICML 2019) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. 
Arguments: width_coefficient: float, scaling coefficient for network width. depth_coefficient: float, scaling coefficient for network depth. default_size: integer, default input image size. dropout_rate: float, dropout rate before final classifier layer. drop_connect_rate: float, dropout rate at skip connections. depth_divisor: integer, a unit of network width. activation: activation function. blocks_args: list of dicts, parameters to construct block modules. model_name: string, model name. include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False. It should have exactly 3 input channels. pooling: optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `avg` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5035,block,tensorflow/tensorflow/python/keras/applications/efficientnet.py,413,function,"An inverted residual block. Arguments: inputs: input tensor. activation: activation function. drop_rate: float between 0 and 1, fraction of the input units to drop. name: string, block label. filters_in: integer, the number of input filters. filters_out: integer, the number of output filters. kernel_size: integer, the dimension of the convolution window. strides: integer, the stride of the convolution. expand_ratio: integer, scaling coefficient for the input filters. se_ratio: float between 0 and 1, fraction to squeeze the input filters. id_skip: boolean. Returns: output tensor for the block." 
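The `width_coefficient`/`depth_divisor` interaction described for `EfficientNet` above is a rounding scheme: scaled filter counts are snapped to multiples of the divisor without dropping more than roughly 10% below the scaled target. A sketch of that scheme; `round_filters` here is a local illustration mirroring the behavior the docstring describes, stated as an assumption rather than the exact library helper:

```python
def round_filters(filters, width_coefficient, divisor=8):
  """Scale a filter count by width_coefficient, rounded to a multiple
  of divisor, never losing more than 10% of the scaled value."""
  scaled = filters * width_coefficient
  new_filters = max(divisor, int(scaled + divisor / 2) // divisor * divisor)
  if new_filters < 0.9 * scaled:
    new_filters += divisor
  return int(new_filters)

print(round_filters(32, 1.0))  # 32: width 1.0 keeps the base counts
print(round_filters(32, 1.2))  # 40: 38.4 snaps up to a multiple of 8
```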
5036,EfficientNetB0,tensorflow/tensorflow/python/keras/applications/efficientnet.py,519,function, 5037,EfficientNetB1,tensorflow/tensorflow/python/keras/applications/efficientnet.py,545,function, 5038,EfficientNetB2,tensorflow/tensorflow/python/keras/applications/efficientnet.py,571,function, 5039,EfficientNetB3,tensorflow/tensorflow/python/keras/applications/efficientnet.py,597,function, 5040,EfficientNetB4,tensorflow/tensorflow/python/keras/applications/efficientnet.py,623,function, 5041,EfficientNetB5,tensorflow/tensorflow/python/keras/applications/efficientnet.py,649,function, 5042,EfficientNetB6,tensorflow/tensorflow/python/keras/applications/efficientnet.py,675,function, 5043,EfficientNetB7,tensorflow/tensorflow/python/keras/applications/efficientnet.py,701,function, 5044,preprocess_input,tensorflow/tensorflow/python/keras/applications/efficientnet.py,736,function, 5045,decode_predictions,tensorflow/tensorflow/python/keras/applications/efficientnet.py,741,function, 5046,write_ckpt_to_h5,tensorflow/tensorflow/python/keras/applications/efficientnet_weight_update_util.py,51,function,"Map the weights in checkpoint file (tf) to h5 file (keras). Args: path_h5: str, path to output hdf5 file to write weights loaded from ckpt files. path_ckpt: str, path to the ckpt files (e.g. 'efficientnet-b0/model.ckpt') that records efficientnet weights from original repo https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet keras_model: keras model, built from keras.applications efficientnet functions (e.g. EfficientNetB0) use_ema: Bool, whether to use ExponentialMovingAverage result or not" 5047,get_variable_names_from_ckpt,tensorflow/tensorflow/python/keras/applications/efficientnet_weight_update_util.py,121,function,"Get list of tensor names from checkpoint. Args: path_ckpt: str, path to the ckpt files. use_ema: Bool, whether to use ExponentialMovingAverage result or not. Returns: List of variable names from checkpoint." 5048,get_tf_blocks,tensorflow/tensorflow/python/keras/applications/efficientnet_weight_update_util.py,145,function,Extract the block names from list of full weight names. 5049,get_keras_blocks,tensorflow/tensorflow/python/keras/applications/efficientnet_weight_update_util.py,154,function,Extract the block names from list of full weight names. 5050,keras_name_to_tf_name_stem_top,tensorflow/tensorflow/python/keras/applications/efficientnet_weight_update_util.py,161,function,"Mapping name in h5 to ckpt that is in stem or top (head). We map the name `keras_name`, which points to a weight in the h5 file, to the name of the corresponding weight in the ckpt file. Args: keras_name: str, the name of weight in the h5 file of keras implementation use_ema: Bool, use the ExponentialMovingAverage result in ckpt or not model_name_tf: str, the name of model in ckpt. Returns: String for the name of weight as in ckpt file. Raises: KeyError: if we cannot parse the keras_name." 5051,keras_name_to_tf_name_block,tensorflow/tensorflow/python/keras/applications/efficientnet_weight_update_util.py,213,function,"Mapping name in h5 to ckpt that belongs to a block. We map the name `keras_name`, which points to a weight in the h5 file, to the name of the corresponding weight in the ckpt file. Args: keras_name: str, the name of weight in the h5 file of keras implementation keras_block: str, the block name for keras implementation (e.g. 'block1a') tf_block: str, the block name for tf implementation (e.g. 'blocks_0') use_ema: Bool, use the ExponentialMovingAverage result in ckpt or not model_name_tf: str, the name of model in ckpt. 
Returns: String for the name of weight as in ckpt file. Raises: ValueError: if `keras_block` does not show up in `keras_name`." 5052,check_match,tensorflow/tensorflow/python/keras/applications/efficientnet_weight_update_util.py,300,function,"Check if the weights in h5 and ckpt match. We match each name from keras_weight_names that is in keras_block and check that there is a 1-1 correspondence to the names from tf_weight_names that are in tf_block. Args: keras_block: str, the block name for keras implementation (e.g. 'block1a') tf_block: str, the block name for tf implementation (e.g. 'blocks_0') keras_weight_names: list of str, weight names in keras implementation tf_weight_names: list of str, weight names in tf implementation model_name_tf: str, the name of model in ckpt." 5053,preprocess_input,tensorflow/tensorflow/python/keras/applications/imagenet_utils.py,104,function,Preprocesses a tensor or Numpy array encoding a batch of images. 5054,decode_predictions,tensorflow/tensorflow/python/keras/applications/imagenet_utils.py,129,function,"Decodes the predictions of an ImageNet model. Arguments: preds: Numpy array encoding a batch of predictions. top: Integer, how many top-guesses to return. Defaults to 5. Returns: A list of lists of top class prediction tuples `(class_name, class_description, score)`. One list of tuples per sample in batch input. Raises: ValueError: In case of invalid shape of the `preds` array (must be 2D)." 5055,_preprocess_numpy_input,tensorflow/tensorflow/python/keras/applications/imagenet_utils.py,169,function,"Preprocesses a Numpy array encoding a batch of images. Arguments: x: Input array, 3D or 4D. data_format: Data format of the image array. mode: One of ""caffe"", ""tf"" or ""torch"". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. - torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. Returns: Preprocessed Numpy array." 5056,_preprocess_symbolic_input,tensorflow/tensorflow/python/keras/applications/imagenet_utils.py,242,function,"Preprocesses a tensor encoding a batch of images. Arguments: x: Input tensor, 3D or 4D. data_format: Data format of the image tensor. mode: One of ""caffe"", ""tf"" or ""torch"". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. - torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. Returns: Preprocessed tensor." 5057,obtain_input_shape,tensorflow/tensorflow/python/keras/applications/imagenet_utils.py,296,function,"Internal utility to compute/validate a model's input shape. Arguments: input_shape: Either None (will return the default network input shape), or a user-provided shape to be validated. default_size: Default input width/height for the model. min_size: Minimum input width/height accepted by the model. data_format: Image data format to use. require_flatten: Whether the model is expected to be linked to a classifier via a Flatten layer. weights: One of `None` (random initialization) or 'imagenet' (pre-training on ImageNet). If weights='imagenet' input channels must be equal to 3. Returns: An integer shape tuple (may include None entries). Raises: ValueError: In case of invalid argument values." 
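The three `mode` values documented for `_preprocess_numpy_input` and `_preprocess_symbolic_input` above can be paraphrased numerically. A NumPy sketch of the documented behavior; the channel statistics are the conventional ImageNet values these utilities use, stated here as an assumption rather than copied from the source:

```python
import numpy as np

TORCH_MEAN = np.array([0.485, 0.456, 0.406])   # RGB means, 'torch' mode
TORCH_STD = np.array([0.229, 0.224, 0.225])    # RGB stds, 'torch' mode
CAFFE_MEAN_BGR = np.array([103.939, 116.779, 123.68])  # BGR means, 'caffe'

def preprocess(x, mode):
  """Mimic the documented 'tf' / 'torch' / 'caffe' preprocessing modes
  on a float array of RGB images with values in [0, 255]."""
  x = np.asarray(x, dtype='float32')
  if mode == 'tf':      # scale pixels to [-1, 1], sample-wise
    return x / 127.5 - 1.0
  if mode == 'torch':   # scale to [0, 1], then normalize per channel
    return (x / 255.0 - TORCH_MEAN) / TORCH_STD
  if mode == 'caffe':   # RGB -> BGR, zero-center per channel, no scaling
    return x[..., ::-1] - CAFFE_MEAN_BGR
  raise ValueError('unknown mode: %s' % mode)

img = np.full((1, 224, 224, 3), 127.5)
print(preprocess(img, 'tf').mean())  # 0.0
```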
5058,correct_pad,tensorflow/tensorflow/python/keras/applications/imagenet_utils.py,388,function,"Returns a tuple for zero-padding for 2D convolution with downsampling. Arguments: inputs: Input tensor. kernel_size: An integer or tuple/list of 2 integers. Returns: A tuple." 5059,validate_activation,tensorflow/tensorflow/python/keras/applications/imagenet_utils.py,411,function,"Validates that the `classifier_activation` is compatible with the weights. Args: classifier_activation: str or callable activation function. weights: The pretrained weights to load. Raises: ValueError: if an activation other than `None` or `softmax` is used with pretrained weights." 5060,TestImageNetUtils,tensorflow/tensorflow/python/keras/applications/imagenet_utils_test.py,29,class, 5061,InceptionResNetV2,tensorflow/tensorflow/python/keras/applications/inception_resnet_v2.py,45,function,"Instantiates the Inception-ResNet v2 architecture. Reference: - [Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning](https://arxiv.org/abs/1602.07261) (AAAI 2017) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.inception_resnet_v2.preprocess_input` for an example. Arguments: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is `False` (otherwise the input shape has to be `(299, 299, 3)` (with `'channels_last'` data format) or `(3, 299, 299)` (with `'channels_first'` data format)). It should have exactly 3 input channels, and width and height should be no smaller than 75. E.g. `(150, 150, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `'avg'` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `'max'` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is `True`, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. **kwargs: For backwards compatibility only. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5062,conv2d_bn,tensorflow/tensorflow/python/keras/applications/inception_resnet_v2.py,251,function,"Utility function to apply conv + BN. Arguments: x: input tensor. filters: filters in `Conv2D`. kernel_size: kernel size as in `Conv2D`. strides: strides in `Conv2D`. padding: padding mode in `Conv2D`. activation: activation in `Conv2D`. use_bias: whether to use a bias in `Conv2D`. 
name: name of the ops; will become `name + '_ac'` for the activation and `name + '_bn'` for the batch norm layer. Returns: Output tensor after applying `Conv2D` and `BatchNormalization`." 5063,inception_resnet_block,tensorflow/tensorflow/python/keras/applications/inception_resnet_v2.py,293,function,"Adds an Inception-ResNet block. This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation): - Inception-ResNet-A: `block_type='block35'` - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` Arguments: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch; the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, and the layer names will have a common prefix `'block35_0'`. activation: activation function to use at the end of the block (see [activations](../activations.md)). When `activation=None`, no activation is applied (i.e., ""linear"" activation: `a(x) = x`). Returns: Output tensor for the block. Raises: ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`." 5064,preprocess_input,tensorflow/tensorflow/python/keras/applications/inception_resnet_v2.py,377,function, 5065,decode_predictions,tensorflow/tensorflow/python/keras/applications/inception_resnet_v2.py,382,function, 5066,InceptionV3,tensorflow/tensorflow/python/keras/applications/inception_v3.py,48,function,"Instantiates the Inception v3 architecture. Reference: - [Rethinking the Inception Architecture for Computer Vision]( http://arxiv.org/abs/1512.00567) (CVPR 2016) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified by `tf.keras.backend.image_data_format()`. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.inception_v3.preprocess_input` for an example. Arguments: include_top: Boolean, whether to include the fully-connected layer at the top, as the last layer of the network. Defaults to `True`. weights: One of `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded. Defaults to `imagenet`. input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. `input_tensor` is useful for sharing inputs between multiple different networks. Defaults to None. input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(299, 299, 3)` (with `channels_last` data format) or `(3, 299, 299)` (with `channels_first` data format)). It should have exactly 3 input channels, and width and height should be no smaller than 75. E.g. `(150, 150, 3)` would be one valid value. `input_shape` will be ignored if the `input_tensor` is provided. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. 
- `None` (default) means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. Defaults to 1000. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5067,conv2d_bn,tensorflow/tensorflow/python/keras/applications/inception_v3.py,362,function,"Utility function to apply conv + BN. Arguments: x: input tensor. filters: filters in `Conv2D`. num_row: height of the convolution kernel. num_col: width of the convolution kernel. padding: padding mode in `Conv2D`. strides: strides in `Conv2D`. name: name of the ops; will become `name + '_conv'` for the convolution and `name + '_bn'` for the batch norm layer. Returns: Output tensor after applying `Conv2D` and `BatchNormalization`." 5068,preprocess_input,tensorflow/tensorflow/python/keras/applications/inception_v3.py,408,function, 5069,decode_predictions,tensorflow/tensorflow/python/keras/applications/inception_v3.py,413,function, 5070,MobileNet,tensorflow/tensorflow/python/keras/applications/mobilenet.py,85,function,"Instantiates the MobileNet architecture. Reference: - [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications]( https://arxiv.org/abs/1704.04861) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified by `tf.keras.backend.image_data_format()`. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.mobilenet.preprocess_input` for an example. Arguments: input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or (3, 224, 224) (with `channels_first` data format)). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. Defaults to `None`. `input_shape` will be ignored if the `input_tensor` is provided. alpha: Controls the width of the network. This is known as the width multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. Defaults to 1.0. depth_multiplier: Depth multiplier for depthwise convolution. This is called the resolution multiplier in the MobileNet paper. Defaults to 1.0. dropout: Dropout rate. Defaults to 0.001. include_top: Boolean, whether to include the fully-connected layer at the top of the network. Defaults to `True`. weights: One of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. Defaults to `imagenet`. 
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. `input_tensor` is useful for sharing inputs between multiple different networks. Defaults to None. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` (default) means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: Optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. Defaults to 1000. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. **kwargs: For backwards compatibility only. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5071,_conv_block,tensorflow/tensorflow/python/keras/applications/mobilenet.py,314,function,"Adds an initial convolution layer (with batch normalization and relu6). Arguments: inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last` data format) or (3, rows, cols) (with `channels_first` data format). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value. filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). alpha: controls the width of the network. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. kernel: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. # Input shape 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. # Output shape 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to stride. Returns: Output tensor of block." 5072,_depthwise_conv_block,tensorflow/tensorflow/python/keras/applications/mobilenet.py,365,function,"Adds a depthwise convolution block. A depthwise convolution block consists of a depthwise conv, batch normalization, relu6, pointwise convolution, batch normalization and relu6 activation. Arguments: inputs: Input tensor of shape `(rows, cols, channels)` (with `channels_last` data format) or (channels, rows, cols) (with `channels_first` data format). 
pointwise_conv_filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the pointwise convolution). alpha: controls the width of the network. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. block_id: Integer, a unique identification designating the block number. # Input shape 4D tensor with shape: `(batch, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, rows, cols, channels)` if data_format='channels_last'. # Output shape 4D tensor with shape: `(batch, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to stride. Returns: Output tensor of block." 5073,preprocess_input,tensorflow/tensorflow/python/keras/applications/mobilenet.py,445,function, 5074,decode_predictions,tensorflow/tensorflow/python/keras/applications/mobilenet.py,450,function, 5075,MobileNetV2,tensorflow/tensorflow/python/keras/applications/mobilenet_v2.py,97,function,"Instantiates the MobileNetV2 architecture. Reference: - [MobileNetV2: Inverted Residuals and Linear Bottlenecks]( https://arxiv.org/abs/1801.04381) (CVPR 2018) Optionally loads weights pre-trained on ImageNet. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.mobilenet_v2.preprocess_input` for an example. Arguments: input_shape: Optional shape tuple, to be specified if you would like to use a model with an input image resolution that is not (224, 224, 3). It should have exactly 3 input channels (224, 224, 3). You can also omit this option if you would like to infer input_shape from an input_tensor. If you choose to include both input_tensor and input_shape then input_shape will be used if they match; if the shapes do not match, an error will be thrown. E.g. `(160, 160, 3)` would be one valid value. alpha: Float between 0 and 1. Controls the width of the network. This is known as the width multiplier in the MobileNetV2 paper, but the name is kept for consistency with `applications.MobileNetV1` model in Keras. - If `alpha` < 1.0, proportionally decreases the number of filters in each layer. - If `alpha` > 1.0, proportionally increases the number of filters in each layer. - If `alpha` = 1, default number of filters from the paper are used at each layer. include_top: Boolean, whether to include the fully-connected layer at the top of the network. Defaults to `True`. weights: String, one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. pooling: String, optional pooling mode for feature extraction when `include_top` is `False`. 
- `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: Integer, optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. **kwargs: For backwards compatibility only. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, invalid input shape, or invalid alpha or rows value when weights='imagenet'. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5076,_inverted_res_block,tensorflow/tensorflow/python/keras/applications/mobilenet_v2.py,417,function,Inverted ResNet block. 5077,_make_divisible,tensorflow/tensorflow/python/keras/applications/mobilenet_v2.py,490,function, 5078,preprocess_input,tensorflow/tensorflow/python/keras/applications/mobilenet_v2.py,501,function, 5079,decode_predictions,tensorflow/tensorflow/python/keras/applications/mobilenet_v2.py,506,function, 5080,NASNet,tensorflow/tensorflow/python/keras/applications/nasnet.py,65,function,"Instantiates a NASNet model. Reference: - [Learning Transferable Architectures for Scalable Image Recognition]( https://arxiv.org/abs/1707.07012) (CVPR 2018) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.nasnet.preprocess_input` for an example. Arguments: input_shape: Optional shape tuple, the input shape is by default `(331, 331, 3)` for NASNetLarge and `(224, 224, 3)` for NASNetMobile. It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value. penultimate_filters: Number of filters in the penultimate layer. NASNet models use the notation `NASNet (N @ P)`, where: - N is the number of blocks - P is the number of penultimate filters num_blocks: Number of repeated blocks of the NASNet model. NASNet models use the notation `NASNet (N @ P)`, where: - N is the number of blocks - P is the number of penultimate filters stem_block_filters: Number of filters in the initial stem block. skip_reduction: Whether to skip the reduction step at the tail end of the network. filter_multiplier: Controls the width of the network. - If `filter_multiplier` < 1.0, proportionally decreases the number of filters in each layer. - If `filter_multiplier` > 1.0, proportionally increases the number of filters in each layer. - If `filter_multiplier` = 1, default number of filters from the paper are used at each layer. include_top: Whether to include the fully-connected layer at the top of the network. weights: `None` (random initialization) or `imagenet` (ImageNet weights). input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. 
- `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: Optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. default_size: Specifies the default image size of the model. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. Returns: A `keras.Model` instance. Raises: ValueError: In case of invalid argument for `weights`, invalid input shape or invalid `penultimate_filters` value. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5081,NASNetMobile,tensorflow/tensorflow/python/keras/applications/nasnet.py,327,function,"Instantiates a Mobile NASNet model in ImageNet mode. Reference: - [Learning Transferable Architectures for Scalable Image Recognition]( https://arxiv.org/abs/1707.07012) (CVPR 2018) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.nasnet.preprocess_input` for an example. Arguments: input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` for NASNetMobile). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value. include_top: Whether to include the fully-connected layer at the top of the network. weights: `None` (random initialization) or `imagenet` (ImageNet weights). For loading `imagenet` weights, `input_shape` should be (224, 224, 3). input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `avg` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: Optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. Returns: A Keras model instance. Raises: ValueError: In case of invalid argument for `weights`, or invalid input shape. RuntimeError: If attempting to run this model with a backend that does not support separable convolutions." 5082,NASNetLarge,tensorflow/tensorflow/python/keras/applications/nasnet.py,403,function,"Instantiates a NASNet model in ImageNet mode. Reference: - [Learning Transferable Architectures for Scalable Image Recognition]( https://arxiv.org/abs/1707.07012) (CVPR 2018) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Caution: Be sure to properly pre-process your inputs to the application. 
Please see `applications.nasnet.preprocess_input` for an example. Arguments: input_shape: Optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(331, 331, 3)` for NASNetLarge). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value. include_top: Whether to include the fully-connected layer at the top of the network. weights: `None` (random initialization) or `imagenet` (ImageNet weights). For loading `imagenet` weights, `input_shape` should be (331, 331, 3). input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `avg` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: Optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. Returns: A Keras model instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. RuntimeError: If attempting to run this model with a backend that does not support separable convolutions." 5083,_separable_conv_block,tensorflow/tensorflow/python/keras/applications/nasnet.py,477,function,"Adds 2 blocks of [relu-separable conv-batchnorm]. Arguments: ip: Input tensor filters: Number of output filters per layer kernel_size: Kernel size of separable convolutions strides: Strided convolution for downsampling block_id: String block_id Returns: A Keras tensor" 5084,_adjust_block,tensorflow/tensorflow/python/keras/applications/nasnet.py,538,function,"Adjusts the input `previous path` to match the shape of the `input`. Used in situations where the output number of filters needs to be changed. Arguments: p: Input tensor which needs to be modified ip: Input tensor whose shape needs to be matched filters: Number of output filters to be matched block_id: String block_id Returns: Adjusted Keras tensor" 5085,_normal_a_cell,tensorflow/tensorflow/python/keras/applications/nasnet.py,623,function,"Adds a Normal cell for NASNet-A (Fig. 4 in the paper). Arguments: ip: Input tensor `x` p: Input tensor `p` filters: Number of output filters block_id: String block_id Returns: A Keras tensor" 5086,_reduction_a_cell,tensorflow/tensorflow/python/keras/applications/nasnet.py,702,function,"Adds a Reduction cell for NASNet-A (Fig. 4 in the paper). Arguments: ip: Input tensor `x` p: Input tensor `p` filters: Number of output filters block_id: String block_id Returns: A Keras tensor" 5087,preprocess_input,tensorflow/tensorflow/python/keras/applications/nasnet.py,803,function, 5088,decode_predictions,tensorflow/tensorflow/python/keras/applications/nasnet.py,808,function, 5089,ResNet,tensorflow/tensorflow/python/keras/applications/resnet.py,60,function,"Instantiates the ResNet, ResNetV2, and ResNeXt architectures. Reference: - [Deep Residual Learning for Image Recognition]( https://arxiv.org/abs/1512.03385) (CVPR 2015) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. 
Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.resnet.preprocess_input` for an example. Arguments: stack_fn: a function that returns output tensor for the stacked residual blocks. preact: whether to use pre-activation or not (True for ResNetV2, False for ResNet and ResNeXt). use_bias: whether to use biases for convolutional layers or not (True for ResNet and ResNetV2, False for ResNeXt). model_name: string, model name. include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or `(3, 224, 224)` (with `channels_first` data format)). It should have exactly 3 input channels. pooling: optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `avg` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. **kwargs: For backwards compatibility only. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5090,block1,tensorflow/tensorflow/python/keras/applications/resnet.py,229,function,"A residual block. Arguments: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. stride: default 1, stride of the first layer. conv_shortcut: default True, use convolution shortcut if True, otherwise identity shortcut. name: string, block label. Returns: Output tensor for the residual block." 5091,stack1,tensorflow/tensorflow/python/keras/applications/resnet.py,274,function,"A set of stacked residual blocks. Arguments: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. name: string, stack label. Returns: Output tensor for the stacked blocks." 5092,block2,tensorflow/tensorflow/python/keras/applications/resnet.py,293,function,"A residual block. Arguments: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. stride: default 1, stride of the first layer. conv_shortcut: default False, use convolution shortcut if True, otherwise identity shortcut. name: string, block label. Returns: Output tensor for the residual block." 5093,stack2,tensorflow/tensorflow/python/keras/applications/resnet.py,342,function,"A set of stacked residual blocks. Arguments: x: input tensor. 
filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. name: string, stack label. Returns: Output tensor for the stacked blocks." 5094,block3,tensorflow/tensorflow/python/keras/applications/resnet.py,362,function,"A residual block. Arguments: x: input tensor. filters: integer, filters of the bottleneck layer. kernel_size: default 3, kernel size of the bottleneck layer. stride: default 1, stride of the first layer. groups: default 32, group size for grouped convolution. conv_shortcut: default True, use convolution shortcut if True, otherwise identity shortcut. name: string, block label. Returns: Output tensor for the residual block." 5095,stack3,tensorflow/tensorflow/python/keras/applications/resnet.py,431,function,"A set of stacked residual blocks. Arguments: x: input tensor. filters: integer, filters of the bottleneck layer in a block. blocks: integer, blocks in the stacked blocks. stride1: default 2, stride of the first layer in the first block. groups: default 32, group size for grouped convolution. name: string, stack label. Returns: Output tensor for the stacked blocks." 5096,ResNet50,tensorflow/tensorflow/python/keras/applications/resnet.py,459,function,Instantiates the ResNet50 architecture. 5097,ResNet101,tensorflow/tensorflow/python/keras/applications/resnet.py,480,function,Instantiates the ResNet101 architecture. 5098,ResNet152,tensorflow/tensorflow/python/keras/applications/resnet.py,501,function,Instantiates the ResNet152 architecture. 5099,preprocess_input,tensorflow/tensorflow/python/keras/applications/resnet.py,522,function, 5100,decode_predictions,tensorflow/tensorflow/python/keras/applications/resnet.py,529,function, 5101,ResNet50V2,tensorflow/tensorflow/python/keras/applications/resnet_v2.py,33,function,Instantiates the ResNet50V2 architecture. 5102,ResNet101V2,tensorflow/tensorflow/python/keras/applications/resnet_v2.py,64,function,Instantiates the ResNet101V2 architecture. 5103,ResNet152V2,tensorflow/tensorflow/python/keras/applications/resnet_v2.py,95,function,Instantiates the ResNet152V2 architecture. 5104,preprocess_input,tensorflow/tensorflow/python/keras/applications/resnet_v2.py,125,function, 5105,decode_predictions,tensorflow/tensorflow/python/keras/applications/resnet_v2.py,131,function, 5106,VGG16,tensorflow/tensorflow/python/keras/applications/vgg16.py,46,function,"Instantiates the VGG16 model. Reference: - [Very Deep Convolutional Networks for Large-Scale Image Recognition]( https://arxiv.org/abs/1409.1556) (ICLR 2015) By default, it loads weights pre-trained on ImageNet. Check 'weights' for other options. This model can be built either with 'channels_first' data format (channels, height, width) or 'channels_last' data format (height, width, channels). The default input size for this model is 224x224. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.vgg16.preprocess_input` for an example. Arguments: include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. 
input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or `(3, 224, 224)` (with `channels_first` data format)). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5107,preprocess_input,tensorflow/tensorflow/python/keras/applications/vgg16.py,229,function, 5108,decode_predictions,tensorflow/tensorflow/python/keras/applications/vgg16.py,235,function, 5109,VGG19,tensorflow/tensorflow/python/keras/applications/vgg19.py,46,function,"Instantiates the VGG19 architecture. Reference: - [Very Deep Convolutional Networks for Large-Scale Image Recognition]( https://arxiv.org/abs/1409.1556) (ICLR 2015) By default, it loads weights pre-trained on ImageNet. Check 'weights' for other options. This model can be built either with 'channels_first' data format (channels, height, width) or 'channels_last' data format (height, width, channels). The default input size for this model is 224x224. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.vgg19.preprocess_input` for an example. Arguments: include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or `(3, 224, 224)` (with `channels_first` data format)). It should have exactly 3 input channels, and width and height should be no smaller than 32. E.g. `(200, 200, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. 
The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5110,preprocess_input,tensorflow/tensorflow/python/keras/applications/vgg19.py,234,function, 5111,decode_predictions,tensorflow/tensorflow/python/keras/applications/vgg19.py,240,function, 5112,Xception,tensorflow/tensorflow/python/keras/applications/xception.py,52,function,"Instantiates the Xception architecture. Reference: - [Xception: Deep Learning with Depthwise Separable Convolutions]( https://arxiv.org/abs/1610.02357) (CVPR 2017) Optionally loads weights pre-trained on ImageNet. Note that the data format convention used by the model is the one specified in your Keras config at `~/.keras/keras.json`. Note that the default input image size for this model is 299x299. Caution: Be sure to properly pre-process your inputs to the application. Please see `applications.xception.preprocess_input` for an example. Arguments: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(299, 299, 3)`). It should have exactly 3 input channels, and width and height should be no smaller than 71. E.g. `(150, 150, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. classifier_activation: A `str` or callable. The activation function to use on the ""top"" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the ""top"" layer. Returns: A `keras.Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. ValueError: if `classifier_activation` is not `softmax` or `None` when using a pretrained top layer." 5113,preprocess_input,tensorflow/tensorflow/python/keras/applications/xception.py,318,function, 5114,decode_predictions,tensorflow/tensorflow/python/keras/applications/xception.py,323,function, 5115,TimerCallBack,tensorflow/tensorflow/python/keras/benchmarks/benchmark_util.py,28,class,Callback for logging time in each epoch or batch. 5116,measure_performance,tensorflow/tensorflow/python/keras/benchmarks/benchmark_util.py,49,function,"Run models and measure the performance. Arguments: model_fn: Model function to be benchmarked. x: Input data. See `x` in the `fit()` method of `keras.Model`. y: Target data. See `y` in the `fit()` method of `keras.Model`. epochs: Integer. Number of epochs to train the model. 
If unspecified, `epochs` will default to 2. batch_size: Integer. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. run_iters: Integer. Number of iterations to run the performance measurement. If unspecified, `run_iters` will default to 4. optimizer: String (name of optimizer) or optimizer instance. See `tf.keras.optimizers`. loss: String (name of objective function), objective function or `tf.keras.losses.Loss` instance. See `tf.keras.losses`. metrics: List of metrics to be evaluated by the model during training. See `metrics` in the `compile()` method of `keras.Model`. verbose: 0, 1, 2. Verbosity mode. See `verbose` in the `fit()` method of `keras.Model`. If unspecified, `verbose` will default to 0. num_gpus: Number of GPUs to run the model. distribution_strategy: Distribution strategies. It could be `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified, `distribution_strategy` will default to 'off'. Note that `TPU` and `parameter_server` are not supported yet. Returns: Performance summary, which contains build_time, compile_time, startup_time, avg_epoch_time, wall_time, exp_per_sec, epochs, distribution_strategy. Raises: ValueError: If `x` is None, or if `optimizer` or `loss` is not provided, or if `num_gpus` is negative." 5117,_collective_communication,tensorflow/tensorflow/python/keras/benchmarks/distribution_util.py,32,function,"Return a CollectiveCommunication based on all_reduce_alg. Args: all_reduce_alg: a string specifying which collective communication to pick, or None. Returns: tf.distribute.experimental.CollectiveCommunication object Raises: ValueError: if `all_reduce_alg` not in [None, ""ring"", ""nccl""]" 5118,_mirrored_cross_device_ops,tensorflow/tensorflow/python/keras/benchmarks/distribution_util.py,58,function,"Return a CrossDeviceOps based on all_reduce_alg and num_packs. Args: all_reduce_alg: a string specifying which cross device op to pick, or None. num_packs: an integer specifying number of packs for the cross device op. Returns: tf.distribute.CrossDeviceOps object or None. Raises: ValueError: if `all_reduce_alg` not in [None, ""nccl"", ""hierarchical_copy""]." 5119,get_distribution_strategy,tensorflow/tensorflow/python/keras/benchmarks/distribution_util.py,86,function,"Return a DistributionStrategy for running the model. Args: distribution_strategy: a string specifying which distribution strategy to use. Accepted values are ""off"", ""one_device"", ""mirrored"", and ""multi_worker_mirrored"" -- case insensitive. ""off"" means not to use Distribution Strategy. num_gpus: Number of GPUs to run this model. Returns: tf.distribute.DistributionStrategy object. Raises: ValueError: if `distribution_strategy` is ""off"" or ""one_device"" and `num_gpus` is larger than 1; or `num_gpus` is negative." 5120,configure_cluster,tensorflow/tensorflow/python/keras/benchmarks/distribution_util.py,141,function,"Set multi-worker cluster spec in TF_CONFIG environment variable. Args: worker_hosts: comma-separated list of worker ip:port pairs. Returns: Number of workers in the cluster." 
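The application entries above share one calling convention; a minimal usage sketch (not part of this index) of feature extraction with MobileNet, using the arguments documented above ('imagenet' weights are downloaded on first use, so network access is assumed):

```python
import numpy as np
import tensorflow as tf

# Build MobileNet as a feature extractor: no classification head, and
# global average pooling collapses the 4D conv output to a 2D tensor.
model = tf.keras.applications.MobileNet(
    input_shape=(224, 224, 3),
    include_top=False,
    pooling='avg',
    weights='imagenet')

# Inputs must be run through the matching preprocess_input first.
images = np.random.uniform(0, 255, (2, 224, 224, 3)).astype('float32')
features = model.predict(
    tf.keras.applications.mobilenet.preprocess_input(images))
print(features.shape)  # (2, 1024) for alpha=1.0
```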
5121,get_strategy_scope,tensorflow/tensorflow/python/keras/benchmarks/distribution_util.py,175,function, 5122,DummyContextManager,tensorflow/tensorflow/python/keras/benchmarks/distribution_util.py,184,class, 5123,_run_benchmark,tensorflow/tensorflow/python/keras/benchmarks/eager_microbenchmarks_test.py,30,function, 5124,MicroBenchmarksBase,tensorflow/tensorflow/python/keras/benchmarks/eager_microbenchmarks_test.py,47,class,Run and report benchmark results. 5125,KerasLayerCallOverheadBenchmarks,tensorflow/tensorflow/python/keras/benchmarks/eager_microbenchmarks_test.py,144,class, 5126,KerasModelCPUBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_cpu_benchmark_test.py,33,class,"Required Arguments for measure_performance. x: Input data; it could be Numpy arrays or loaded from tfds. y: Target data. If `x` is a dataset or generator instance, `y` should not be specified. loss: Loss function for model. optimizer: Optimizer for model. Other details can be seen in the `measure_performance()` method of benchmark_util." 5127,SubclassedKerasModel,tensorflow/tensorflow/python/keras/benchmarks/model_components_benchmarks_test.py,32,class, 5128,make_keras_model,tensorflow/tensorflow/python/keras/benchmarks/model_components_benchmarks_test.py,55,function, 5129,make_sequential_keras_model,tensorflow/tensorflow/python/keras/benchmarks/model_components_benchmarks_test.py,70,function, 5130,run_benchmark,tensorflow/tensorflow/python/keras/benchmarks/model_components_benchmarks_test.py,86,function, 5131,KerasComponentsBenchmarks,tensorflow/tensorflow/python/keras/benchmarks/model_components_benchmarks_test.py,103,class, 5132,_imdb_lstm_model,tensorflow/tensorflow/python/keras/benchmarks/model_memory_profile.py,46,function,LSTM model. 5133,main,tensorflow/tensorflow/python/keras/benchmarks/model_memory_profile.py,62,function, 5134,AntirectifierBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py,25,class,Benchmarks for Antirectifier using `tf.test.Benchmark`. 5135,Antirectifier,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py,108,class,Build a simple custom layer. 5136,BidirectionalLSTMBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/bidirectional_lstm_benchmark_test.py,25,class,Benchmarks for Bidirectional LSTM using `tf.test.Benchmark`. 5137,Cifar10CNNBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/cifar10_cnn_benchmark_test.py,25,class,Benchmarks for CNN using `tf.test.Benchmark`. 5138,ConvMnistBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py,27,class,Benchmarks for Convnet using `tf.test.Benchmark`. 5139,HierarchicalRNNBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_hierarchical_rnn_benchmark_test.py,25,class,Benchmarks for Hierarchical RNN using `tf.test.Benchmark`. 5140,IRNNMnistBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_irnn_benchmark_test.py,25,class,Benchmarks for IRNN using `tf.test.Benchmark`. 5141,MLPReutersBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/reuters_mlp_benchmark_test.py,27,class,Benchmarks for MLP using `tf.test.Benchmark`. 
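A hedged sketch of how the benchmark helpers indexed above might compose; `distribution_util` is an internal module, so the import path and the `get_strategy_scope` behavior (a real scope, or `DummyContextManager` when the strategy is 'off') are assumptions read off the rows above:

```python
# Hypothetical usage of the internal benchmark helpers documented above;
# the import path is inferred from the file column of this index.
from tensorflow.python.keras.benchmarks import distribution_util

# 'mirrored' with num_gpus=2 should yield a MirroredStrategy per the
# get_distribution_strategy docstring; 'off'/'one_device' with >1 GPUs raises.
strategy = distribution_util.get_strategy_scope(
    distribution_util.get_distribution_strategy(
        distribution_strategy='mirrored', num_gpus=2))

with strategy:
    pass  # build and compile the benchmarked model here
```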
5142,TextWithTransformerBenchmark,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py,25,class,"Benchmarks for Text classification with Transformer using `tf.test.Benchmark`." 5143,MultiHeadSelfAttention,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py,120,class,Implement multi head self attention as a Keras layer. 5144,TransformerBlock,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py,172,class,Implement a Transformer block as a layer. 5145,TokenAndPositionEmbedding,tensorflow/tensorflow/python/keras/benchmarks/keras_examples_benchmarks/text_classification_transformer_benchmark_test.py,196,class,Implement embedding layer. 5146,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/densenet_benchmark_test.py,25,class, 5147,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/efficientnet_benchmark_test.py,25,class, 5148,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/inception_resnet_v2_benchmark_test.py,25,class, 5149,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/mobilenet_benchmark_test.py,25,class, 5150,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/nasnet_large_benchmark_test.py,25,class, 5151,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/resnet152_v2_benchmark_test.py,25,class, 5152,save_and_load_benchmark,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/saved_model_benchmark_util.py,30,function,Util for saved model benchmarks. 5153,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/vgg_benchmark_test.py,25,class, 5154,BenchmarkSaveApplications,tensorflow/tensorflow/python/keras/benchmarks/saved_model_benchmarks/xception_benchmark_test.py,25,class, 5155,load_data,tensorflow/tensorflow/python/keras/datasets/boston_housing.py,28,function,"Loads the Boston Housing dataset. This is a dataset taken from the StatLib library which is maintained at Carnegie Mellon University. Samples contain 13 attributes of houses at different locations around the Boston suburbs in the late 1970s. Targets are the median values of the houses at a location (in k$). The attributes themselves are defined in the [StatLib website](http://lib.stat.cmu.edu/datasets/boston). Arguments: path: path where to cache the dataset locally (relative to `~/.keras/datasets`). test_split: fraction of the data to reserve as test set. seed: Random seed for shuffling the data before computing the test split. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train, x_test**: numpy arrays with shape `(num_samples, 13)` containing either the training samples (for x_train) or test samples (for x_test). **y_train, y_test**: numpy arrays of shape `(num_samples,)` containing the target scalars. The targets are float scalars typically between 10 and 50 that represent the home prices in k$." 5156,load_batch,tensorflow/tensorflow/python/keras/datasets/cifar.py,26,function,"Internal utility for parsing CIFAR data. Arguments: fpath: path of the file to parse. label_key: key for label data in the retrieved dictionary. Returns: A tuple `(data, labels)`." 
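A minimal usage sketch for the `boston_housing.load_data` entry above (public tf.keras API; the printed shapes assume the standard 506-sample dataset with the default test_split of 0.2):

```python
from tensorflow.keras.datasets import boston_housing

# 13 attributes per sample; targets are median home prices in k$.
(x_train, y_train), (x_test, y_test) = boston_housing.load_data(
    test_split=0.2, seed=113)
print(x_train.shape, y_train.shape)  # (404, 13) (404,)
```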
5157,load_data,tensorflow/tensorflow/python/keras/datasets/cifar10.py,32,function,"Loads [CIFAR10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). This is a dataset of 50,000 32x32 color training images and 10,000 test images, labeled over 10 categories. See more info at the [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html). Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train, x_test**: uint8 arrays of RGB image data with shape `(num_samples, 3, 32, 32)` if `tf.keras.backend.image_data_format()` is `'channels_first'`, or `(num_samples, 32, 32, 3)` if the data format is `'channels_last'`. **y_train, y_test**: uint8 arrays of category labels (integers in range 0-9) each with shape (num_samples, 1)." 5158,load_data,tensorflow/tensorflow/python/keras/datasets/cifar100.py,32,function,"Loads [CIFAR100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). This is a dataset of 50,000 32x32 color training images and 10,000 test images, labeled over 100 fine-grained classes that are grouped into 20 coarse-grained classes. See more info at the [CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html). Arguments: label_mode: one of ""fine"", ""coarse"". If it is ""fine"" the category labels are the fine-grained labels, if it is ""coarse"" the output labels are the coarse-grained superclasses. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train, x_test**: uint8 arrays of RGB image data with shape `(num_samples, 3, 32, 32)` if `tf.keras.backend.image_data_format()` is `'channels_first'`, or `(num_samples, 32, 32, 3)` if the data format is `'channels_last'`. **y_train, y_test**: uint8 arrays of category labels with shape (num_samples, 1). Raises: ValueError: in case of invalid `label_mode`." 5159,load_data,tensorflow/tensorflow/python/keras/datasets/fashion_mnist.py,31,function,"Loads the Fashion-MNIST dataset. This is a dataset of 60,000 28x28 grayscale images of 10 fashion categories, along with a test set of 10,000 images. This dataset can be used as a drop-in replacement for MNIST. The class labels are: | Label | Description | |:-----:|-------------| | 0 | T-shirt/top | | 1 | Trouser | | 2 | Pullover | | 3 | Dress | | 4 | Coat | | 5 | Sandal | | 6 | Shirt | | 7 | Sneaker | | 8 | Bag | | 9 | Ankle boot | Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train, x_test**: uint8 arrays of grayscale image data with shape (num_samples, 28, 28). **y_train, y_test**: uint8 arrays of labels (integers in range 0-9) with shape (num_samples,). License: The copyright for Fashion-MNIST is held by Zalando SE. Fashion-MNIST is licensed under the [MIT license]( https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE)." 5160,load_data,tensorflow/tensorflow/python/keras/datasets/imdb.py,32,function,"Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/). This is a dataset of 25,000 movie reviews from IMDB, labeled by sentiment (positive/negative). Reviews have been preprocessed, and each review is encoded as a list of word indexes (integers). For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer ""3"" encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: ""only consider the top 10,000 most common words, but eliminate the top 20 most common words"". As a convention, ""0"" does not stand for a specific word, but instead is used to encode any unknown word. 
Arguments: path: where to cache the data (relative to `~/.keras/dataset`). num_words: integer or None. Words are ranked by how often they occur (in the training set) and only the `num_words` most frequent words are kept. Any less frequent word will appear as `oov_char` value in the sequence data. If None, all words are kept. Defaults to None, so all words are kept. skip_top: skip the top N most frequently occurring words (which may not be informative). These words will appear as `oov_char` value in the dataset. Defaults to 0, so no words are skipped. maxlen: int or None. Maximum sequence length. Any longer sequence will be truncated. Defaults to None, which means no truncation. seed: int. Seed for reproducible data shuffling. start_char: int. The start of a sequence will be marked with this character. Defaults to 1 because 0 is usually the padding character. oov_char: int. The out-of-vocabulary character. Words that were cut out because of the `num_words` or `skip_top` limits will be replaced with this character. index_from: int. Index actual words with this index and higher. **kwargs: Used for backwards compatibility. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train, x_test**: lists of sequences, which are lists of indexes (integers). If the num_words argument was specified, the maximum possible index value is `num_words - 1`. If the `maxlen` argument was specified, the largest possible sequence length is `maxlen`. **y_train, y_test**: lists of integer labels (1 or 0). Raises: ValueError: in case `maxlen` is so low that no input sequence could be kept. Note that the 'out of vocabulary' character is only used for words that were present in the training set but are not included because they're not making the `num_words` cut here. Words that were not seen in the training set but are in the test set have simply been skipped." 5161,get_word_index,tensorflow/tensorflow/python/keras/datasets/imdb.py,166,function,"Retrieves a dict mapping words to their index in the IMDB dataset. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). Returns: The word index dictionary. Keys are word strings, values are their index." 5162,load_data,tensorflow/tensorflow/python/keras/datasets/mnist.py,28,function,"Loads the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). This is a dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images. More info can be found at the [MNIST homepage](http://yann.lecun.com/exdb/mnist/). Arguments: path: path where to cache the dataset locally (relative to `~/.keras/datasets`). Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train, x_test**: uint8 arrays of grayscale image data with shapes (num_samples, 28, 28). **y_train, y_test**: uint8 arrays of digit labels (integers in range 0-9) with shapes (num_samples,). License: Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset, which is a derivative work from original NIST datasets. MNIST dataset is made available under the terms of the [Creative Commons Attribution-Share Alike 3.0 license.]( https://creativecommons.org/licenses/by-sa/3.0/)" 5163,load_data,tensorflow/tensorflow/python/keras/datasets/reuters.py,32,function,"Loads the Reuters newswire classification dataset. This is a dataset of 11,228 newswires from Reuters, labeled over 46 topics. This was originally generated by parsing and preprocessing the classic Reuters-21578 dataset, but the preprocessing code is no longer packaged with Keras. 
See this [github discussion](https://github.com/keras-team/keras/issues/12072) for more info. Each newswire is encoded as a list of word indexes (integers). For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer ""3"" encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: ""only consider the top 10,000 most common words, but eliminate the top 20 most common words"". As a convention, ""0"" does not stand for a specific word, but instead is used to encode any unknown word. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). num_words: integer or None. Words are ranked by how often they occur (in the training set) and only the `num_words` most frequent words are kept. Any less frequent word will appear as `oov_char` value in the sequence data. If None, all words are kept. Defaults to None, so all words are kept. skip_top: skip the top N most frequently occurring words (which may not be informative). These words will appear as `oov_char` value in the dataset. Defaults to 0, so no words are skipped. maxlen: int or None. Maximum sequence length. Any longer sequence will be truncated. Defaults to None, which means no truncation. test_split: Float between 0 and 1. Fraction of the dataset to be used as test data. Defaults to 0.2, meaning 20% of the dataset is used as test data. seed: int. Seed for reproducible data shuffling. start_char: int. The start of a sequence will be marked with this character. Defaults to 1 because 0 is usually the padding character. oov_char: int. The out-of-vocabulary character. Words that were cut out because of the `num_words` or `skip_top` limits will be replaced with this character. index_from: int. Index actual words with this index and higher. **kwargs: Used for backwards compatibility. Returns: Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. **x_train, x_test**: lists of sequences, which are lists of indexes (integers). If the num_words argument was specified, the maximum possible index value is `num_words - 1`. If the `maxlen` argument was specified, the largest possible sequence length is `maxlen`. **y_train, y_test**: lists of integer labels (1 or 0). Note: The 'out of vocabulary' character is only used for words that were present in the training set but are not included because they're not making the `num_words` cut here. Words that were not seen in the training set but are in the test set have simply been skipped." 5164,get_word_index,tensorflow/tensorflow/python/keras/datasets/reuters.py,155,function,"Retrieves a dict mapping words to their index in the Reuters dataset. Arguments: path: where to cache the data (relative to `~/.keras/dataset`). Returns: The word index dictionary. Keys are word strings, values are their index." 
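A minimal sketch for the `imdb.load_data` and `get_word_index` entries above (public tf.keras API); the `+3` offset reflects the documented `index_from` default of 3, with indexes 0/1/2 reserved for padding, start, and out-of-vocabulary:

```python
from tensorflow.keras.datasets import imdb

# Keep the 10,000 most frequent words; rarer words become the oov_char.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)

# Decode a review: word_index maps word -> frequency rank, and the loaded
# sequences are shifted by index_from (default 3).
word_index = imdb.get_word_index()
inv = {rank + 3: word for word, rank in word_index.items()}
decoded = ' '.join(inv.get(i, '?') for i in x_train[0])
```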
5165,TrainingCheckpointTests,tensorflow/tensorflow/python/keras/distribute/checkpointing_test.py,36,class, 5166,create_test_objects,tensorflow/tensorflow/python/keras/distribute/collective_all_reduce_strategy_test.py,54,function, 5167,CollectiveAllReduceStrategyTestBase,tensorflow/tensorflow/python/keras/distribute/collective_all_reduce_strategy_test.py,81,class, 5168,DistributedCollectiveAllReduceStrategyTest,tensorflow/tensorflow/python/keras/distribute/collective_all_reduce_strategy_test.py,242,class, 5169,DistributedCollectiveAllReduceStrategyTestWithChief,tensorflow/tensorflow/python/keras/distribute/collective_all_reduce_strategy_test.py,272,class, 5170,LocalCollectiveAllReduceStrategy,tensorflow/tensorflow/python/keras/distribute/collective_all_reduce_strategy_test.py,300,class, 5171,MaybeStrategyScope,tensorflow/tensorflow/python/keras/distribute/ctl_correctness_test.py,48,class,Provides a context allowing no distribution strategy. 5172,get_model,tensorflow/tensorflow/python/keras/distribute/ctl_correctness_test.py,66,function, 5173,get_data,tensorflow/tensorflow/python/keras/distribute/ctl_correctness_test.py,81,function, 5174,compute_loss,tensorflow/tensorflow/python/keras/distribute/ctl_correctness_test.py,91,function, 5175,iteration_inside_func,tensorflow/tensorflow/python/keras/distribute/ctl_correctness_test.py,99,function,Helper function to test iterating over data inside a tf.function. 5176,iteration_outside_func,tensorflow/tensorflow/python/keras/distribute/ctl_correctness_test.py,163,function,Helper function to test iterating over data outside a tf.function. 5177,TestDistributionStrategyDnnCorrectness,tensorflow/tensorflow/python/keras/distribute/ctl_correctness_test.py,217,class,Test custom training loop correctness with a simple DNN model. 
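The CTL (custom training loop) correctness tests indexed above exercise the pattern sketched below, iterating over distributed data with `strategy.run` inside a `tf.function`; the model, data, and hyperparameters here are placeholders, not the tests' actual helpers:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    optimizer = tf.keras.optimizers.SGD(0.1)

dataset = tf.data.Dataset.from_tensor_slices(
    (tf.random.normal([64, 4]), tf.random.normal([64, 1]))).batch(8)
dist_dataset = strategy.experimental_distribute_dataset(dataset)

@tf.function
def train_step(dist_inputs):
    def step_fn(x, y):
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean(tf.square(model(x, training=True) - y))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss
    x, y = dist_inputs
    per_replica_loss = strategy.run(step_fn, args=(x, y))
    # Reduce the PerReplica losses to a single scalar.
    return strategy.reduce(
        tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=None)

for batch in dist_dataset:  # iteration outside the tf.function
    train_step(batch)
```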
5178,KerasMetricsTest,tensorflow/tensorflow/python/keras/distribute/custom_training_loop_metrics_test.py,33,class, 5179,CustomModel,tensorflow/tensorflow/python/keras/distribute/custom_training_loop_models_test.py,39,class, 5180,KerasModelsTest,tensorflow/tensorflow/python/keras/distribute/custom_training_loop_models_test.py,55,class, 5181,OptimizerTest,tensorflow/tensorflow/python/keras/distribute/custom_training_loop_optimizer_test.py,33,class, 5182,simple_sequential_model,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,70,function, 5183,simple_subclassed_model,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,78,function, 5184,simple_multi_inputs_multi_outputs_model,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,92,function, 5185,get_multi_inputs_multi_outputs_data,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,104,function, 5186,batch_wrapper,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,147,function, 5187,get_model,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,159,function, 5188,get_sample_weights_model,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,166,function, 5189,get_dataset,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,175,function, 5190,get_predict_dataset,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,184,function, 5191,convert_numpy_to_dataset_with_unknown_cardinality,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,192,function, 5192,multi_input_output_model,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,206,function, 5193,strategy_minus_tpu_combinations,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,245,function, 5194,tpu_strategy_combinations,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,250,function, 5195,tpu_strategy_combinations_graph_only,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,255,function, 5196,all_strategy_combinations,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,259,function, 5197,all_strategy_minus_default_and_tpu_combinations,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,263,function, 5198,all_strategy_combinations_minus_default,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,274,function, 5199,strategy_and_optimizer_combinations,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,279,function, 5200,BatchCountingCB,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,322,class, 5201,TestDistributionStrategyWithNumpyArrays,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,352,class, 5202,TestDistributionStrategyWithDatasets,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,862,class, 5203,TestRegularizerLoss,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,1552,class, 5204,TestDistributionStrategyWithKerasModels,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,1607,class, 5205,_functional_with_add_loss_and_metric,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,2280,function, 5206,_sequential_with_add_loss_and_metric,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,2306,function, 
5207,_functional_with_layer_reuse,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,2336,function, 5208,TestDistributionStrategyWithMultipleAddLossAndMetricCalls,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,2366,class,Tests complex models with multiple add loss and metric calls. 5209,DeterministicModel,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,2416,class,"Deterministic Model that always outputs the same initial result. It verifies the `call` method is run inside the same distribution strategy that the model was initially created under." 5210,TestModelCapturesStrategy,tensorflow/tensorflow/python/keras/distribute/distribute_strategy_test.py,2438,class,Tests that model creation captures the strategy. 5211,set_weights,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,55,function,"Sets the weights of the replicated models. The weights of the replicated models are set to the weights of the original model. The weights of the replicated model are Mirrored variables and hence we need to use the `update` call within a DistributionStrategy scope. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. dist_model: The replicated models on the different devices. weights: The weights of the original model." 5212,unwrap_values,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,83,function,"Unwrap the list of values contained in the PerReplica parameters. This function calls `flatten_per_replica_values` to parse each of the input parameters into a list of values on the different devices. If we set `with_loss_tensor` to be True, we also call `reduce` on the list of losses on the different devices to give us one loss tensor. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. grouped_inputs: PerReplica inputs returned from the train or test function that we ran on each device. grouped_outputs: PerReplica outputs returned from the train or test function that we ran on each device. grouped_updates: PerReplica updates returned from the train or test function that we ran on each device. grouped_session_args: PerReplica session args returned from the train or test function that we ran on each device. with_loss_tensor: Boolean that indicates if we need to add the reduced loss tensor as one of the outputs. Returns: Values of each of the PerReplica parameters." 5213,unwrap_output_dict,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,140,function,Unwrap the list of outputs contained in the PerReplica parameters. 5214,unwrap_outputs,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,172,function,"Unwrap the list of outputs contained in the PerReplica parameters. This function calls `flatten_per_replica_values` to parse each of the input parameters into a list of outputs on the different devices. If we set `with_loss_tensor` to be True, we also call `reduce` on the list of losses on the different devices to give us one loss tensor. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. grouped_outputs: PerReplica outputs returned from the train or test function that we ran on each device. with_loss_tensor: Boolean that indicates if we need to add the reduced loss tensor as one of the outputs. Returns: Values of each of the PerReplica outputs." 
5215,flatten_per_replica_values,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,216,function,"Unwraps and flattens a nest of PerReplica parameters. PerReplica values have one value associated with each device. Each entry in the PerReplica dict has a device `key` and the corresponding value on the device as the `value`. In this function we take a PerReplica value or a list of PerReplica values and return all the values in the PerReplica dict. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. per_replica_values: List of PerReplica object or a single PerReplica object. Returns: List of values of all the PerReplica objects." 5216,validate_callbacks,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,240,function,"Validate whether given callbacks are supported by DistributionStrategy. Args: input_callbacks: List of callbacks passed by the user to fit. optimizer: Optimizer instance used to train the model. Raises: ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the callbacks passed. ValueError: If `write_grads` is one of the parameters passed as part of the TensorBoard callback." 5217,validate_distributed_dataset_inputs,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,275,function,"Validate all the components of a DistributedValue Dataset input. Args: distribution_strategy: The current DistributionStrategy used to call `fit`/`evaluate`. x: Input Dataset DistributedValue object. For example, when we use `MirroredStrategy` this is a PerReplica object with a tensor for each device set in the dict. x can also be a tuple or dict. The keys of the dict should match the names of the input layers of the model. y: Target Dataset DistributedValue object. For example, when we use `MirroredStrategy` this is a PerReplica object with a tensor for each device set in the dict. y can also be a tuple or dict. The keys of the dict should match the names of the output layers of the model. sample_weights: Sample weights Dataset DistributedValue object. For example, when we use `MirroredStrategy` this is a PerReplica object with a tensor for each device set in the dict. Returns: The unwrapped values list of the x and y DistributedValues inputs. Raises: ValueError: If x and y do not have support for being evaluated as tensors. or if x and y contain elements that are not tensors or if x and y contain elements that have a shape or dtype mismatch." 5218,validate_per_replica_inputs,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,325,function,"Validates PerReplica dataset input list. Args: distribution_strategy: The current DistributionStrategy used to call `fit`, `evaluate` and `predict`. x: A list of PerReplica objects that represent the input or target values. Returns: List containing the first element of each of the PerReplica objects in the input list. Raises: ValueError: If any of the objects in the `per_replica_list` is not a tensor." 5219,validate_all_tensor_types,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,362,function, 5220,validate_all_tensor_shapes,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,370,function, 5221,_wait_for_variable_initialization,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,379,function,Utility to wait for variables to be initialized. 
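`flatten_per_replica_values` is internal; a sketch of the same unwrapping via the public `Strategy.experimental_local_results` API, which performs the per-device flattening the docstring above describes:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

# strategy.run returns a PerReplica value (one tensor per device);
# experimental_local_results unwraps it into a flat tuple of tensors,
# the same flattening flatten_per_replica_values performs internally.
per_replica = strategy.run(lambda: tf.constant(1.0))
local_values = strategy.experimental_local_results(per_replica)
print(local_values)  # one tensor per replica
```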
5222,init_restore_or_wait_for_variables,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,402,function,Initialize or restore variables or wait for variables to be initialized. 5223,validate_inputs,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,413,function,"Validate inputs when using DistributionStrategy. Args: x: Model Inputs. y: Model Targets. Raises: ValueError: if input is not a Dataset or a numpy array (when we use MirroredStrategy)." 5224,global_batch_size_supported,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,434,function, 5225,is_tpu_strategy,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,439,function,Returns whether we're executing under a TPU Strategy. 5226,is_dataset_shape_fully_defined,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,445,function,Returns whether a dataset's shapes are fully defined (i.e. it contains no final partial batch). 5227,process_batch_and_step_size,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,452,function,Process the batch size and step size based on input and dist strategy. 5228,get_input_params,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,472,function,"Calculate the number of batches and steps/steps_per_epoch. Args: distribution_strategy: The DistributionStrategy used to compile the model. num_samples: The number of samples from which we determine the batch size and steps. steps: The specified number of steps. batch_size: The specified batch_size. mode: ModeKey representing whether input will be used for training, evaluation, or prediction. This is used to relax the constraints on consuming all the training samples to keep compatibility until we support partial batches. If None, then partial batches are not allowed. Returns: steps: The steps or steps_per_epoch argument depending on whether a user is calling `fit`, `evaluate` or `predict`. If the is_training flag is set we don't require the number of samples to be used completely. batch_size: The batch size to be used in model iterations. Raises: ValueError: If the number of batches or steps evaluates to 0." 5229,get_batch_dimension,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,576,function, 5230,get_iterator,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,584,function, 5231,initialize_iterator,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,591,function, 5232,_get_input_from_iterator,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,598,function,Get elements from the iterator and verify the input shape and type. 5233,_prepare_feed_values,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,623,function,"Prepare feed values to the model execution function. Arguments: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. sample_weights: Optional list of sample weight arrays. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode." 5234,is_distributing_by_cloning,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,672,function,"Decide whether this model is going to be distributed via cloning. We are going to distribute the model by cloning in graph mode. Args: model: Keras model to distribute. Returns: True if the `model` is going to be distributed using cloning and False otherwise."
5235,_custom_compile_for_predict,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,692,function,Custom compile for TPU predict mode. 5236,_build_network_on_replica,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,706,function,"Build an updated model on replicas. We create a new Keras model while sharing the variables from the old graph. Building a new sub-graph is required since the original Keras model creates placeholders for the input and the output that are not accessible until we call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`. The sharing of weights and layers between the old and the new model guarantees that we're using Strategy variables and any updates on either model are reflected correctly in callbacks and loop iterations. We need to make sure we share the optimizers between the old and the new model as well so that optimizer state is not lost if the user is running fit multiple times. Args: model: Model to be replicated across Replicas mode: Which of fit/eval/predict is building the distributed network inputs: Input variables to be passed to the model targets: Target tensor to be passed to model.compile Returns: A new model that shares layers with the old model." 5237,_build_distributed_network,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,776,function,Create a cloned model on each replica. 5238,_clone_and_build_model,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,786,function,Clone and build the given keras_model. 5239,clone_model_on_replicas,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,828,function,Create a cloned model on each replica. 5240,_make_execution_function,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,838,function,Makes or reuses function to run one step of distributed model execution. 5241,_make_execution_function_without_cloning,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,852,function,Creates a function to run one step of distributed model execution. 5242,_make_replica_execution_function,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,883,function,A single step of the distributed execution on a replica. 5243,_make_replicated_models_with_cloning,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,905,function,Build models on each replica. 5244,_make_execution_function_with_cloning,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,916,function,Clones or re-uses models to run one step of distributed model execution. 5245,_make_graph_execution_function,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,950,function,Makes function to run one step of distributed model in graph mode. 5246,_make_eager_execution_function,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,991,function,Makes function to run one step of distributed model eager execution. 5247,_copy_weights_to_distributed_model,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1031,function,Copies weights from original model to distributed models. 5248,_copy_weights_to_original_model,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1043,function,Copies weights from first distributed model back to original model.
5249,_per_replica_aggregate_batch,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1052,function,Aggregates the per-replica batch-level outputs from a distributed step. 5250,_reset_metrics,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1065,function, 5251,get_distributed_model,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1074,function, 5252,set_distributed_model,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1079,function, 5253,get_distributed_function,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1084,function, 5254,set_distributed_function,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1089,function, 5255,_generate_cache_key,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1094,function, 5256,distributed_scope,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1100,function, 5257,call_replica_local_fn,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1105,function,"Call a function that uses replica-local variables. This function correctly handles calling `fn` in a cross-replica context. Arguments: fn: The function to call. *args: Positional arguments to the `fn`. **kwargs: Keyword arguments to `fn`. Returns: The result of calling `fn`." 5258,is_current_worker_chief,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1136,function, 5259,filter_distributed_callbacks,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1140,function,"Filter Callbacks based on the worker context when running multi-worker. Arguments: callbacks_list: A list of `Callback` instances. model: Keras model instance. Returns: The list of `Callback` instances that should be run on this worker." 5260,_update_sample_weight_modes,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1174,function,Update sample_weight_mode of the distributed model. 5261,concat_along_batch_dimension,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils.py,1195,function,Concatenates prediction outputs along the batch dimension. 5262,DistributedTrainingUtilsTest,tensorflow/tensorflow/python/keras/distribute/distributed_training_utils_test.py,28,class, 5263,eager_mode_test_configuration,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,57,function, 5264,graph_mode_test_configuration,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,62,function, 5265,all_strategy_and_input_config_combinations,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,67,function, 5266,strategy_minus_tpu_and_input_config_combinations_eager,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,74,function, 5267,strategies_for_embedding_models,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,81,function,"Returns distribution strategies to test for embedding models. Since embedding models take longer to train, we disregard DefaultStrategy in order to prevent testing timeouts."
5268,test_combinations_for_embedding_model,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,94,function, 5269,test_combinations_with_tpu_strategies,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,109,function, 5270,MaybeDistributionScope,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,120,class,Provides a context allowing no distribution strategy. 5271,batch_wrapper,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,138,function, 5272,get_batch_size,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,144,function, 5273,get_data_size,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,155,function,"Gets the size of data in a list, tuple, dict, or numpy array." 5274,get_shapes,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,168,function, 5275,get_correctness_test_inputs,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,175,function,Generates the inputs for the correctness check when Keras is enabled with DS. 5276,fit_eval_and_predict,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,246,function,Generates results for fit/predict/evaluate for the given model. 5277,compare_results,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,289,function,Compares results of a model compiled with/without a distribution strategy. 5278,should_skip_tpu_with_eager,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,356,function, 5279,LearningRateBatchScheduler,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,362,class,Scheduler that dynamically sets the learning rate of the model. 5280,TestDistributionStrategyCorrectnessBase,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,377,class,Model-agnostic testing infra to test correctness of Keras models. 5281,TestDistributionStrategyEmbeddingModelCorrectnessBase,tensorflow/tensorflow/python/keras/distribute/keras_correctness_test_base.py,588,class,Base class to test correctness of Keras models with embedding layers.
5282,all_strategy_combinations_with_eager_and_graph_modes,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,33,function, 5283,all_strategy_combinations_with_graph_mode,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,39,function, 5284,is_default_strategy,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,45,function, 5285,TestDistributionStrategyDnnCorrectness,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,50,class, 5286,TestDistributionStrategyDnnMetricCorrectness,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,132,class, 5287,TestDistributionStrategyDnnMetricEvalCorrectness,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,171,class, 5288,SubclassedModel,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,219,class, 5289,TestDistributionStrategyDnnCorrectnessWithSubclassedModel,tensorflow/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py,243,class, 5290,DistributionStrategyEmbeddingModelCorrectnessTest,tensorflow/tensorflow/python/keras/distribute/keras_embedding_model_correctness_test.py,28,class, 5291,DistributionStrategySiameseEmbeddingModelCorrectnessTest,tensorflow/tensorflow/python/keras/distribute/keras_embedding_model_correctness_test.py,74,class, 5292,DistributionStrategyCnnCorrectnessTest,tensorflow/tensorflow/python/keras/distribute/keras_image_model_correctness_test.py,29,class, 5293,_labeled_dataset_fn,tensorflow/tensorflow/python/keras/distribute/keras_metrics_test.py,30,function, 5294,_boolean_dataset_fn,tensorflow/tensorflow/python/keras/distribute/keras_metrics_test.py,41,function, 5295,_threshold_dataset_fn,tensorflow/tensorflow/python/keras/distribute/keras_metrics_test.py,54,function, 5296,_regression_dataset_fn,tensorflow/tensorflow/python/keras/distribute/keras_metrics_test.py,67,function, 5297,all_combinations,tensorflow/tensorflow/python/keras/distribute/keras_metrics_test.py,73,function, 5298,tpu_combinations,tensorflow/tensorflow/python/keras/distribute/keras_metrics_test.py,84,function, 5299,KerasMetricsTest,tensorflow/tensorflow/python/keras/distribute/keras_metrics_test.py,92,class, 5300,get_model,tensorflow/tensorflow/python/keras/distribute/keras_optimizer_v2_test.py,39,function, 5301,MirroredStrategyOptimizerV2Test,tensorflow/tensorflow/python/keras/distribute/keras_optimizer_v2_test.py,46,class, 5302,_replica_id,tensorflow/tensorflow/python/keras/distribute/keras_optimizer_v2_test.py,136,function, 5303,strategy_combinations_eager_data_fn,tensorflow/tensorflow/python/keras/distribute/keras_premade_models_test.py,34,function, 5304,get_numpy,tensorflow/tensorflow/python/keras/distribute/keras_premade_models_test.py,47,function, 5305,get_dataset,tensorflow/tensorflow/python/keras/distribute/keras_premade_models_test.py,53,function, 5306,KerasPremadeModelsTest,tensorflow/tensorflow/python/keras/distribute/keras_premade_models_test.py,60,class, 5307,_DistributionStrategyRnnModelCorrectnessTest,tensorflow/tensorflow/python/keras/distribute/keras_rnn_model_correctness_test.py,35,class, 5308,DistributionStrategyGruModelCorrectnessTest,tensorflow/tensorflow/python/keras/distribute/keras_rnn_model_correctness_test.py,72,class, 5309,DistributionStrategyLstmModelCorrectnessTest,tensorflow/tensorflow/python/keras/distribute/keras_rnn_model_correctness_test.py,91,class, 5310,KerasSaveLoadTest,tensorflow/tensorflow/python/keras/distribute/keras_save_load_test.py,27,class, 
5311,strategies_for_stateful_embedding_model,tensorflow/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py,29,function,Returns TPUStrategy with single core device assignment. 5312,test_combinations_for_stateful_embedding_model,tensorflow/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py,38,function, 5313,DistributionStrategyStatefulLstmModelCorrectnessTest,tensorflow/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py,46,class, 5314,Counter,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,43,class,"Counts the number of times each callback method was run. Attributes: method_counts: dict. Contains the counts of times each callback method was run." 5315,TestDistributionStrategyWithCallbacks,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,73,class, 5316,TestDistributionStrategyErrorCases,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,182,class, 5317,TestDistributionStrategyWithLossMasking,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,356,class, 5318,TestDistributionStrategyWithNormalizationLayer,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,391,class, 5319,TestDistributionStrategySaveLoadWeights,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,436,class, 5320,TestDistributionStrategyValidation,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,499,class, 5321,TestDistributionStrategyWithStaticShapes,tensorflow/tensorflow/python/keras/distribute/keras_utils_test.py,536,class, 5322,MinimizeLossStepTest,tensorflow/tensorflow/python/keras/distribute/minimize_loss_test.py,67,class, 5323,MiniModel,tensorflow/tensorflow/python/keras/distribute/mirrored_strategy_test.py,37,class,"Minimal model for mnist. Useful for testing and debugging on slow TPU simulators." 5324,MirroredStrategyDefunTest,tensorflow/tensorflow/python/keras/distribute/mirrored_strategy_test.py,59,class, 5325,_mimic_two_cpus,tensorflow/tensorflow/python/keras/distribute/mirrored_variable_test.py,33,function, 5326,MirroredVariableCreationTest,tensorflow/tensorflow/python/keras/distribute/mirrored_variable_test.py,55,class,"Base class that tests mirrored variable creation. Currently it assumes all strategy objects have two replicas." 5327,ModelAndInput,tensorflow/tensorflow/python/keras/distribute/model_collection_base.py,21,class,Base class to provide model and its corresponding inputs. 5328,checkpoint_exists,tensorflow/tensorflow/python/keras/distribute/multi_worker_callback_tf2_test.py,37,function,Returns whether the checkpoint that `filepath` refers to exists. 5329,_model_setup,tensorflow/tensorflow/python/keras/distribute/multi_worker_callback_tf2_test.py,46,function,"Set up an MNIST Keras model for testing purposes. This function builds an MNIST Keras model and returns relevant information for testing. Args: test_obj: The `TestCase` testing object. file_format: File format for checkpoints. 'tf' or 'h5'. Returns: A tuple of (model, saving_filepath, train_ds, steps) where train_ds is the training dataset." 5330,_get_task_config,tensorflow/tensorflow/python/keras/distribute/multi_worker_callback_tf2_test.py,75,function, 5331,KerasCallbackMultiProcessTest,tensorflow/tensorflow/python/keras/distribute/multi_worker_callback_tf2_test.py,79,class, 5332,ParameterServerStrategy,tensorflow/tensorflow/python/keras/distribute/multi_worker_test.py,52,class,Temporarily mock the original strategy to bypass the cluster_spec check.
5333,_clone_and_build_model,tensorflow/tensorflow/python/keras/distribute/multi_worker_test.py,67,function, 5334,MultiWorkerVerificationCallback,tensorflow/tensorflow/python/keras/distribute/multi_worker_test.py,96,class,"MultiWorkerVerificationCallback verifies the callbacks in a multi-worker scheme. This Callback is intended to be used for verifying that the callback is indeed called the correct number of times in various task types. Attributes: _task_dict: A nested dictionary storing the number of times a callback has been called for a specific task type, task index, and method name. The lookup structure is task_name -> task_id -> tracking_method_name -> invoke_count For example, a _task_dict of { 'ps': { 0: { 'on_epoch_begin': 2 }, 1: { 'on_epoch_begin': 2 } }, 'worker': { 0: { 'on_epoch_begin': 2 }, 1: { 'on_epoch_begin': 2 } } } indicates the ps task has 'on_epoch_begin' called twice on each of the two indices, and likewise for the worker task." 5335,KerasMultiWorkerTestIndependentWorker,tensorflow/tensorflow/python/keras/distribute/multi_worker_test.py,203,class, 5336,mnist_synthetic_dataset,tensorflow/tensorflow/python/keras/distribute/multi_worker_testing_utils.py,28,function,Generate a synthetic MNIST dataset for testing. 5337,get_mnist_model,tensorflow/tensorflow/python/keras/distribute/multi_worker_testing_utils.py,52,function,Define a deterministically-initialized CNN model for MNIST testing. 5338,MultiWorkerTutorialTest,tensorflow/tensorflow/python/keras/distribute/multi_worker_tutorial_test.py,44,class,Test multi-worker training flow demo'ed in go/multi-worker-with-keras. 5339,distributions_and_v1_optimizers,tensorflow/tensorflow/python/keras/distribute/optimizer_combinations.py,81,function,A common set of combinations with DistributionStrategies and Optimizers. 5340,distributions_and_v2_optimizers,tensorflow/tensorflow/python/keras/distribute/optimizer_combinations.py,92,function,A common set of combinations with DistributionStrategies and Optimizers. 5341,distributions_and_v1_and_v2_optimizers,tensorflow/tensorflow/python/keras/distribute/optimizer_combinations.py,103,function,A common set of combinations with DistributionStrategies and Optimizers. 5342,SavedModelSaveAndLoadTest,tensorflow/tensorflow/python/keras/distribute/saved_model_mixed_api_test.py,35,class, 5343,SavedModelKerasModelTest,tensorflow/tensorflow/python/keras/distribute/saved_model_save_load_test.py,35,class, 5344,SavedModelTFModuleTest,tensorflow/tensorflow/python/keras/distribute/saved_model_save_load_test.py,94,class, 5345,is_tpu_strategy,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,66,function, 5346,get_tolerance,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,71,function, 5347,simple_models_with_strategies,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,78,function, 5348,simple_models_with_strategy_pairs,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,85,function, 5349,tfmodule_models_with_strategies,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,93,function, 5350,tfmodule_models_with_strategy_pairs,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,100,function, 5351,load_and_run_with_saved_model_api,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,108,function,"Loads a saved_model using the tf.saved_model API, and runs it."
5352,TestSavedModelBase,tensorflow/tensorflow/python/keras/distribute/saved_model_test_base.py,130,class,Base class for testing saving/loading with DS. 5353,_get_data_for_simple_models,tensorflow/tensorflow/python/keras/distribute/simple_models.py,35,function, 5354,SimpleFunctionalModel,tensorflow/tensorflow/python/keras/distribute/simple_models.py,44,class,A simple functional model and its inputs. 5355,SimpleSequentialModel,tensorflow/tensorflow/python/keras/distribute/simple_models.py,69,class,A simple sequential model and its inputs. 5356,_SimpleModel,tensorflow/tensorflow/python/keras/distribute/simple_models.py,94,class, 5357,SimpleSubclassModel,tensorflow/tensorflow/python/keras/distribute/simple_models.py,104,class,A simple subclass model and its data. 5358,_SimpleModule,tensorflow/tensorflow/python/keras/distribute/simple_models.py,125,class, 5359,SimpleTFModuleModel,tensorflow/tensorflow/python/keras/distribute/simple_models.py,135,class,A simple model based on tf.Module and its data. 5360,SingleLossStepTest,tensorflow/tensorflow/python/keras/distribute/step_fn_test.py,34,class, 5361,get_tpu_cluster_resolver,tensorflow/tensorflow/python/keras/distribute/tpu_strategy_test_utils.py,34,function, 5362,get_tpu_strategy,tensorflow/tensorflow/python/keras/distribute/tpu_strategy_test_utils.py,43,function, 5363,WorkerTrainingState,tensorflow/tensorflow/python/keras/distribute/worker_training_state.py,40,class,"Training state management class. This class provides APIs for backing up and restoring the training state. This allows model and epoch information to be saved periodically and restored for fault tolerance, also known as preemption recovery." 5364,ModelCheckpointTest,tensorflow/tensorflow/python/keras/distribute/worker_training_state_test.py,33,class, 5365,Layer,tensorflow/tensorflow/python/keras/engine/base_layer.py,103,class,"This is the class from which all layers inherit. A layer is a callable object that takes as input one or more tensors and that outputs one or more tensors. It involves *computation*, defined in the `call()` method, and a *state* (weight variables), defined either in the constructor `__init__()` or in the `build()` method. Users will just instantiate a layer and then treat it as a callable. Arguments: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights (default of `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type of the first input in TensorFlow 1). dynamic: Set this to `True` if your layer should only be run eagerly, and should not be used to generate a static computation graph. This would be the case for a Tree-RNN or a recursive network, for example, or generally for any layer that manipulates tensors using Python control flow. If `False`, we assume that the layer can safely be used to generate a static computation graph. Attributes: name: The name of the layer (string). dtype: The dtype of the layer's computations and weights. If mixed precision is used with a `tf.keras.mixed_precision.experimental.Policy`, this is instead just the dtype of the layer's weights, as the computations are done in a different dtype. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). trainable: Whether the layer should be trained (boolean), i.e.
whether its potentially-trainable weights should be returned as part of `layer.trainable_weights`. input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. We recommend that descendants of `Layer` implement the following methods: * `__init__()`: Defines custom layer attributes, and creates layer state variables that do not depend on input shapes, using `add_weight()`. * `build(self, input_shape)`: This method can be used to create weights that depend on the shape(s) of the input(s), using `add_weight()`. `__call__()` will automatically build the layer (if it has not been built yet) by calling `build()`. * `call(self, *args, **kwargs)`: Called in `__call__` after making sure `build()` has been called. `call()` performs the logic of applying the layer to the input tensors (which should be passed in as argument). Two reserved keyword arguments you can optionally use in `call()` are: - `training` (boolean, whether the call is in inference mode or training mode) - `mask` (boolean tensor encoding masked timesteps in the input, used in RNN layers) * `get_config(self)`: Returns a dictionary containing the configuration used to initialize this layer. If the keys differ from the arguments in `__init__`, then override `from_config(self)` as well. This method is used when saving the layer or a model that contains this layer. Examples: Here's a basic example: a layer with two variables, `w` and `b`, that returns `y = w . x + b`. It shows how to implement `build()` and `call()`. Variables set as attributes of a layer are tracked as weights of the layers (in `layer.weights`). ```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): # Create the state of the layer (weights) w_init = tf.random_normal_initializer() self.w = tf.Variable( initial_value=w_init(shape=(input_shape[-1], self.units), dtype='float32'), trainable=True) b_init = tf.zeros_initializer() self.b = tf.Variable( initial_value=b_init(shape=(self.units,), dtype='float32'), trainable=True) def call(self, inputs): # Defines the computation from inputs to outputs return tf.matmul(inputs, self.w) + self.b # Instantiates the layer. linear_layer = SimpleDense(4) # This will also call `build(input_shape)` and create the weights. y = linear_layer(tf.ones((2, 2))) assert len(linear_layer.weights) == 2 # These weights are trainable, so they're listed in `trainable_weights`: assert len(linear_layer.trainable_weights) == 2 ``` Note that the method `add_weight()` offers a shortcut to create weights: ```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b ``` Besides trainable weights, updated via backpropagation during training, layers can also have non-trainable weights. These weights are meant to be updated manually during `call()`. Here's an example layer that computes the running sum of its inputs: ```python class ComputeSum(Layer): def __init__(self, input_dim): super(ComputeSum, self).__init__() # Create a non-trainable weight.
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False) def call(self, inputs): self.total.assign_add(tf.reduce_sum(inputs, axis=0)) return self.total my_sum = ComputeSum(2) x = tf.ones((2, 2)) y = my_sum(x) print(y.numpy()) # [2. 2.] y = my_sum(x) print(y.numpy()) # [4. 4.] assert my_sum.weights == [my_sum.total] assert my_sum.non_trainable_weights == [my_sum.total] assert my_sum.trainable_weights == [] ``` For more information about creating layers, see the guide [Writing custom layers and models with Keras]( https://www.tensorflow.org/guide/keras/custom_layers_and_models) About the layer's `dtype` attribute: Each layer has a dtype, which is typically the dtype of the layer's computations and variables. A layer's dtype can be queried via the `Layer.dtype` property. The dtype is specified with the `dtype` constructor argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()` if no dtype is passed. `floatx()` itself defaults to ""float32"". Additionally, layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed precision is used, layers may have different computation and variable dtypes. See `tf.keras.mixed_precision.experimental.Policy` for details on layer dtypes." 5366,TensorFlowOpLayer,tensorflow/tensorflow/python/keras/engine/base_layer.py,3042,class,"Wraps a TensorFlow Operation in a Layer. This class is used internally by the Functional API. When a user uses a raw TensorFlow Operation on symbolic tensors originating from an `Input` Layer, the resultant operation will be wrapped with this Layer object in order to make the operation compatible with the Keras API. This Layer will create a new, identical operation (except for inputs and outputs) every time it is called. If `run_eagerly` is `True`, the op creation and calculation will happen inside an Eager function. Instances of this Layer are created when `autolambda` is called, which is whenever a Layer's `__call__` encounters symbolic inputs that do not have Keras metadata, or when a Network's `__init__` encounters outputs that do not have Keras metadata. Attributes: node_def: String, the serialized NodeDef of the Op this layer will wrap. name: String, the name of the Layer. constants: Dict of NumPy arrays, the values of any Tensors needed for this Operation that do not originate from a Keras `Input` Layer. Since all placeholders must come from Keras `Input` Layers, these Tensors must be treated as constant in the Functional API. trainable: Bool, whether this Layer is trainable. Currently Variables are not supported, and so this parameter has no effect. dtype: The default dtype of this Layer. Inherited from `Layer` and has no effect on this class, however it is used in `get_config`." 5367,AddLoss,tensorflow/tensorflow/python/keras/engine/base_layer.py,3165,class,"Adds its inputs as a loss. Attributes: unconditional: Whether or not the loss should be conditioned on the inputs." 5368,AddMetric,tensorflow/tensorflow/python/keras/engine/base_layer.py,3189,class,"Adds its inputs as a metric. Attributes: aggregation: 'mean' or None. How the inputs should be aggregated. metric_name: The name to use for this metric." 5369,_in_functional_construction_mode,tensorflow/tensorflow/python/keras/engine/base_layer.py,3215,function,Check the arguments to see if we are constructing a functional model.
5370,_convert_numpy_or_python_types,tensorflow/tensorflow/python/keras/engine/base_layer.py,3244,function, 5371,DynamicLayer,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,64,class, 5372,InvalidLayer,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,80,class, 5373,BaseLayerTest,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,86,class, 5374,SymbolicSupportTest,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,768,class, 5375,NestedTrackingTest,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,903,class, 5376,NameScopingTest,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,1088,class, 5377,AutographControlFlowTest,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,1147,class, 5378,AddLayer,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,1402,class,"A layer which adds its input to a variable. Useful for testing a layer with a variable." 5379,IdentityLayer,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,1416,class,"A layer that returns its input. Useful for testing a layer without a variable." 5380,DTypeTest,tensorflow/tensorflow/python/keras/engine/base_layer_test.py,1427,class, 5381,create_mean_metric,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,47,function, 5382,make_variable,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,55,function,"Temporary util to create a variable (relies on `variable_scope.variable`). Some reuse-related technicalities prevent us from using `variable_scope.get_variable()` directly, so we use a subcomponent that has fewer constraints (`variable_scope.variable()`). In the longer term, it seems like a similar ""default variable creator"" method should exist in `Trackable` instead. When this happens, we can get rid of this temporary solution. TODO(fchollet): remove this method when no longer needed. Arguments: name: Variable name. shape: Variable shape. dtype: The type of the variable. Defaults to `self.dtype` or `float32`. initializer: Initializer instance (callable). trainable: Whether the variable should be part of the layer's ""trainable_variables"" (e.g. variables, biases) or ""non_trainable_variables"" (e.g. BatchNorm mean, stddev). Note, if the current variable scope is marked as non-trainable then this parameter is ignored and any added variables are also marked as non-trainable. `trainable` defaults to `True` unless `synchronization` is set to `ON_READ`. caching_device: Passed to `tf.Variable`. validate_shape: Passed to `tf.Variable`. constraint: Constraint instance (callable). use_resource: Whether to use a `ResourceVariable`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. synchronization: Indicates when a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. If `synchronization` is set to `ON_READ`, `trainable` must not be set to `True`. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. partitioner: Not handled at this time. Returns: Variable instance." 5383,collect_previous_mask,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,146,function,"Retrieves the output mask(s) of the previous node.
Arguments: input_tensors: An arbitrary structure of Tensors. Returns: A mask tensor or list of mask tensors." 5384,have_all_keras_metadata,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,162,function, 5385,generate_placeholders_from_shape,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,166,function, 5386,create_keras_history,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,170,function,"Wraps TensorFlow Operations for compatibility with the Functional API. This method checks to see if a Tensor in `tensors` is missing Keras metadata and has its origin in a Keras `Input` Layer. If so, this method will replace the raw TensorFlow Operations that created this tensor with `TensorFlowOpLayer` instances that create identical operations. Any Tensors not originating from a Keras `Input` Layer will be treated as constants when constructing `TensorFlowOpLayer` instances. Arguments: tensors: A structure of Tensors, some of which come from raw TensorFlow operations and need to have Keras metadata assigned to them. Returns: created_layers: List. The `TensorFlowOpLayer` instances created to wrap the raw TensorFlow operations." 5387,_create_keras_history_helper,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,193,function,"Helper method for `create_keras_history`. Arguments: tensors: A structure of Tensors for which to create Keras metadata. processed_ops: Set. TensorFlow operations that have already been wrapped in `TensorFlowOpLayer` instances. created_layers: List. The `TensorFlowOpLayer` instances created. Returns: Tuple. First element is the updated set of TensorFlow Operations that have been wrapped in `TensorFlowOpLayer` instances. Second element is a list of the `TensorFlowOpLayer` instances created." 5388,unnest_if_single_tensor,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,283,function, 5389,needs_keras_history,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,294,function,"Check if any Tensors need to be wrapped in TensorFlowOpLayers. This will never return True inside a sublayer, because sublayers do not need to create Keras History. Otherwise, this returns True if one or more of `tensors` originates from a `keras.Input` and does not have `_keras_history` set. Arguments: tensors: An arbitrary nested structure of Tensors. ignore_call_context: Whether to ignore the check of whether we are currently outside of a `call` context. This is `True` when creating KerasHistory inside `Node`, where we always know that Tensors are being used with the Functional API. Returns: Bool, whether at least one Tensor needs to be wrapped." 5390,is_in_keras_graph,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,323,function,Returns whether currently executing inside of a Keras graph. 5391,is_in_eager_or_tf_function,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,328,function,Returns whether in eager mode or inside of a tf.function. 5392,is_in_tf_function,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,333,function,Returns whether inside of a tf.function. 5393,uses_keras_history,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,351,function,"Check if at least one Tensor originates from a `keras.Input`. This is `True` if at least one Tensor has its origin in a `keras.Input`. Any Tensor that originates from a `keras.Input` will have a dependency Tensor with a `_keras_history` attribute attached. Tensors that have already been checked to not originate from a `keras.Input` are marked as `_keras_history_checked`.
Arguments: tensors: An arbitrary nested structure of Tensors. Returns: Bool, whether at least one Tensor originates from a `keras.Input`." 5394,mark_checked,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,396,function,"Marks that these Tensors should not be tracked. This prevents Layers from attempting to create TensorFlowOpLayers for these Tensors. Arguments: tensors: An arbitrary structure of Tensors." 5395,call_context,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,412,function,Returns currently active `CallContext`. 5396,CallContext,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,424,class,"Keeps track of properties currently inside a Layer/Model's `call`. Attributes: in_call: Whether currently inside the `call` of a Layer. layer: The `Layer` whose `call` is currently active. inputs: The inputs to the currently active `Layer`. build_graph: Whether currently inside a Graph or FuncGraph. training: Whether currently executing in training or inference mode. saving: Whether currently saving to SavedModel. frozen: Whether currently executing inside a `Layer` with `trainable` set to `False`. in_keras_graph: Whether executing inside the Keras Graph." 5397,CallContextManager,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,513,class,Context manager for `CallContext`. 5398,training_arg_passed_to_call,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,546,function,Returns whether a user passed the `training` argument in `__call__`. 5399,is_subclassed,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,554,function,Returns True if the object is a subclassed layer or subclassed model. 5400,from_saved_model,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,560,function,Returns whether the layer is loaded from a SavedModel. 5401,check_graph_consistency,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,565,function,"Checks that tensors passed to an `add_*` method match the Keras graph. When one of the `add_*` methods is called inside a V2 conditional branch, the underlying tensor gets created in a FuncGraph managed by control_flow_v2. We need to raise clear error messages in such cases. Arguments: tensor: Tensor to check, or `False` if it is known that an error should be raised. method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}. force_raise: If an error should be raised regardless of `tensor`. Raises: RuntimeError: In case of an out-of-graph tensor." 5402,mark_as_return,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,693,function,Marks `outputs` as the return values for automatic control deps. 5403,enable_v2_dtype_behavior,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,723,function,"Enable the V2 dtype behavior for Keras layers. By default, the V2 dtype behavior is enabled in TensorFlow 2, so this function is only useful if `tf.compat.v1.disable_v2_behavior` has been called. Since mixed precision requires V2 dtype behavior to be enabled, this function allows you to use mixed precision in Keras layers if `disable_v2_behavior` has been called. When enabled, the dtype of Keras layers defaults to floatx (which is typically float32) instead of None. In addition, layers will automatically cast floating-point inputs to the layer's dtype.
>>> x = tf.ones((4, 4, 4, 4), dtype='float64') >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) >>> print(layer.dtype) # float32 since V2 dtype behavior is enabled float32 >>> y = layer(x) # Layer casts inputs since V2 dtype behavior is enabled >>> print(y.dtype.name) float32 A layer author can opt their layer out of the automatic input casting by passing `autocast=False` to the base Layer's constructor. This disables the autocasting part of the V2 behavior for that layer, but not the defaulting to floatx part of the V2 behavior. When a global `tf.keras.mixed_precision.experimental.Policy` is set, a Keras layer's dtype will default to the global policy instead of floatx. Layers will automatically cast inputs to the policy's compute_dtype." 5404,disable_v2_dtype_behavior,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,758,function,"Disables the V2 dtype behavior for Keras layers. See `tf.compat.v1.keras.layers.enable_v2_dtype_behavior`." 5405,v2_dtype_behavior_enabled,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,767,function,Returns True if the V2 dtype behavior is enabled. 5406,TrackableWeightHandler,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,774,class,"Keras wrapper for handling tracking.Trackable object saving and restoring. This class handles Trackables in both V1 and V2 modes, ensuring that they can be saved and restored with the correct data and without adding additional ops on every save. Attributes: trackable: The trackable to wrap. num_tensors: The number of tensors that this trackable requires for saving." 5407,no_ragged_support,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,846,function, 5408,is_split_variable,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,854,function,Returns True if `v` is either a PartitionedVariable or a SharedVariable. 5409,has_weights,tensorflow/tensorflow/python/keras/engine/base_layer_utils.py,859,function, 5410,TrackableWeightHandlerTest,tensorflow/tensorflow/python/keras/engine/base_layer_utils_test.py,38,class, 5411,OpLayerTest,tensorflow/tensorflow/python/keras/engine/base_layer_utils_test.py,79,class, 5412,Layer,tensorflow/tensorflow/python/keras/engine/base_layer_v1.py,77,class,"Base layer class. This is the class from which all layers inherit. A layer is a class implementing common neural network operations, such as convolution, batch norm, etc. These operations require managing weights, losses, updates, and inter-layer connectivity. Users will just instantiate a layer and then treat it as a callable. We recommend that descendants of `Layer` implement the following methods: * `__init__()`: Save configuration in member variables * `build()`: Called once from `__call__`, when we know the shapes of inputs and `dtype`. Should have the calls to `add_weight()`, and then call the super's `build()` (which sets `self.built = True`, which is nice in case the user wants to call `build()` manually before the first `__call__`). * `call()`: Called in `__call__` after making sure `build()` has been called once. Should actually perform the logic of applying the layer to the input tensors (which should be passed in as the first argument). Arguments: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights (default of `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type of the first input in TensorFlow 1).
dynamic: Set this to `True` if your layer should only be run eagerly, and should not be used to generate a static computation graph. This would be the case for a Tree-RNN or a recursive network, for example, or generally for any layer that manipulates tensors using Python control flow. If `False`, we assume that the layer can safely be used to generate a static computation graph. Attributes: name: The name of the layer (string). dtype: The dtype of the layer's computations and weights. If mixed precision is used with a `tf.keras.mixed_precision.experimental.Policy`, this is instead just the dtype of the layer's weights, as the computations are done in a different dtype. updates: List of update ops of this layer. losses: List of losses added by this layer. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). trainable: Whether the layer should be trained (boolean). input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. Each layer has a dtype, which is typically the dtype of the layer's computations and variables. A layer's dtype can be queried via the `Layer.dtype` property. The dtype is specified with the `dtype` constructor argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()` if no dtype is passed. `floatx()` itself defaults to ""float32"". Additionally, layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed precision is used, layers may have different computation and variable dtypes. See `tf.keras.mixed_precision.experimental.Policy` for details on layer dtypes." 5413,KerasHistory,tensorflow/tensorflow/python/keras/engine/base_layer_v1.py,2406,class,"Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an `InputLayer`. This allows Keras to track how each Tensor was produced, and this information is later retraced by the `keras.engine.Network` class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Layer is called. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via `nest.flatten`." 5414,PreprocessingLayer,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer.py,49,class,Base class for PreprocessingLayers. 5415,CombinerPreprocessingLayer,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer.py,69,class,"Base class for PreprocessingLayers that do computation using a Combiner. This class provides several helper methods to make creating a PreprocessingLayer easier. It assumes that the core of your computation will be done via a Combiner object. Subclassing this class to create a PreprocessingLayer allows your layer to be compatible with distributed computation. This class is compatible with TensorFlow 2.0+."
5416,convert_to_list,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer.py,246,function,"Convert a TensorLike, CompositeTensor, or ndarray into a Python list." 5417,Combiner,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer.py,285,class,"Functional object that defines a shardable computation. This object defines functions required to create and manipulate data objects. These data objects, referred to below as 'accumulators', are computation-specific and may be implemented alongside concrete subclasses of Combiner (if necessary - some computations may be simple enough that standard Python types can be used as accumulators). The intent for this class is that by describing computations in this way, we can arbitrarily shard a dataset, perform computations on a subset, and then merge the computation into a final result. This enables distributed computation. The combiner itself does not own any state - all computational state is owned by the accumulator objects. This is so that we can have an arbitrary number of Combiners (thus sharding the computation N ways) without risking any change to the underlying computation. These accumulator objects are uniquely associated with each Combiner; a Combiner defines what the accumulator object should be and will only work with accumulators of that type." 5418,AddingPreprocessingLayer,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer_test.py,47,class, 5419,AddingPreprocessingLayerV1,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer_test.py,119,class, 5420,get_layer,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer_test.py,125,function, 5421,PreprocessingLayerTest,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer_test.py,133,class, 5422,ConvertToListTest,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer_test.py,403,class, 5423,CombinerPreprocessingLayer,tensorflow/tensorflow/python/keras/engine/base_preprocessing_layer_v1.py,26,class,"V1-compatible CombinerPreprocessingLayer. This class overrides several methods of the CombinerPreprocessingLayer to make it compatible with V1 execution. End users should not need to worry about the implementation details here; Keras will export the appropriate class under the 'CombinerPreprocessingLayer' symbol. (Users should not directly instantiate engine.base_preprocessing_layer/_v1.CombinerPreprocessingLayer). When creating a subclass of PreprocessingLayer, you can create a V1-compatible subclass as follows: class MyProcLayer(MyProcLayer, base_preprocessing_layer_v1.CombinerPreprocessingLayer): pass Note that the same class name is required for serialization purposes. This is only necessary for internal classes, since any class that inherits from tf.keras.[...].CombinerPreprocessingLayer will get the right symbol." 5424,Container,tensorflow/tensorflow/python/keras/engine/compile_utils.py,33,class,Base Container class. 5425,LossesContainer,tensorflow/tensorflow/python/keras/engine/compile_utils.py,106,class,A container class for losses passed to `Model.compile`. 5426,MetricsContainer,tensorflow/tensorflow/python/keras/engine/compile_utils.py,276,class,A container class for metrics passed to `Model.compile`. 5427,create_pseudo_output_names,tensorflow/tensorflow/python/keras/engine/compile_utils.py,493,function,Create pseudo output names for a subclassed Model. 5428,create_pseudo_input_names,tensorflow/tensorflow/python/keras/engine/compile_utils.py,498,function,Create pseudo input names for a subclassed Model.
5429,_create_pseudo_names,tensorflow/tensorflow/python/keras/engine/compile_utils.py,503,function,"Creates pseudo {input | output} names for subclassed Models. Warning: this function should only be used to define default names for `Metrics` and `SavedModel`. No other use cases should rely on a `Model`'s input or output names. Example with dict: `{'a': [x1, x2], 'b': x3}` becomes: `['a_1', 'a_2', 'b']` Example with list: `[x, y]` becomes: `['output_1', 'output_2']` Arguments: tensors: `Model`'s outputs or inputs. prefix: 'output_' for outputs, 'input_' for inputs. Returns: Flattened list of pseudo names." 5430,map_to_output_names,tensorflow/tensorflow/python/keras/engine/compile_utils.py,548,function,"Maps a dict to a list using `output_names` as keys. This is a convenience feature only. When a `Model`'s outputs are a list, you can specify per-output losses and metrics as a dict, where the keys are the output names. If you specify per-output losses and metrics via the same structure as the `Model`'s outputs (recommended), no mapping is performed. For the Functional API, the output names are the names of the last layer of each output. For the Subclass API, the output names are determined by `create_pseudo_output_names` (For example: `['output_1', 'output_2']` for a list of outputs). This mapping preserves backwards compatibility for `compile` and `fit`. Arguments: y_pred: Sample outputs of the Model, to determine if this convenience feature should be applied (`struct` is returned unmodified if `y_pred` isn't a flat list). output_names: List. The names of the outputs of the Model. struct: The structure to map. Returns: `struct` mapped to a list in same order as `output_names`." 5431,map_missing_dict_keys,tensorflow/tensorflow/python/keras/engine/compile_utils.py,595,function,Replaces missing dict keys in `struct` with `None` placeholders. 5432,match_dtype_and_rank,tensorflow/tensorflow/python/keras/engine/compile_utils.py,605,function,Match dtype and rank of predictions. 5433,get_mask,tensorflow/tensorflow/python/keras/engine/compile_utils.py,625,function,Returns Keras mask from tensor. 5434,apply_mask,tensorflow/tensorflow/python/keras/engine/compile_utils.py,630,function,Applies any mask on predictions to sample weights. 5435,LossesContainerTest,tensorflow/tensorflow/python/keras/engine/compile_utils_test.py,35,class, 5436,MetricsContainerTest,tensorflow/tensorflow/python/keras/engine/compile_utils_test.py,342,class, 5437,ControlFlowLayer1,tensorflow/tensorflow/python/keras/engine/control_flow_test.py,37,class,Layer with an `if` condition in call. 5438,ControlFlowLayer2,tensorflow/tensorflow/python/keras/engine/control_flow_test.py,47,class,Layer with a `for` loop in call. 5439,NestedControlFlowLayer,tensorflow/tensorflow/python/keras/engine/control_flow_test.py,60,class,Layer nested with a control flow layer. 5440,ControlFlowModel,tensorflow/tensorflow/python/keras/engine/control_flow_test.py,71,class,Model with an `if` condition in call. 5441,NestedControlFlowModel,tensorflow/tensorflow/python/keras/engine/control_flow_test.py,81,class,Model with an `if` condition in call using a control flow layer. 5442,FunctionControlFlowModel,tensorflow/tensorflow/python/keras/engine/control_flow_test.py,96,class,Model with control flow where `call` is wrapped in a function already.
5443,AutographWrapperTest,tensorflow/tensorflow/python/keras/engine/control_flow_test.py,108,class, 5444,MultiInputSubclassed,tensorflow/tensorflow/python/keras/engine/correctness_test.py,30,class,Subclassed Model that adds its inputs and then adds a bias. 5445,multi_input_functional,tensorflow/tensorflow/python/keras/engine/correctness_test.py,43,function,Functional Model that adds its inputs and then adds a bias. 5446,SimpleBiasTest,tensorflow/tensorflow/python/keras/engine/correctness_test.py,55,class, 5447,MultipleInputTest,tensorflow/tensorflow/python/keras/engine/correctness_test.py,91,class, 5448,DataAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,73,class,"Base class for input data adapter. In TF 2.0, tf.data is the preferred API for users to feed in data. In order to simplify the training code path, all input data objects will be converted to `tf.data.Dataset` if possible. Note that since this class is mainly targeted for TF 2.0, it might have a lot of assumptions under the hood, e.g. eager context by default, distribution strategy, etc. In the meantime, some legacy feature support might be dropped, e.g., Iterator from dataset API in v1, etc. Sample usage of this class: ``` x = tf.data.Dataset.range(100) adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter] applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)] if len(applicable_adapters) != 1: raise ValueError(""Expect only one adapter class to handle the input"") dataset = applicable_adapters[0](x).get_dataset() for data in dataset: # training ```" 5449,TensorLikeDataAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,232,class,"Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy." 5450,GenericArrayLikeDataAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,423,class,"Adapter that handles array-like data without forcing it into memory. As an example, this adapter handles `keras.utils.HDF5Matrix` which holds datasets that may be too big to fully fit into memory. Specifically, this adapter handles any Python class which implements: `__getitem__`, `__len__`, `shape`, and `dtype` with the same meanings as Numpy, but it ignores any case where all the inputs are Tensors or Numpy arrays (because that case is handled by the base TensorLikeDataAdapter). It ignores scipy sparse matrices and Composite Tensors because those are handled by the CompositeTensorDataAdapter. It also does not handle lists/tuples of scalars, because those are handled by the ListsOfScalarsDataAdapter." 5451,CompositeTensorDataAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,519,class,Adapter that handles composite tensors. 5452,ListsOfScalarsDataAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,607,class,Adapter that handles lists of scalars and lists of lists of scalars. 5453,DatasetAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,671,class,Adapter that handles `tf.data.Dataset`. 5454,GeneratorDataAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,746,class,Adapter that handles Python generators and iterators. 5455,KerasSequenceAdapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,874,class,Adapter that handles `keras.utils.Sequence`. 5456,select_data_adapter,tensorflow/tensorflow/python/keras/engine/data_adapter.py,956,function,Selects a data adapter that can handle a given x and y.
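Entry 5450 spells out the array-like protocol the adapter expects: `__getitem__`, `__len__`, `shape`, and `dtype` with NumPy semantics. A minimal conforming object might look like the sketch below; the `LazyArray` class is illustrative, and whether `Model.fit` accepts such objects depends on the installed TF version.

```python
import numpy as np


class LazyArray:
  """Array-like wrapper; a real use case would read lazily from disk."""

  def __init__(self, data):
    self._data = np.asarray(data)
    self.shape = self._data.shape
    self.dtype = self._data.dtype

  def __len__(self):
    return len(self._data)

  def __getitem__(self, key):
    return self._data[key]


x = LazyArray(np.zeros((100, 4)))
assert len(x) == 100 and x[:10].shape == (10, 4)
```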
5457,_type_name,tensorflow/tensorflow/python/keras/engine/data_adapter.py,974,function,Generates a description of the type of an object. 5458,_process_tensorlike,tensorflow/tensorflow/python/keras/engine/data_adapter.py,988,function,"Process tensor-like inputs. This function: (1) Converts `Numpy` arrays to `Tensor`s. (2) Converts `Scipy` sparse matrices to `SparseTensor`s. (3) Converts `list`s to `tuple`s (for `tf.data` support). Args: inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like. Returns: Structure of `Tensor`s or tensor-like." 5459,is_none_or_empty,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1018,function, 5460,broadcast_sample_weight_modes,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1027,function,Match sample_weight_modes structure with output structure. 5461,DataHandler,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1063,class,Handles iterating over epoch-level `tf.data.Iterator` objects. 5462,_make_class_weight_map_fn,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1274,function,"Applies class weighting to a `Dataset`. The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where `y` must be a single `Tensor`. Arguments: class_weight: A map where the keys are integer class ids and values are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}` Returns: A function that can be used with `tf.data.Dataset.map` to apply class weighting." 5463,expand_1d,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1330,function,Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s. 5464,train_validation_split,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1343,function,"Split arrays into train and validation subsets in deterministic order. The last part of data will become validation data. Arguments: arrays: Tensors to split. Allowed inputs are arbitrarily nested structures of Tensors and NumPy arrays. validation_split: Float between 0 and 1. The proportion of the dataset to include in the validation split. The rest of the dataset will be included in the training split. Returns: `(train_arrays, validation_arrays)`" 5465,unpack_x_y_sample_weight,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1406,function,"Unpacks user-provided data tuple. This is a convenience utility to be used when overriding `Model.train_step`, `Model.test_step`, or `Model.predict_step`. This utility makes it easy to support data of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`. Standalone usage: >>> features_batch = tf.ones((10, 5)) >>> labels_batch = tf.zeros((10, 5)) >>> data = (features_batch, labels_batch) >>> # `y` and `sample_weight` will default to `None` if not provided. >>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) >>> sample_weight is None True Example in overridden `Model.train_step`: ```python class MyModel(tf.keras.Model): def train_step(self, data): # If `sample_weight` is not provided, all samples will be weighted # equally.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) with tf.GradientTape() as tape: y_pred = self(x, training=True) loss = self.compiled_loss( y, y_pred, sample_weight, regularization_losses=self.losses) trainable_variables = self.trainable_variables gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) self.compiled_metrics.update_state(y, y_pred, sample_weight) return {m.name: m.result() for m in self.metrics} ``` Arguments: data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`. Returns: The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not provided." 5466,pack_x_y_sample_weight,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1468,function,"Packs user-provided data into a tuple. This is a convenience utility for packing data into the tuple formats that `Model.fit` uses. Standalone usage: >>> x = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x) >>> isinstance(data, tf.Tensor) True >>> y = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x, y) >>> isinstance(data, tuple) True >>> x, y = data Arguments: x: Features to pass to `Model`. y: Ground-truth targets to pass to `Model`. sample_weight: Sample weight for each element. Returns: Tuple in the format used in `Model.fit`." 5467,single_batch_iterator,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1509,function,Creates a single-batch dataset. 5468,_check_data_cardinality,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1531,function, 5469,_scipy_sparse_to_sparse_tensor,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1542,function,Converts a SciPy sparse matrix to a SparseTensor. 5470,_is_distributed_dataset,tensorflow/tensorflow/python/keras/engine/data_adapter.py,1554,function, 5471,DummyArrayLike,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,44,class,Dummy array-like object. 
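Entry 5462 describes turning a `class_weight` dict into a function for `tf.data.Dataset.map`. The sketch below shows the core idea under two assumptions: the helper name is hypothetical (it is not the private TF implementation), and class ids are contiguous integers starting at 0 so that position in the weight vector equals the class id.

```python
import tensorflow as tf


def make_class_weight_map_fn(class_weight):
  # Assumes class ids 0..N-1, so index == class id after sorting.
  class_ids = sorted(class_weight)
  weights = tf.constant([class_weight[c] for c in class_ids], tf.float32)

  def map_fn(x, y):
    # y holds integer class ids; gather the per-sample weight for each.
    sw = tf.gather(weights, tf.cast(y, tf.int32))
    return x, y, sw

  return map_fn


ds = tf.data.Dataset.from_tensor_slices(([0.1, 0.2, 0.3], [0, 1, 2]))
ds = ds.map(make_class_weight_map_fn({0: 0.2, 1: 0.6, 2: 0.3}))
for _, y, sw in ds.take(1):
  print(y.numpy(), sw.numpy())  # 0 0.2
```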
5472,fail_on_convert,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,65,function, 5473,DataAdapterTestBase,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,72,class, 5474,TestSequence,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,98,class, 5475,TensorLikeDataAdapterTest,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,112,class, 5476,GenericArrayLikeDataAdapterTest,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,385,class, 5477,DatasetAdapterTest,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,616,class, 5478,GeneratorDataAdapterTest,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,659,class, 5479,KerasSequenceAdapterTest,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,726,class, 5480,DataHandlerTest,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,782,class, 5481,TestValidationSplit,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,1001,class, 5482,TestUtils,tensorflow/tensorflow/python/keras/engine/data_adapter_test.py,1055,class, 5483,TestDeferredSequential,tensorflow/tensorflow/python/keras/engine/deferred_sequential_test.py,38,class, 5484,get_model,tensorflow/tensorflow/python/keras/engine/deferred_sequential_test.py,206,function, 5485,TestDNNModel,tensorflow/tensorflow/python/keras/engine/feature_columns_integration_test.py,33,class, 5486,FeatureColumnsIntegrationTest,tensorflow/tensorflow/python/keras/engine/feature_columns_integration_test.py,46,class,"Most Sequential model API tests are covered in `training_test.py`. " 5487,Functional,tensorflow/tensorflow/python/keras/engine/functional.py,52,class,"A `Functional` model is a `Model` defined as a directed graph of layers. Three types of `Model` exist: subclassed `Model`, `Functional` model, and `Sequential` (a special case of `Functional`). In general, more Keras features are supported with `Functional` than with subclassed `Model`s, specifically: - Model cloning (`keras.models.clone`) - Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`) - Whole-model saving (`model.save()`) A `Functional` model can be instantiated by passing two arguments to `__init__`. The first argument is the `keras.Input` Tensors that represent the inputs to the model. The second argument specifies the output tensors that represent the outputs of this model. Both arguments can be a nested structure of tensors. Example: ``` inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))} t = keras.layers.Dense(1, activation='relu')(inputs['x1']) outputs = keras.layers.Add()([t, inputs['x2']]) model = keras.Model(inputs, outputs) ``` A `Functional` model constructed using the Functional API can also include raw TensorFlow functions, with the exception of functions that create Variables or assign ops. Example: ``` inputs = keras.Input(shape=(10,)) x = keras.layers.Dense(1)(inputs) outputs = tf.nn.relu(x) model = keras.Model(inputs, outputs) ``` Arguments: inputs: List of input tensors (must be created via `tf.keras.Input()`). outputs: List of output tensors. name: String, optional. Name of the model. trainable: Boolean, whether the model's variables should be trainable." 5488,_make_node_key,tensorflow/tensorflow/python/keras/engine/functional.py,817,function, 5489,_map_graph_network,tensorflow/tensorflow/python/keras/engine/functional.py,821,function,"Validates a network's topology and gathers its layers and nodes. Arguments: inputs: List of input tensors.
outputs: List of output tensors. Returns: A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`. - nodes: list of Node instances. - nodes_by_depth: dict mapping ints (depth) to lists of node instances. - layers: list of Layer instances. - layers_by_depth: dict mapping ints (depth) to lists of layer instances. Raises: ValueError: In case the network is not valid (e.g. disconnected graph)." 5490,_build_map,tensorflow/tensorflow/python/keras/engine/functional.py,943,function,"This method topologically sorts nodes in order from inputs to outputs. It uses a depth-first search to topologically sort nodes that appear in the _keras_history connectivity metadata of `outputs`. Args: outputs: the output tensors whose _keras_history metadata should be walked. This may be an arbitrary nested structure. Returns: A tuple like (ordered_nodes, layer_to_first_traversal_index) ordered_nodes: list of nodes appearing in the keras history, topologically sorted from original inputs to the `outputs`. (If outputs have different sets of ancestors, the inputs to one output may appear after a different output). layer_to_first_traversal_index: A dict mapping layer to the traversal index in the DFS where it is seen. Note: if a layer is shared by several nodes, the dict will only store the index corresponding to the *first* time the layer is seen." 5491,_build_map_helper,tensorflow/tensorflow/python/keras/engine/functional.py,974,function,Recursive helper for `_build_map`. 5492,_map_subgraph_network,tensorflow/tensorflow/python/keras/engine/functional.py,1005,function,"Returns the nodes and layers in the topology from `inputs` to `outputs`. Args: inputs: List of input tensors. outputs: List of output tensors. Returns: A tuple of List[Node] and List[Layer]." 5493,_should_skip_first_node,tensorflow/tensorflow/python/keras/engine/functional.py,1022,function,Returns True if the first layer node should not be saved or loaded. 5494,_deserialize_keras_tensors,tensorflow/tensorflow/python/keras/engine/functional.py,1032,function,Deserializes Keras Tensors passed to `call`. 5495,connect_ancillary_layers,tensorflow/tensorflow/python/keras/engine/functional.py,1052,function,Adds layers that are not connected to the outputs to the model. 5496,reconstruct_from_config,tensorflow/tensorflow/python/keras/engine/functional.py,1068,function,"Reconstructs graph from config object. Args: config: Dictionary returned from Network.get_config() custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. created_layers: Optional dictionary mapping names to Layer objects. Any layer not in this dictionary will be created and added to the dict. This function will add new nodes to all layers (excluding InputLayers), instead of re-using pre-existing nodes in the layers. Returns: Tuple of (input tensors, output tensors, dictionary of created layers)" 5497,get_network_config,tensorflow/tensorflow/python/keras/engine/functional.py,1242,function,"Builds the config, which consists of the node graph and serialized layers. Args: network: A Network object. serialize_layer_fn: Function used to serialize layers. Returns: Config dictionary."
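Entries 5496 and 5497 are the private halves of the public config round trip, which can be exercised with stable API calls:

```python
import tensorflow as tf

# Build a tiny Functional model, serialize it to a config dict (node graph
# plus serialized layers), then rebuild an equivalent model from the config.
inputs = tf.keras.Input(shape=(10,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs, outputs)

config = model.get_config()
rebuilt = tf.keras.Model.from_config(config)
assert len(rebuilt.layers) == len(model.layers)
```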
5498,NetworkConstructionTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,61,class, 5499,DeferredModeTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,1430,class, 5500,DefaultShapeInferenceBehaviorTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,1481,class, 5501,GraphUtilsTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,1755,class, 5502,NestedNetworkTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,1782,class, 5503,AddLossTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,1918,class, 5504,WeightAccessTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,2009,class, 5505,DTypeTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,2073,class, 5506,AttrTrackingLayer,tensorflow/tensorflow/python/keras/engine/functional_test.py,2103,class,"Count how many times `dynamic` and `stateful` are called. These counts are used to test that the attribute cache behaves as expected." 5507,CacheCorrectnessTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,2125,class, 5508,InputsOutputsErrorTest,tensorflow/tensorflow/python/keras/engine/functional_test.py,2325,class, 5509,InputLayer,tensorflow/tensorflow/python/keras/engine/input_layer.py,37,class,"Layer to be used as an entry point into a Network (a graph of layers). It can either wrap an existing tensor (pass an `input_tensor` argument) or create a placeholder tensor (pass arguments `input_shape`, and optionally, `dtype`). It is generally recommended to use the functional layer API via `Input` (which creates an `InputLayer`) without directly using `InputLayer`. When using an InputLayer with a Keras Sequential model, it can be skipped by moving the input_shape parameter to the first layer after the InputLayer. This class can create placeholders for tf.Tensors, tf.SparseTensors, and tf.RaggedTensors by choosing 'sparse=True' or 'ragged=True'. Note that 'sparse' and 'ragged' can't be configured to True at the same time. Usage: ```python # With explicit InputLayer. model = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=(4,)), tf.keras.layers.Dense(8)]) model.compile(tf.optimizers.RMSprop(0.001), loss='mse') model.fit(np.zeros((10, 4)), np.ones((10, 8))) # Without an InputLayer; let the first layer have the input_shape. # Keras will add an input for the model behind the scenes. model = tf.keras.Sequential([ tf.keras.layers.Dense(8, input_shape=(4,))]) model.compile(tf.optimizers.RMSprop(0.001), loss='mse') model.fit(np.zeros((10, 4)), np.ones((10, 8))) ``` Arguments: input_shape: Shape tuple (not including the batch axis), or `TensorShape` instance (not including the batch axis). batch_size: Optional input batch size (integer or None). dtype: Optional datatype of the input. When not provided, the Keras default float type will be used. input_tensor: Optional tensor to use as layer input instead of creating a placeholder. sparse: Boolean, whether the placeholder created is meant to be sparse. Defaults to False. ragged: Boolean, whether the placeholder created is meant to be ragged. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see [this guide](https://www.tensorflow.org/guide/ragged_tensors). Defaults to False. name: Optional name of the layer (string)." 5510,Input,tensorflow/tensorflow/python/keras/engine/input_layer.py,211,function,"`Input()` is used to instantiate a Keras tensor.
A Keras tensor is a TensorFlow symbolic tensor object, which we augment with certain attributes that allow us to build a Keras model just by knowing the inputs and outputs of the model. For instance, if `a`, `b` and `c` are Keras tensors, it becomes possible to do: `model = Model(input=[a, b], output=c)` Arguments: shape: A shape tuple (integers), not including the batch size. For instance, `shape=(32,)` indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known. batch_size: optional static batch size (integer). name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided. dtype: The data type expected by the input, as a string (`float32`, `float64`, `int32`...) sparse: A boolean specifying whether the placeholder to be created is sparse. Only one of 'ragged' and 'sparse' can be True. Note that, if `sparse` is False, sparse tensors can still be passed into the input - they will be densified with a default value of 0. tensor: Optional existing tensor to wrap into the `Input` layer. If set, the layer will not create a placeholder tensor. ragged: A boolean specifying whether the placeholder to be created is ragged. Only one of 'ragged' and 'sparse' can be True. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see [this guide](https://www.tensorflow.org/guide/ragged_tensors). **kwargs: deprecated arguments support. Supports `batch_shape` and `batch_input_shape`. Returns: A `tensor`. Example: ```python # this is a logistic regression in Keras x = Input(shape=(32,)) y = Dense(16, activation='softmax')(x) model = Model(x, y) ``` Note that even if eager execution is enabled, `Input` produces a symbolic tensor (i.e. a placeholder). This symbolic tensor can be used with other TensorFlow ops, as such: ```python x = Input(shape=(32,)) y = tf.square(x) ``` Raises: ValueError: If both `sparse` and `ragged` are provided. ValueError: If both `shape` and (`batch_input_shape` or `batch_shape`) are provided. ValueError: If both `shape` and `tensor` are None. ValueError: if any unrecognized parameters are provided." 5511,InputSpec,tensorflow/tensorflow/python/keras/engine/input_spec.py,34,class,"Specifies the rank, dtype and shape of every input to a layer. Layers can expose (if appropriate) an `input_spec` attribute: an instance of `InputSpec`, or a nested structure of `InputSpec` instances (one per input tensor). These objects enable the layer to run input compatibility checks for input structure, input rank, input shape, and input dtype. A None entry in a shape is compatible with any dimension, a None shape is compatible with any shape. Arguments: dtype: Expected DataType of the input. shape: Shape tuple, expected shape of the input (may include None for unchecked axes). ndim: Integer, expected rank of the input. max_ndim: Integer, maximum rank of the input. min_ndim: Integer, minimum rank of the input. axes: Dictionary mapping integer axes to a specific dimension value." 5512,to_tensor_shape,tensorflow/tensorflow/python/keras/engine/input_spec.py,109,function,"Returns a tf.TensorShape object that matches the shape specifications. If the InputSpec's shape or ndim is defined, this method will return a fully or partially-known shape. Otherwise, the returned TensorShape is None. Args: spec: an InputSpec object. 
Returns: a tf.TensorShape object" 5513,assert_input_compatibility,tensorflow/tensorflow/python/keras/engine/input_spec.py,132,function,"Checks compatibility between the layer and provided inputs. This checks that the tensor(s) `inputs` verify the input assumptions of a layer (if any). If not, a clear and actionable exception gets raised. Arguments: input_spec: An InputSpec instance, list of InputSpec instances, a nested structure of InputSpec instances, or None. inputs: Input tensor, list of input tensors, or a nested structure of input tensors. layer_name: String, name of the layer (for error message formatting). Raises: ValueError: in case of mismatch between the provided inputs and the expectations of the layer." 5514,to_tensor_spec,tensorflow/tensorflow/python/keras/engine/input_spec.py,237,function,Converts a Keras InputSpec object to a TensorSpec. 5515,InputSpecTest,tensorflow/tensorflow/python/keras/engine/input_spec_test.py,25,class, 5516,InputSpecToTensorShapeTest,tensorflow/tensorflow/python/keras/engine/input_spec_test.py,35,class, 5517,enable_keras_tensors,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,36,function,Enable using KerasTensors in Keras's functional API. 5518,disable_keras_tensors,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,42,function,Disable using KerasTensors in Keras's functional API. 5519,keras_tensors_enabled,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,48,function,Return a bool specifying if KerasTensors are enabled. 5520,KerasTensor,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,53,class,"A representation of a Keras in/output during Functional API construction. `KerasTensor`s are tensor-like objects that represent the symbolic inputs and outputs of Keras layers during Functional model construction. They are comprised of the `tf.TypeSpec` of the (Composite)Tensor that will be consumed/produced in the corresponding location of the Functional model. KerasTensors are intended as a private API, so users should never need to directly instantiate `KerasTensor`s. **Building Functional Models with KerasTensors** `tf.keras.Input` produces `KerasTensor`s that represent the symbolic inputs to your model. Passing a `KerasTensor` to a `tf.keras.Layer` `__call__` lets the layer know that you are building a Functional model. The layer __call__ will infer the output signature and return `KerasTensor`s with `tf.TypeSpec`s corresponding to the symbolic outputs of that layer call. These output `KerasTensor`s will have all of the internal KerasHistory metadata attached to them that Keras needs to construct a Functional Model. Currently, layers infer the output signature by: * creating a scratch `FuncGraph` * making placeholders in the scratch graph that match the input typespecs * Calling `layer.call` on these placeholders * extracting the signatures of the outputs before clearing the scratch graph (Note: names assigned to KerasTensors by this process are not guaranteed to be unique, and are subject to implementation details). `tf.nest` methods are used to ensure all of the input/output data structures are maintained, with elements swapped between KerasTensors and placeholders. In rare cases (such as when directly manipulating shapes using Keras layers), the layer may be able to partially infer the value of the output in addition to just inferring the signature. When this happens, the returned KerasTensor will also contain the inferred value information. Follow-on layers can use this information
during their own output signature inference. E.g. if one layer produces a symbolic `KerasTensor` that the next layer uses as the shape of its outputs, partially knowing the value helps infer the output shape. **Automatically converting TF APIs to layers**: If you pass a `KerasTensor` to a TF API that supports dispatching, Keras will automatically turn that API call into a lambda layer in the Functional model, and return KerasTensors representing the symbolic outputs. Most TF APIs that take only tensors as input and produce output tensors will support dispatching. Calling a `tf.function` does not support dispatching, so you cannot pass `KerasTensor`s as inputs to a `tf.function`. Higher-order APIs that take methods which produce tensors (e.g. `tf.while`, `tf.map_fn`, `tf.cond`) also do not currently support dispatching. So, you cannot directly pass KerasTensors as inputs to these APIs either. If you want to use these APIs inside of a Functional model, you must put them inside of a custom layer. Args: type_spec: The `tf.TypeSpec` for the symbolic input created by `tf.keras.Input`, or symbolically inferred for the output during a symbolic layer `__call__`. inferred_value: (Optional) a non-symbolic static value, possibly partially specified, that could be symbolically inferred for the outputs during a symbolic layer `__call__`. This will generally only happen when grabbing and manipulating `tf.int32` shapes directly as tensors. Statically inferring values in this way and storing them in the KerasTensor allows follow-on layers to infer output signatures more effectively. (e.g. when using a symbolic shape tensor to later construct a tensor with that shape). name: (optional) string name for this KerasTensor. Names automatically generated by symbolic layer `__call__`s are not guaranteed to be unique, and are subject to implementation details." 5521,_KerasTensorIterator,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,341,class,Iterates over the leading dim of a KerasTensor. Performs 0 error checks. 5522,keras_tensor_to_placeholder,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,362,function,Construct a graph placeholder to represent a KerasTensor when tracing. 5523,UserRegisteredSpec,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,413,class,TypeSpec to represent user-registered symbolic objects. 5524,keras_tensor_from_tensor,tensorflow/tensorflow/python/keras/engine/keras_tensor.py,437,function,Convert a traced (composite)tensor to a representative KerasTensor. 5525,KerasTensorTest,tensorflow/tensorflow/python/keras/engine/keras_tensor_test.py,33,class, 5526,Node,tensorflow/tensorflow/python/keras/engine/node.py,38,class,"A `Node` describes the connectivity between two layers. Each time a layer is connected to some new input, a node is added to `layer._inbound_nodes`. Each time the output of a layer is used by another layer, a node is added to `layer._outbound_nodes`. Arguments: layer: The Layer for the Layer.__call__ this node represents. call_args: The positional arguments the Layer was called with. call_kwargs: The keyword arguments the Layer was called with. outputs: The outputs of the Layer.__call__" 5527,KerasHistory,tensorflow/tensorflow/python/keras/engine/node.py,249,class,"Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an `InputLayer`.
This allows Keras to track how each Tensor was produced, and this information is later retraced by the `keras.engine.Network` class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Layer is called. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via `nest.flatten`." 5528,is_keras_tensor,tensorflow/tensorflow/python/keras/engine/node.py,274,function, 5529,_serialize_keras_tensor,tensorflow/tensorflow/python/keras/engine/node.py,278,function,Serializes a single Tensor passed to `call`. 5530,DummyTensor,tensorflow/tensorflow/python/keras/engine/node_test.py,27,class, 5531,DummyLayer,tensorflow/tensorflow/python/keras/engine/node_test.py,33,class, 5532,NetworkConstructionTest,tensorflow/tensorflow/python/keras/engine/node_test.py,37,class, 5533,PartialBatchPaddingHandler,tensorflow/tensorflow/python/keras/engine/partial_batch_padding_handler.py,30,class,A container that holds info about partial batches for `predict()`. 5534,Sequential,tensorflow/tensorflow/python/keras/engine/sequential.py,50,class,"`Sequential` groups a linear stack of layers into a `tf.keras.Model`. `Sequential` provides training and inference features on this model. Examples: >>> # Optionally, the first layer can receive an `input_shape` argument: >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,))) >>> # Afterwards, we do automatic shape inference: >>> model.add(tf.keras.layers.Dense(4)) >>> # This is identical to the following: >>> model = tf.keras.Sequential() >>> model.add(tf.keras.Input(shape=(16,))) >>> model.add(tf.keras.layers.Dense(8)) >>> # Note that you can also omit the `input_shape` argument. >>> # In that case the model doesn't have any weights until the first call >>> # to a training/evaluation method (since it isn't yet built): >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(8)) >>> model.add(tf.keras.layers.Dense(4)) >>> # model.weights not created yet >>> # Whereas if you specify the input shape, the model gets built >>> # continuously as you are adding layers: >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,))) >>> model.add(tf.keras.layers.Dense(4)) >>> len(model.weights) 4 >>> # When using the delayed-build pattern (no input shape specified), you can >>> # choose to manually build your model by calling >>> # `build(batch_input_shape)`: >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Dense(8)) >>> model.add(tf.keras.layers.Dense(4)) >>> model.build((None, 16)) >>> len(model.weights) 4 ```python # Note that when using the delayed-build pattern (no input shape specified), # the model gets built the first time you call `fit`, `eval`, or `predict`, # or the first time you call the model on some input data. 
model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(8)) model.add(tf.keras.layers.Dense(1)) model.compile(optimizer='sgd', loss='mse') # This builds the model for the first time: model.fit(x, y, batch_size=32, epochs=10) ```" 5535,_get_shape_tuple,tensorflow/tensorflow/python/keras/engine/sequential.py,518,function, 5536,relax_input_shape,tensorflow/tensorflow/python/keras/engine/sequential.py,527,function, 5537,clear_previously_created_nodes,tensorflow/tensorflow/python/keras/engine/sequential.py,535,function,Remove nodes from `created_nodes` from the layer's inbound_nodes. 5538,track_nodes_created_by_last_call,tensorflow/tensorflow/python/keras/engine/sequential.py,547,function,Adds to `created_nodes` the nodes created by the last call to `layer`. 5539,TestSequential,tensorflow/tensorflow/python/keras/engine/sequential_test.py,37,class,"Most Sequential model API tests are covered in `training_test.py`. " 5540,TestSequentialEagerIntegration,tensorflow/tensorflow/python/keras/engine/sequential_test.py,449,class, 5541,enable_multi_worker,tensorflow/tensorflow/python/keras/engine/training.py,103,function,Decorator that handles running `method` with multi-worker strategy. 5542,disable_multi_worker,tensorflow/tensorflow/python/keras/engine/training.py,123,function,Decorator that disallows multi-worker use of `method`. 5543,inject_functional_model_class,tensorflow/tensorflow/python/keras/engine/training.py,136,function,Inject `Functional` into the hierarchy of this class if needed. 5544,is_functional_model_init_params,tensorflow/tensorflow/python/keras/engine/training.py,152,function, 5545,Model,tensorflow/tensorflow/python/keras/engine/training.py,159,class,"`Model` groups layers into an object with training and inference features. Arguments: inputs: The input(s) of the model: a `keras.Input` object or list of `keras.Input` objects. outputs: The output(s) of the model. See Functional API example below. name: String, the name of the model. There are two ways to instantiate a `Model`: 1 - With the ""Functional API"", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__` and you should implement the model's forward pass in `call`. 
```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ``` Once the model is created, you can configure the model with losses and metrics with `model.compile()`, train the model with `model.fit()`, or use the model for prediction with `model.predict()`." 5546,reduce_per_replica,tensorflow/tensorflow/python/keras/engine/training.py,2647,function,"Reduce PerReplica objects. Arguments: values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are returned as-is. strategy: `tf.distribute.Strategy` object. reduction: One of 'first', 'concat'. Returns: Structure of `Tensor`s." 5547,concat,tensorflow/tensorflow/python/keras/engine/training.py,2677,function,Concatenates `tensor`s along `axis`. 5548,_is_tpu_multi_host,tensorflow/tensorflow/python/keras/engine/training.py,2686,function, 5549,_tpu_multi_host_concat,tensorflow/tensorflow/python/keras/engine/training.py,2691,function,Correctly order TPU PerReplica objects. 5550,_minimize,tensorflow/tensorflow/python/keras/engine/training.py,2706,function,"Minimizes loss for one step by updating `trainable_variables`. This is roughly equivalent to ```python gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) ``` However, this function also applies gradient clipping and loss scaling if the optimizer is a LossScaleOptimizer. Args: strategy: `tf.distribute.Strategy`. tape: A gradient tape. The loss must have been computed under this tape. optimizer: The optimizer used to minimize the loss. loss: The loss tensor. trainable_variables: The variables that will be updated in order to minimize the loss." 5551,_is_scalar,tensorflow/tensorflow/python/keras/engine/training.py,2760,function, 5552,write_scalar_summaries,tensorflow/tensorflow/python/keras/engine/training.py,2764,function, 5553,_minimum_control_deps,tensorflow/tensorflow/python/keras/engine/training.py,2770,function,Returns the minimum control dependencies to ensure step succeeded. 5554,_disallow_inside_tf_function,tensorflow/tensorflow/python/keras/engine/training.py,2782,function, 5555,_is_hdf5_filepath,tensorflow/tensorflow/python/keras/engine/training.py,2794,function, 5556,model_iteration,tensorflow/tensorflow/python/keras/engine/training_arrays.py,46,function,"Loop function for arrays of data with modes TRAIN/TEST/PREDICT. Arguments: model: Keras Model instance. inputs: Either a list or dictionary of arrays, or a dataset instance. targets: List/dictionary of target arrays. sample_weights: Optional list of sample weight arrays. batch_size: Integer batch size or None if unknown. epochs: Number of times to iterate over the data. verbose: 0, 1, or 2.
Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. Note that the progress bar is not particularly useful when logged to a file, so verbose=2 is recommended when not running interactively (e.g., in a production environment). callbacks: List of callbacks to be called during training val_inputs: Either a list or dictionary of arrays, or a dataset instance. val_targets: List/dictionary of target arrays. val_sample_weights: Optional list of sample weight arrays. shuffle: Whether to shuffle the data at the beginning of each epoch. initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. validation_steps: Number of steps to run validation for (only if doing validation from data tensors). Ignored with the default value of `None`. validation_freq: Only relevant if validation data is provided. Integer or `collections_abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. validation_in_fit: If True, this method is invoked from within a training iteration (for validation). In the case where `val_inputs` is a dataset, this flag indicates that its iterator and feed values are already created, so it should properly reuse resources. prepared_feed_values_from_dataset: If True, `inputs` is a list of feed tensors returned from `_prepare_feed_values` call on the validation dataset, so do not call it again on `inputs`. Should only be used for inline validation (i.e., only if `validation_in_fit` is also True). steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. **kwargs: Additional arguments for backwards compatibility. Returns: - In TRAIN mode: `History` object. - In TEST mode: Evaluation metrics. - In PREDICT mode: Outputs of the Model called on inputs. Raises: ValueError: in case of invalid arguments." 5557,_get_model_feed,tensorflow/tensorflow/python/keras/engine/training_arrays.py,461,function, 5558,_print_train_info,tensorflow/tensorflow/python/keras/engine/training_arrays.py,470,function, 5559,_get_num_samples_or_steps,tensorflow/tensorflow/python/keras/engine/training_arrays.py,480,function,Returns total number of samples (when training in batch mode) or steps. 5560,_prepare_feed_values,tensorflow/tensorflow/python/keras/engine/training_arrays.py,488,function,"Prepare feed values to the model execution function. Arguments: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. sample_weights: Optional list of sample weight arrays. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode."
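The `validation_freq` semantics documented for entry 5556 (integer vs. container) reduce to a small predicate. The sketch mirrors the documented behavior; `should_run_validation` is an illustrative name with one-indexed epochs.

```python
def should_run_validation(validation_freq, epoch):
  # Integer: validate every `validation_freq` epochs (epoch is one-indexed).
  if isinstance(validation_freq, int):
    return epoch % validation_freq == 0
  # Container: validate only on the listed epochs.
  return epoch in validation_freq


assert should_run_validation(2, 4) and not should_run_validation(2, 3)
assert should_run_validation([1, 2, 10], 10)
```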
5561,_get_iterator,tensorflow/tensorflow/python/keras/engine/training_arrays.py,538,function, 5562,_reinitialize_iterator,tensorflow/tensorflow/python/keras/engine/training_arrays.py,545,function, 5563,_make_execution_function,tensorflow/tensorflow/python/keras/engine/training_arrays.py,553,function,Makes function to run one step of model execution. 5564,_update_sample_weight_mode,tensorflow/tensorflow/python/keras/engine/training_arrays.py,560,function,Updates the sample_weight_mode of a given model. 5565,ArrayLikeTrainingLoop,tensorflow/tensorflow/python/keras/engine/training_arrays.py,594,class,"TrainingLoop that handles array-like inputs. This is the default handler for most input data types, including symbolic tensors, Numpy array-likes, and Datasets and iterators in graph mode (since they generate symbolic tensors). This function is used to handle models with `run_eagerly = False`." 5566,ValidationDatasetNoLimitTest,tensorflow/tensorflow/python/keras/engine/training_arrays_test.py,38,class, 5567,PrintTrainingInfoTest,tensorflow/tensorflow/python/keras/engine/training_arrays_test.py,66,class, 5568,BatchCounterCallback,tensorflow/tensorflow/python/keras/engine/training_dataset_test.py,40,class, 5569,TestTrainingWithDataset,tensorflow/tensorflow/python/keras/engine/training_dataset_test.py,53,class, 5570,TestMetricsWithDatasets,tensorflow/tensorflow/python/keras/engine/training_dataset_test.py,526,class, 5571,_per_replica_execution_function,tensorflow/tensorflow/python/keras/engine/training_distributed.py,45,function, 5572,_build_model,tensorflow/tensorflow/python/keras/engine/training_distributed.py,51,function, 5573,_make_train_step_fn,tensorflow/tensorflow/python/keras/engine/training_distributed.py,60,function,"Create step fn. Arguments: model: a Keras Model instance. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. strategy: a `tf.distribute.Strategy` instance. output_labels: the output labels for the step function. Returns: A step function to be run by `tf.distribute.Strategy`." 5574,experimental_tpu_fit_loop,tensorflow/tensorflow/python/keras/engine/training_distributed.py,123,function,"Fit loop for training with TPU tf.distribute.Strategy. Arguments: model: Keras Model instance. dataset: Dataset that returns inputs and targets epochs: Number of times to iterate over the data verbose: Integer, Verbosity mode, 0, 1 or 2 callbacks: List of callbacks to be called during training initial_epoch: Epoch at which to start training (useful for resuming a previous training run) steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. val_dataset: Dataset for validation data. validation_steps: Number of steps to run validation for (only if doing validation from data tensors). Ignored with the default value of `None`. validation_freq: Only relevant if validation data is provided. Integer or `collections.abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. Returns: Returns `None`. Raises: ValueError: in case of invalid arguments."
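For a feel of the step-function shape that entry 5573 builds, here is a simplified sketch of a per-replica train step dispatched through `strategy.run`; it uses the default no-op strategy so it also runs on CPU, and it is not the TPU-specific implementation.

```python
import tensorflow as tf

strategy = tf.distribute.get_strategy()  # default (single-replica) strategy

with strategy.scope():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
  optimizer = tf.keras.optimizers.SGD()
  loss_fn = tf.keras.losses.MeanSquaredError(
      reduction=tf.keras.losses.Reduction.SUM)

GLOBAL_BATCH_SIZE = 8


@tf.function
def train_step(x, y):
  def step_fn(x, y):
    with tf.GradientTape() as tape:
      # Scale by the global batch size so the sum over replicas is a mean.
      loss = loss_fn(y, model(x, training=True)) / GLOBAL_BATCH_SIZE
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

  per_replica_loss = strategy.run(step_fn, args=(x, y))
  return strategy.reduce(
      tf.distribute.ReduceOp.SUM, per_replica_loss, axis=None)


loss = train_step(tf.ones((8, 4)), tf.ones((8, 1)))
```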
5575,experimental_tpu_test_loop,tensorflow/tensorflow/python/keras/engine/training_distributed.py,293,function,"Test loop for evaluating with TPU tf.distribute.Strategy. Arguments: model: Keras Model instance. dataset: Dataset for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring predictions finished. Ignored with the default value of `None`. callbacks: List of callbacks to be called during training Returns: Scalar loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the outputs." 5576,experimental_tpu_predict_loop,tensorflow/tensorflow/python/keras/engine/training_distributed.py,423,function,"Predict loop for predicting with TPU tf.distribute.Strategy. Arguments: model: Keras Model instance. dataset: Dataset for input data. verbose: Integer, Verbosity mode 0 or 1. steps: Total number of steps (batches of samples) before declaring `_predict_loop` finished. Ignored with the default value of `None`. callbacks: List of callbacks to be called during training Returns: Array of predictions (if the model has a single output) or list of arrays of predictions (if the model has multiple outputs)." 5577,DistributionSingleWorkerTrainingLoop,tensorflow/tensorflow/python/keras/engine/training_distributed.py,577,class,Training loop for distribution strategy with a single worker. 5578,_train_with_multi_worker,tensorflow/tensorflow/python/keras/engine/training_distributed.py,763,function,Decorator that handles multi-worker training with distribution strategy. 5579,DistributionMultiWorkerTrainingLoop,tensorflow/tensorflow/python/keras/engine/training_distributed.py,782,class,Training loop for distribution strategy with multiple workers. 5580,_eager_loss_fn,tensorflow/tensorflow/python/keras/engine/training_eager.py,35,function, 5581,_eager_metrics_fn,tensorflow/tensorflow/python/keras/engine/training_eager.py,41,function,"Calculates the metrics for each output of the given model. Arguments: model: The model on which metrics are being calculated. outputs: The outputs of the given model. targets: The predictions or targets of the given model. sample_weights: Optional list of sample weights for each output. masks: Optional list of masks for each output. Returns: Returns the metric results for each output of the model." 5582,_model_loss,tensorflow/tensorflow/python/keras/engine/training_eager.py,84,function,"Calculates the loss for a given model. Arguments: model: The model on which metrics are being calculated. inputs: Either a dictionary of inputs to the model or a list of input arrays. targets: List of target arrays. output_loss_metrics: List of metrics that are used to aggregate output loss values. sample_weights: Optional list of sample weight arrays. training: Whether the model should be run in inference or training mode. Returns: Returns the model output, total loss, loss value calculated using the specified loss function and masks for each output. The total loss includes regularization losses and applies masking and sample weighting to the loss value." 5583,_process_single_batch,tensorflow/tensorflow/python/keras/engine/training_eager.py,221,function,"Calculate the loss and gradient for one input batch. The model weights are updated if training is set to True. Arguments: model: Model whose loss has to be calculated. inputs: List of input arrays. targets: List of target arrays.
output_loss_metrics: List of metrics that are used to aggregate output loss values. sample_weights: Optional list of sample weight arrays. training: Boolean indicating whether the weights of the model are updated. 'fit' methods will set this to True while 'evaluate' methods will set this to False. Returns: output of the model, total loss, the loss and the mask associated with each output. Raises: ValueError: If the model has no loss to optimize." 5584,train_on_batch,tensorflow/tensorflow/python/keras/engine/training_eager.py,285,function,"Calculates the loss and gradient updates for one input batch. Arguments: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregate output loss values. Returns: Dict with three items: 'total_loss': list with a single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model outputs. Could be an empty list when the model has only one output. 'metrics': list of tensors for the metrics specified." 5585,test_on_batch,tensorflow/tensorflow/python/keras/engine/training_eager.py,326,function,"Calculates the loss for one input batch. Arguments: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregate output loss values. Returns: Dict with three items: 'total_loss': single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model outputs. Could be an empty list when the model has only one output. 'metrics': list of tensors for the metrics specified." 5586,TrainingTest,tensorflow/tensorflow/python/keras/engine/training_eager_test.py,36,class, 5587,CorrectnessTest,tensorflow/tensorflow/python/keras/engine/training_eager_test.py,229,class, 5588,model_iteration,tensorflow/tensorflow/python/keras/engine/training_generator.py,41,function,"Loop function for arrays of data with modes TRAIN/TEST/PREDICT. Arguments: model: Keras Model instance. data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or `(x, y)` or `(x, y, sample_weights)`) or a generator or `keras.utils.data_utils.Sequence` object or Eager Iterator or Dataset. steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. epochs: Number of times to iterate over the data. verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. Note that the progress bar is not particularly useful when logged to a file, so verbose=2 is recommended when not running interactively (e.g., in a production environment). callbacks: List of callbacks to be called during training. validation_data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or `(x, y)` or `(x, y, sample_weights)`) or a generator or `keras.utils.data_utils.Sequence` object or Eager Iterator or Dataset. validation_steps: Total number of steps (batches of samples) before declaring validation finished. validation_freq: Only relevant if validation data is provided. Integer or `collections.abc.Container` instance (e.g. list, tuple, etc.). If an integer, specifies how many training epochs to run before a new validation run is performed, e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies the epochs on which to run validation, e.g.
`validation_freq=[1, 2, 10]` runs validation at the end of the 1st, 2nd, and 10th epochs. class_weight: Dictionary mapping class indices to a weight for the class. max_queue_size: Integer. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to child processes. shuffle: Boolean. Whether to shuffle the order of the batches at the beginning of each epoch. Only used with instances of `Sequence` (`keras.utils.Sequence`). Has no effect when `steps_per_epoch` is not `None`. initial_epoch: Epoch at which to start training (useful for resuming a previous training run). mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. batch_size: Integer batch size or None if unknown. Will only be used if `data` is in NumPy/Tensor format. steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. **kwargs: Additional arguments for backwards compatibility. `steps` is accepted as an alias for `steps_per_epoch`. Returns: - In TRAIN mode: `History` object. - In TEST mode: Evaluation metrics. - In PREDICT mode: Outputs of the Model called on inputs. Raises: ValueError: in case of invalid arguments." 5589,_get_next_batch,tensorflow/tensorflow/python/keras/engine/training_generator.py,347,function,Retrieves the next batch of input data. 5590,_validate_arguments,tensorflow/tensorflow/python/keras/engine/training_generator.py,365,function,"Raises errors if arguments are invalid. Arguments: is_sequence: Boolean, whether data is a `keras.utils.data_utils.Sequence` instance. is_dataset: Boolean, whether data is a dataset instance. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to child processes. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. Ignored with the default value of `None`. validation_data: Either a tuple of NumPy/Tensor inputs (i.e. `(x,)` or `(x, y)` or `(x, y, sample_weights)`) or a generator or `keras.utils.data_utils.Sequence` object or Eager Iterator or Dataset. validation_steps: Total number of steps (batches of samples) before declaring validation finished. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. kwargs: Additional arguments for backwards compatibility. Raises: ValueError: If `steps_per_epoch` or `validation_steps` are not passed for data types that require them, or if unrecognized keyword arguments are passed." 5591,convert_to_generator_like,tensorflow/tensorflow/python/keras/engine/training_generator.py,422,function,"Make a generator out of NumPy or EagerTensor inputs.
Arguments: data: Either a generator or `keras.utils.data_utils.Sequence` object or `Dataset`, `Iterator`, or a {1,2,3}-tuple of NumPy arrays or EagerTensors. If a tuple, the elements represent `(x, y, sample_weights)` and may be `None` or `[None]`. batch_size: Used when creating a generator out of tuples of NumPy arrays or EagerTensors. steps_per_epoch: Steps of the generator to run each epoch. If `None` the number of steps will be read from the data (for `keras.utils.data_utils.Sequence` types). epochs: Total number of epochs to run. shuffle: Whether the data should be shuffled. Returns: - Generator, `keras.utils.data_utils.Sequence`, or `Iterator`. Raises: - ValueError: If `batch_size` is not provided for NumPy or EagerTensor inputs." 5592,_make_enqueued_generator,tensorflow/tensorflow/python/keras/engine/training_generator.py,487,function,Create a buffered queue of next elements of the generator. 5593,_make_execution_function,tensorflow/tensorflow/python/keras/engine/training_generator.py,512,function,Makes function to run one step of model execution. 5594,_get_num_samples_or_steps,tensorflow/tensorflow/python/keras/engine/training_generator.py,533,function,"Returns number of samples or steps, and whether to use steps count mode." 5595,GeneratorOrSequenceTrainingLoop,tensorflow/tensorflow/python/keras/engine/training_generator.py,541,class,"Generator-like. Input is a Python generator, or a Sequence object. The difference between this class and `GeneratorLikeTrainingFunction` is that this class only handles inputs with x, y, and sample_weight fused into one param." 5596,EagerDatasetOrIteratorTrainingLoop,tensorflow/tensorflow/python/keras/engine/training_generator.py,638,class,A non-distributed Dataset or iterator in eager execution. 5597,GeneratorLikeTrainingLoop,tensorflow/tensorflow/python/keras/engine/training_generator.py,711,class,"TrainingLoop that handles inputs like a Python generator. This is the default handler for most input data types, including symbolic tensors, Numpy array-likes, and Datasets and iterators in graph mode (since they generate symbolic tensors). This function is used to handle models with `run_eagerly = True`." 5598,custom_generator,tensorflow/tensorflow/python/keras/engine/training_generator_test.py,44,function, 5599,custom_generator_changing_batch_size,tensorflow/tensorflow/python/keras/engine/training_generator_test.py,67,function, 5600,TestGeneratorMethods,tensorflow/tensorflow/python/keras/engine/training_generator_test.py,95,class, 5601,TestGeneratorMethodsWithSequences,tensorflow/tensorflow/python/keras/engine/training_generator_test.py,383,class, 5602,TestConvertToGeneratorLike,tensorflow/tensorflow/python/keras/engine/training_generator_test.py,489,class, 5603,TrainingGPUTest,tensorflow/tensorflow/python/keras/engine/training_gpu_test.py,32,class, 5604,_conv2d_filter,tensorflow/tensorflow/python/keras/engine/training_integration_test.py,37,function,Convolution with non-default strides and dilation rate is not supported. 5605,_gather_test_cases,tensorflow/tensorflow/python/keras/engine/training_integration_test.py,104,function, 5606,CoreLayerIntegrationTest,tensorflow/tensorflow/python/keras/engine/training_integration_test.py,122,class,Test that layers and models produce the correct tensor types.
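Entry 5591 wraps NumPy tuples in a generator; the stand-alone sketch below shows the core slicing idea (the function name and signature are illustrative assumptions, not the private helper).

```python
import numpy as np


def arrays_to_generator(data, batch_size, epochs=1):
  # data is an (x, y) tuple of arrays; yield batch-sized slices per epoch.
  x, y = data
  num_samples = len(x)
  for _ in range(epochs):
    for start in range(0, num_samples, batch_size):
      yield x[start:start + batch_size], y[start:start + batch_size]


gen = arrays_to_generator((np.zeros((10, 4)), np.ones(10)), batch_size=4)
first_x, first_y = next(gen)
assert first_x.shape == (4, 4) and first_y.shape == (4,)
```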
5607,TrainingTest,tensorflow/tensorflow/python/keras/engine/training_test.py,70,class, 5608,TestExceptionsAndWarnings,tensorflow/tensorflow/python/keras/engine/training_test.py,1622,class, 5609,LossWeightingTest,tensorflow/tensorflow/python/keras/engine/training_test.py,1695,class, 5610,MaskingTest,tensorflow/tensorflow/python/keras/engine/training_test.py,1981,class, 5611,TestDynamicTrainability,tensorflow/tensorflow/python/keras/engine/training_test.py,2044,class, 5612,TestTrainingWithDataTensors,tensorflow/tensorflow/python/keras/engine/training_test.py,2250,class, 5613,TestTrainingWithMetrics,tensorflow/tensorflow/python/keras/engine/training_test.py,2768,class,Training tests related to metrics. 5614,BareUpdateLayer,tensorflow/tensorflow/python/keras/engine/training_test.py,3431,class, 5615,LambdaUpdateLayer,tensorflow/tensorflow/python/keras/engine/training_test.py,3446,class, 5616,NestedUpdateLayer,tensorflow/tensorflow/python/keras/engine/training_test.py,3462,class, 5617,SubgraphUpdateLayer,tensorflow/tensorflow/python/keras/engine/training_test.py,3476,class, 5618,TestAutoUpdates,tensorflow/tensorflow/python/keras/engine/training_test.py,3496,class, 5619,TestFunctionTracing,tensorflow/tensorflow/python/keras/engine/training_test.py,3586,class, 5620,Aggregator,tensorflow/tensorflow/python/keras/engine/training_utils.py,65,class,"Abstract base class used to aggregate batch-level outputs of a loop. Attributes: use_steps: Whether the loop is using `step` or `batch_size`. num_samples: Total number of samples: `batch_size * num_batches`. steps: Total number of steps. batch_size: Batch size. It is used for validation checks between inputs and outputs. results: What to return at the end of the aggregation loop." 5621,MetricsAggregator,tensorflow/tensorflow/python/keras/engine/training_utils.py,112,class,"Aggregator that calculates loss and metrics info. Attributes: use_steps: Whether the loop is using `step` or `batch_size`. num_samples: Total number of samples: `batch_size*num_batches`. steps: Total number of steps, i.e. number of times to iterate over a dataset to cover all samples." 5622,ConcatAggregator,tensorflow/tensorflow/python/keras/engine/training_utils.py,147,class,"Combine tensor-likes which cannot be merged on the fly. This class expects to aggregate a single tensor-like rather than a nested structure of tensor-likes." 5623,get_copy_pool,tensorflow/tensorflow/python/keras/engine/training_utils.py,198,function,"Shared threadpool for copying arrays. Pool instantiation takes ~ 2ms, so a singleton pool is used rather than creating a pool per SliceAggregator. Returns: The global copy threadpool." 5624,SliceAggregator,tensorflow/tensorflow/python/keras/engine/training_utils.py,214,class,"Combine arrays where the final size is known. This class expects to aggregate a single tensor-like rather than a nested structure of tensor-likes. NumPy copies are an operation that threads handle quite well because all of the heavy lifting is in C and does not need the GIL. Moreover, we can perform lock-free writes to the same buffer in multiple threads because the nature of result aggregation guarantees that either the indices are disjoint or the aggregator will throw an exception in finalize. Moreover, because aggregation is performed on the slowest varying dimension, assignments for a given batch will write to contiguous blocks of memory, further minimizing contention.
There is, however, some scheduling and context switching overhead which will offset the gains from pipelining the slice assignment. Below a given threshold it is faster to simply assign in the main thread rather than enqueue the assignment in a side thread. The exact threshold will vary from system to system, but the time is not very sensitive to the exact transition so a value of 2 ** 14 was chosen which should be reasonable on most systems." 5625,OutputsAggregator,tensorflow/tensorflow/python/keras/engine/training_utils.py,312,class,Aggregator that concatenates outputs. 5626,get_progbar,tensorflow/tensorflow/python/keras/engine/training_utils.py,355,function,Get Progbar. 5627,slice_arrays,tensorflow/tensorflow/python/keras/engine/training_utils.py,366,function,"Slices batches out of provided arrays (workaround for eager tensors). Unfortunately eager tensors don't have the same slicing behavior as Numpy arrays (they follow the same slicing behavior as symbolic TF tensors), hence we cannot use `generic_utils.slice_arrays` directly and we have to implement this workaround based on `concat`. This has a performance cost. Arguments: arrays: Single array or list of arrays. indices: List of indices in the array that should be included in the output batch. contiguous: Boolean flag indicating whether the indices are contiguous. Returns: Slice of data (either single array or list of arrays)." 5628,check_num_samples,tensorflow/tensorflow/python/keras/engine/training_utils.py,402,function,"Determine the number of samples provided for training and evaluation. The number of samples is not defined when running with `steps`, in which case the number of samples is set to `None`. Arguments: ins: List of tensors to be fed to the Keras function. batch_size: Integer batch size or `None` if not defined. steps: Total number of steps (batches of samples) before declaring `_predict_loop` finished. Ignored with the default value of `None`. steps_name: The public API's parameter name for `steps`. Raises: ValueError: when `steps` is `None` and the attribute `ins.shape` does not exist. Also raises ValueError when `steps` is not `None` and `batch_size` is not `None` because they are mutually exclusive. Returns: When steps is `None`, returns the number of samples to be processed based on the size of the first dimension of the first input numpy array. When steps is not `None` and `batch_size` is `None`, returns `None`." 5629,standardize_single_array,tensorflow/tensorflow/python/keras/engine/training_utils.py,438,function,"Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1." 5630,standardize_input_data,tensorflow/tensorflow/python/keras/engine/training_utils.py,459,function,"Normalizes inputs and targets provided by users. Users may pass data as a list of arrays, dictionary of arrays, or as a single array. We normalize this to an ordered list of arrays (same order as `names`), while checking that the provided arrays have shapes that match the network's expectations. Arguments: data: User-provided input data (polymorphic). names: List of expected array names. shapes: Optional list of expected array shapes. check_batch_axis: Boolean; whether to check that the batch axis of the arrays matches the expected value found in `shapes`. exception_prefix: String prefix used for exception formatting. Returns: List of standardized input arrays (one array per model input). Raises: ValueError: in case of improperly formatted user-provided data." 
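The size-based dispatch described in the `SliceAggregator` docstring can be sketched as follows. This is an illustration under assumed names (`aggregate_slice`, `_COPY_THRESHOLD`, `_POOL`), not the actual class:

```python
# Illustrative sketch of SliceAggregator's threshold logic: small batches are
# copied synchronously in the main thread, large ones go to a shared pool.
import numpy as np
from multiprocessing.pool import ThreadPool

_COPY_THRESHOLD = 2 ** 14  # the docstring's chosen transition point
_POOL = ThreadPool(4)      # stands in for the shared singleton copy pool

def aggregate_slice(results, batch, start, stop):
    # Writes are lock-free because batch slices target disjoint, contiguous
    # blocks along the slowest-varying dimension of `results`.
    if batch.nbytes < _COPY_THRESHOLD:
        results[start:stop] = batch  # cheap: assign in the main thread
    else:
        _POOL.apply_async(
            lambda: results.__setitem__(slice(start, stop), batch))
```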
5631,standardize_sample_or_class_weights,tensorflow/tensorflow/python/keras/engine/training_utils.py,585,function,"Maps `sample_weight` or `class_weight` to model outputs. Arguments: x_weight: User-provided `sample_weight` or `class_weight` argument. output_names: List of output names (strings) in the model. weight_type: A string used purely for exception printing. Returns: A list of `sample_weight` or `class_weight` where there is exactly one element per model output. Raises: ValueError: In case of invalid user-provided argument." 5632,standardize_class_weights,tensorflow/tensorflow/python/keras/engine/training_utils.py,631,function, 5633,standardize_sample_weights,tensorflow/tensorflow/python/keras/engine/training_utils.py,636,function, 5634,handle_partial_sample_weights,tensorflow/tensorflow/python/keras/engine/training_utils.py,641,function,"Adds 1.0 as sample weights for the outputs for which there is no weight. Args: outputs: List of model outputs. sample_weights: List of sample weight inputs. sample_weight_modes: List of sample weight modes or None. check_all_flat: Ensure that inputs are not nested structures. This is not a free check, so we may not want to run it eagerly every iteration. Returns: Tuple of sample weights, one sample weight for every output, and booleans describing the raw sample weights." 5635,check_array_lengths,tensorflow/tensorflow/python/keras/engine/training_utils.py,700,function,"Does user input validation for numpy arrays. Arguments: inputs: list of Numpy arrays of inputs. targets: list of Numpy arrays of targets. weights: list of Numpy arrays of sample weights. Raises: ValueError: in case of incorrectly formatted data." 5636,check_loss_and_target_compatibility,tensorflow/tensorflow/python/keras/engine/training_utils.py,755,function,"Does validation on the compatibility of targets and loss functions. This helps prevent users from using loss functions incorrectly. This check is purely for UX purposes. Arguments: targets: list of Numpy arrays of targets. loss_fns: list of loss functions. output_shapes: list of shapes of model outputs. Raises: ValueError: if a loss function or target array is incompatible with an output." 5637,collect_per_output_metric_info,tensorflow/tensorflow/python/keras/engine/training_utils.py,814,function,"Maps metric names and functions to model outputs. Arguments: metrics: a list or a list of lists or a dict of metric functions. output_names: a list of the names (strings) of model outputs. output_shapes: a list of the shapes of model outputs. loss_fns: a list of the loss functions corresponding to the model outputs. is_weighted: Boolean indicating whether the given metrics are weighted. Returns: A list (one entry per model output) of dicts. For instance, if the model has 2 outputs, and for the first output we want to compute ""binary_accuracy"" and ""binary_crossentropy"", and just ""binary_accuracy"" for the second output, the list would look like: `[{ 'acc': binary_accuracy(), 'ce': binary_crossentropy(), }, { 'acc': binary_accuracy(), }]` Raises: TypeError: if an incorrect type is passed for the `metrics` argument." 5638,batch_shuffle,tensorflow/tensorflow/python/keras/engine/training_utils.py,893,function,"Shuffles an array in a batch-wise fashion. Useful for shuffling HDF5 arrays (where one cannot access arbitrary indices). Arguments: index_array: array of indices to be shuffled. batch_size: integer. Returns: The `index_array` array, shuffled in a batch-wise fashion."
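For intuition, `batch_shuffle`'s documented behavior can be reproduced with a few lines of NumPy: permute whole batches while leaving the order inside each batch intact. The helper name below is hypothetical; this is a sketch, not the library code:

```python
# Batch-wise shuffle: batches are permuted, items within a batch keep their
# order, so HDF5-style sequential access patterns remain valid.
import numpy as np

def batch_wise_shuffle(index_array, batch_size):
    batch_count = len(index_array) // batch_size
    last = index_array[batch_count * batch_size:]  # trailing partial batch
    batches = index_array[:batch_count * batch_size].reshape(
        batch_count, batch_size)
    np.random.shuffle(batches)  # shuffles along the batch axis only
    return np.concatenate([batches.ravel(), last])

print(batch_wise_shuffle(np.arange(10), batch_size=3))
```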
5639,standardize_weights,tensorflow/tensorflow/python/keras/engine/training_utils.py,917,function,"Performs sample weight validation and standardization. Everything gets normalized to a single sample-wise (or timestep-wise) weight array. If both `sample_weight` and `class_weight` are provided, the weights are multiplied. Arguments: y: Numpy array or Tensor of model targets to be weighted. sample_weight: User-provided `sample_weight` argument. class_weight: User-provided `class_weight` argument. sample_weight_mode: One of `None` or `""temporal""`. `""temporal""` indicates that we expect 2D weight data that will be applied to the last 2 dimensions of the targets (i.e. we are weighting timesteps, not samples). Returns: A numpy array of target weights, one entry per sample to weight. Raises: ValueError: In case of invalid user-provided arguments." 5640,has_symbolic_tensors,tensorflow/tensorflow/python/keras/engine/training_utils.py,1044,function, 5641,has_tensors,tensorflow/tensorflow/python/keras/engine/training_utils.py,1050,function,Returns true if `ls` contains tensors. 5642,get_metric_name,tensorflow/tensorflow/python/keras/engine/training_utils.py,1068,function,"Returns the name corresponding to the given metric input. Arguments: metric: Metric function name or reference. weighted: Boolean indicating if the given metric is weighted. Returns: The metric name." 5643,get_metric_function,tensorflow/tensorflow/python/keras/engine/training_utils.py,1103,function,"Returns the metric function corresponding to the given metric input. Arguments: metric: Metric function name or reference. output_shape: The shape of the output that this metric will be calculated for. loss_fn: The loss function used. Returns: The metric function." 5644,call_metric_function,tensorflow/tensorflow/python/keras/engine/training_utils.py,1145,function,Invokes metric function and returns the metric result tensor. 5645,get_loss_function,tensorflow/tensorflow/python/keras/engine/training_utils.py,1169,function,Returns the loss corresponding to the loss input in `compile` API. 5646,RespectCompiledTrainableState,tensorflow/tensorflow/python/keras/engine/training_utils.py,1201,class,"Set and restore trainable state if it has changed since compile. The Keras API guarantees that the value of each Layer's `trainable` property at `Model.compile` time will be used when training that model. In order to respect this requirement, it may be necessary to set the trainable value of layers to their compile time values before beginning a training endpoint and restore the values before returning from said endpoint. This scope checks if any layer's trainable state has changed since Model compile, and performs this set and un-set bookkeeping. However, the trainable state of a layer changes quite infrequently, if ever, for many kinds of workflows. Moreover, updating every layer in a model is an expensive operation. As a result, we will only explicitly set and unset the trainable state of a model if a trainable value has changed since compile." 5647,validate_dataset_input,tensorflow/tensorflow/python/keras/engine/training_utils.py,1247,function,"Validates user input arguments when a dataset iterator is passed. Arguments: x: Input data. A `tf.data` dataset or iterator. y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s). Expected to be `None` when `x` is a dataset iterator. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`.
Expected to be `None` when `x` is a dataset iterator. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Expected to be `None` when `x` is a dataset iterator. Raises: ValueError: if the `y`, `sample_weight`, or `validation_split` argument is provided by the user." 5648,validate_input_types,tensorflow/tensorflow/python/keras/engine/training_utils.py,1285,function,Helper function to validate either inputs or targets. 5649,check_generator_arguments,tensorflow/tensorflow/python/keras/engine/training_utils.py,1303,function,Validates arguments passed when using a generator. 5650,check_steps_argument,tensorflow/tensorflow/python/keras/engine/training_utils.py,1319,function,"Validates `steps` argument based on input data's type. The cases when a `steps` value must be provided are when: 1. input data passed is an iterator; 2. model was built on top of symbolic tensors, input data is not required and is `None`; 3. input data passed is a symbolic tensor. Arguments: input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or tf.data.Dataset iterator or `None`. steps: Integer or `None`. Total number of steps (batches of samples) to execute. steps_name: The public API's parameter name for `steps`. Returns: boolean, True if `steps` argument is required, else False. Raises: ValueError: if `steps` argument is required for given input data type but not provided." 5651,cast_single_tensor,tensorflow/tensorflow/python/keras/engine/training_utils.py,1367,function, 5652,cast_if_floating_dtype_and_mismatch,tensorflow/tensorflow/python/keras/engine/training_utils.py,1376,function,"Returns target data tensors using correct datatype. Checks that each target and output pair are the same datatype. If not, casts the target to the output's datatype. Args: targets: tensor or list of targets. outputs: tensor or list of outputs. Returns: Targets in appropriate datatype." 5653,cast_if_floating_dtype,tensorflow/tensorflow/python/keras/engine/training_utils.py,1403,function,"Casts the given data tensors to the default floating point type. Casts only if the input is already a floating point type. Args: x: tensor or list/tuple of tensors. dtype: The dtype to which Tensors should be cast. Returns: Converted input." 5654,cast_to_model_input_dtypes,tensorflow/tensorflow/python/keras/engine/training_utils.py,1418,function,"Casts the given data tensors to the dtypes of the model inputs. Args: x: tensor or list/tuple of tensors. model: The model. Returns: Converted input. Each tensor is cast to the corresponding input in `model.inputs`." 5655,prepare_sample_weight_modes,tensorflow/tensorflow/python/keras/engine/training_utils.py,1433,function,"Prepares sample weight modes for the model. Args: training_endpoints: List of model _TrainingEndpoints. sample_weight_mode: sample weight mode user input passed from compile API. Raises: ValueError: In case of invalid `sample_weight_mode` input." 5656,prepare_loss_functions,tensorflow/tensorflow/python/keras/engine/training_utils.py,1473,function,"Converts loss to a list of loss functions. Arguments: loss: String (name of objective function), objective function or `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. output_names: List of model output names. Returns: A list of loss objective functions.
Raises: ValueError: If loss is a dict with keys not in model output names, or if loss is a list with len not equal to model outputs." 5657,prepare_loss_weights,tensorflow/tensorflow/python/keras/engine/training_utils.py,1515,function,"Converts loss weights to a list of loss weights. The resulting loss weights will be populated on the training endpoint. Arguments: training_endpoints: List of model training endpoints. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a dict, it is expected to map output names (strings) to scalar coefficients. Raises: ValueError: If loss weight is a dict with key not in model output names, or if loss weights is a list with len not equal to model outputs." 5658,is_feature_layer,tensorflow/tensorflow/python/keras/engine/training_utils.py,1559,function,Returns whether `layer` is a FeatureLayer or not. 5659,is_eager_dataset_or_iterator,tensorflow/tensorflow/python/keras/engine/training_utils.py,1564,function, 5660,assert_not_batched,tensorflow/tensorflow/python/keras/engine/training_utils.py,1571,function,"Asserts that `dataset` is not batched. The algorithm used by this method is sound but not complete. In other words, if the method fails to establish the assertion, it does not mean the dataset is batched. Example usage: ```python try: assert_not_batched(dataset) # safe to assume `dataset` is not batched here except ValueError: # make no assumptions about `dataset` ``` Args: dataset: The dataset to analyze. Raises: ValueError: If the method cannot establish the assertion." 5661,assert_not_shuffled,tensorflow/tensorflow/python/keras/engine/training_utils.py,1626,function,"Asserts that `dataset` is not shuffled. The algorithm used by this method is sound but not complete. In other words, if the method fails to establish the assertion, it does not mean the dataset is shuffled. Example usage: ```python try: assert_not_shuffled(dataset) # safe to assume `dataset` is not shuffled here except ValueError: # make no assumptions about `dataset` ``` Args: dataset: The dataset to analyze. Raises: ValueError: If the method cannot establish the assertion." 5662,verify_dataset_shuffled,tensorflow/tensorflow/python/keras/engine/training_utils.py,1682,function,"Verifies that the dataset is shuffled. Args: x: Dataset passed as an input to the model. Raises: ValueError: if the dataset is not already shuffled." 5663,is_dataset_or_iterator,tensorflow/tensorflow/python/keras/engine/training_utils.py,1702,function, 5664,get_iterator,tensorflow/tensorflow/python/keras/engine/training_utils.py,1707,function,Create and initialize an iterator from a dataset. 5665,initialize_iterator,tensorflow/tensorflow/python/keras/engine/training_utils.py,1717,function, 5666,extract_tensors_from_dataset,tensorflow/tensorflow/python/keras/engine/training_utils.py,1723,function,"Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Arguments: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None." 5667,unpack_iterator_input,tensorflow/tensorflow/python/keras/engine/training_utils.py,1737,function,"Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.
Arguments: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None." 5668,infer_steps_for_dataset,tensorflow/tensorflow/python/keras/engine/training_utils.py,1771,function,"Infers steps_per_epoch needed to loop through a dataset. Arguments: model: Keras model instance. dataset: Input data of type tf.data.Dataset. steps: Number of steps to draw from the dataset (may be None if unknown). epochs: Number of times to iterate over the dataset. steps_name: The string name of the steps argument, either `steps`, `validation_steps`, or `steps_per_epoch`. Only used for error message formatting. Returns: Integer or `None`. Inferred number of steps to loop through the dataset. `None` is returned if 1) the size of the dataset is unknown and `steps` was not specified, or 2) this is multi-worker training and auto sharding is enabled. Raises: ValueError: In case of invalid argument values." 5669,ModelInputs,tensorflow/tensorflow/python/keras/engine/training_utils.py,1829,class,"Encapsulates model inputs. Allows for transforming model inputs while keeping the same structure." 5670,get_input_shape_and_dtype,tensorflow/tensorflow/python/keras/engine/training_utils.py,1910,function,"Retrieves input shape and input dtype of layer if applicable. Args: layer: Layer (or model) instance. Returns: Tuple (input_shape, input_dtype). Both could be None if the layer does not have a defined input shape. Raises: ValueError: in case an empty Sequential or Functional model is passed." 5671,get_static_batch_size,tensorflow/tensorflow/python/keras/engine/training_utils.py,1944,function,"Gets the static batch size of a Layer. Arguments: layer: a `Layer` instance. Returns: The static batch size of a Layer." 5672,generic_output_names,tensorflow/tensorflow/python/keras/engine/training_utils.py,1959,function, 5673,convert_eager_tensors_to_numpy,tensorflow/tensorflow/python/keras/engine/training_utils.py,1963,function,"Convert every EagerTensor in `structure` to NumPy. Arguments: structure: An arbitrary structure of elements to be converted to NumPy arrays. Returns: An identical structure with EagerTensors converted to NumPy arrays." 5674,list_to_tuple,tensorflow/tensorflow/python/keras/engine/training_utils.py,1982,function,"Datasets will stack the list of tensors, so switch them to tuples." 5675,should_run_validation,tensorflow/tensorflow/python/keras/engine/training_utils.py,1989,function,"Checks if validation should be run this epoch. Arguments: validation_freq: Integer or list. If an integer, specifies how many training epochs to run before a new validation run is performed. If a list, specifies the epochs on which to run validation. epoch: Integer, the number of the training epoch just completed. Returns: Bool, True if validation should be run. Raises: ValueError: if `validation_freq` is an Integer and less than 1, or if it is neither an Integer nor a Sequence." 5676,split_training_and_validation_data,tensorflow/tensorflow/python/keras/engine/training_utils.py,2019,function,Split input data into train/eval section based on validation_split. 5677,unpack_validation_data,tensorflow/tensorflow/python/keras/engine/training_utils.py,2042,function,"Unpacks validation data based on its input type. The validation data is not touched if it is a dataset or dataset iterator. For other types of input (Numpy or tensor), it will be unpacked into a tuple of 3: x, y and sample weights. Args: validation_data: dataset, dataset iterator, or numpy, tensor tuple.
raise_if_ambiguous: boolean on whether to fail if validation_data cannot be parsed. Otherwise simply return validation_data, None, None and defer the decision to the caller. Returns: tuple of 3, (x, y, sample_weights) for numpy and tensor input." 5678,TrainingLoop,tensorflow/tensorflow/python/keras/engine/training_utils.py,2090,class,"TrainingLoop is a wrapper class around the training logic. This class encapsulates the different logic of fit/eval/predict with regard to different data inputs and model conditions. Note that TrainingLoop is stateless, which means it doesn't contain any internal fields and can be reused with different models and inputs." 5679,ModelInputsTest,tensorflow/tensorflow/python/keras/engine/training_utils_test.py,44,class, 5680,DatasetUtilsTest,tensorflow/tensorflow/python/keras/engine/training_utils_test.py,134,class, 5681,StandardizeWeightsTest,tensorflow/tensorflow/python/keras/engine/training_utils_test.py,244,class, 5682,MonitoredPool,tensorflow/tensorflow/python/keras/engine/training_utils_test.py,287,class, 5683,add_sleep,tensorflow/tensorflow/python/keras/engine/training_utils_test.py,301,function, 5684,cause_error,tensorflow/tensorflow/python/keras/engine/training_utils_test.py,309,function, 5685,AggregationTest,tensorflow/tensorflow/python/keras/engine/training_utils_test.py,326,class, 5686,Model,tensorflow/tensorflow/python/keras/engine/training_v1.py,79,class,"`Model` groups layers into an object with training and inference features. There are two ways to instantiate a `Model`: 1 - With the ""functional API"", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__` and you should implement the model's forward pass in `call`. ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ```" 5687,DistributedCallbackModel,tensorflow/tensorflow/python/keras/engine/training_v1.py,2834,class,Model that is used for callbacks with tf.distribute.Strategy. 5688,_TrainingEndpoint,tensorflow/tensorflow/python/keras/engine/training_v1.py,2874,class,"A container for the training output/target and related entities. In the case of a model with multiple outputs, there is a one-to-one mapping between model output (y_pred), model target (y_true), loss, metrics etc.
By unifying these entities into one class, different entities can access information about each other, rather than reaching into separate lists of attributes on the model." 5689,_TrainingTarget,tensorflow/tensorflow/python/keras/engine/training_v1.py,3106,class,"Container for a target tensor (y_true) and its metadata (shape, loss...). Arguments: target: A target tensor for the model. It may be `None` if the output is excluded from loss computation. It is still kept as None since each output of the model should have a corresponding target. If the target is None, the rest of the attributes will be None as well. feedable: Boolean, whether the target is feedable (requires data to be passed in `fit` or `train_on_batch`), or not (model compiled with `target_tensors` argument). skip_target_weights: Boolean, whether the target should be skipped during weights calculation." 5690,_is_symbolic_tensor,tensorflow/tensorflow/python/keras/engine/training_v1.py,3139,function, 5691,_convert_scipy_sparse_tensor,tensorflow/tensorflow/python/keras/engine/training_v1.py,3143,function,"Handle scipy sparse tensor conversions. This method takes a value 'value' and returns the proper conversion. If value is a scipy sparse tensor and the expected input is a dense tensor, we densify 'value'. If value is a scipy sparse tensor and the expected input is a TF SparseTensor, we convert 'value' to a SparseTensor. If 'value' is not a scipy sparse tensor, or scipy is not imported, we pass it through unchanged. Arguments: value: An object that may be a scipy sparse tensor. expected_input: The expected input placeholder. Returns: The possibly-converted 'value'." 5692,_get_metrics_from_layers,tensorflow/tensorflow/python/keras/engine/training_v1.py,3179,function,"Returns list of metrics from the given layers. This will not include the `compile` metrics of a model layer. Arguments: layers: List of layers. Returns: List of metrics." 5693,_non_none_constant_value,tensorflow/tensorflow/python/keras/engine/training_v1.py,3203,function, 5694,model_to_estimator,tensorflow/tensorflow/python/keras/estimator/__init__.py,35,function,"Constructs an `Estimator` instance from a given Keras model. If you use infrastructure or other tooling that relies on Estimators, you can still build a Keras model and use model_to_estimator to convert the Keras model to an Estimator for use with downstream systems. For usage example, please see: [Creating estimators from Keras Models]( https://www.tensorflow.org/guide/estimators#creating_estimators_from_keras_models). Sample Weights: Estimators returned by `model_to_estimator` are configured so that they can handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample weights when training or evaluating the Estimator, the first item returned by the input function should be a dictionary with keys `features` and `sample_weights`. Example below: ```python keras_model = tf.keras.Model(...) keras_model.compile(...) estimator = tf.keras.estimator.model_to_estimator(keras_model) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Args: keras_model: A compiled Keras model object. This argument is mutually exclusive with `keras_model_path`. Estimator's `model_fn` uses the structure of the model to clone the model. Defaults to `None`.
keras_model_path: Path to a compiled Keras model saved on disk, in HDF5 format, which can be generated with the `save()` method of a Keras model. This argument is mutually exclusive with `keras_model`. Defaults to `None`. custom_objects: Dictionary for cloning customized objects. This is used with classes that are not part of this pip package. For example, if a user maintains a `relu6` class that inherits from `tf.keras.layers.Layer`, then pass `custom_objects={'relu6': relu6}`. Defaults to `None`. model_dir: Directory to save `Estimator` model parameters, graph, summary files for TensorBoard, etc. If unset, a directory will be created with `tempfile.mkdtemp`. config: `RunConfig` to configure the `Estimator`. Allows setting up things in `model_fn` based on configuration such as `num_ps_replicas`, or `model_dir`. Defaults to `None`. If both `config.model_dir` and the `model_dir` argument (above) are specified the `model_dir` **argument** takes precedence. checkpoint_format: Sets the format of the checkpoint saved by the estimator when training. May be `saver` or `checkpoint`, depending on whether to save checkpoints from `tf.train.Saver` or `tf.train.Checkpoint`. This argument currently defaults to `saver`. When 2.0 is released, the default will be `checkpoint`. Estimators use name-based `tf.train.Saver` checkpoints, while Keras models use object-based checkpoints from `tf.train.Checkpoint`. Currently, saving object-based checkpoints from `model_to_estimator` is only supported by Functional and Sequential models. Defaults to 'saver'. Returns: An Estimator from the given Keras model. Raises: ValueError: If neither keras_model nor keras_model_path was given. ValueError: If both keras_model and keras_model_path were given. ValueError: If the keras_model_path is a GCS URI. ValueError: If keras_model has not been compiled. ValueError: If an invalid checkpoint_format was given." 5695,model_to_estimator_v2,tensorflow/tensorflow/python/keras/estimator/__init__.py,133,function,"Constructs an `Estimator` instance from a given Keras model. If you use infrastructure or other tooling that relies on Estimators, you can still build a Keras model and use model_to_estimator to convert the Keras model to an Estimator for use with downstream systems. For usage example, please see: [Creating estimators from Keras Models]( https://www.tensorflow.org/guide/estimators#creating_estimators_from_keras_models). Sample Weights: Estimators returned by `model_to_estimator` are configured so that they can handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`). To pass sample weights when training or evaluating the Estimator, the first item returned by the input function should be a dictionary with keys `features` and `sample_weights`. Example below: ```python keras_model = tf.keras.Model(...) keras_model.compile(...) estimator = tf.keras.estimator.model_to_estimator(keras_model) def input_fn(): return dataset_ops.Dataset.from_tensors( ({'features': features, 'sample_weights': sample_weights}, targets)) estimator.train(input_fn, steps=1) ``` Note: We do not support creating weighted metrics in Keras and converting them to weighted metrics in the Estimator API using `model_to_estimator`. You will have to create these metrics directly on the estimator spec using the `add_metrics` function.
To customize the estimator `eval_metric_ops` names, you can pass in the `metric_names_map` dictionary mapping the keras model output metric names to the custom names as follows: ```python input_a = tf.keras.layers.Input(shape=(16,), name='input_a') input_b = tf.keras.layers.Input(shape=(16,), name='input_b') dense = tf.keras.layers.Dense(8, name='dense_1') interm_a = dense(input_a) interm_b = dense(input_b) merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge') output_a = tf.keras.layers.Dense(3, activation='softmax', name='dense_2')( merged) output_b = tf.keras.layers.Dense(2, activation='softmax', name='dense_3')( merged) keras_model = tf.keras.models.Model( inputs=[input_a, input_b], outputs=[output_a, output_b]) keras_model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics={ 'dense_2': 'categorical_accuracy', 'dense_3': 'categorical_accuracy' }) metric_names_map = { 'dense_2_categorical_accuracy': 'acc_1', 'dense_3_categorical_accuracy': 'acc_2', } keras_est = tf.keras.estimator.model_to_estimator( keras_model=keras_model, config=config, metric_names_map=metric_names_map) ``` Args: keras_model: A compiled Keras model object. This argument is mutually exclusive with `keras_model_path`. Estimator's `model_fn` uses the structure of the model to clone the model. Defaults to `None`. keras_model_path: Path to a compiled Keras model saved on disk, in HDF5 format, which can be generated with the `save()` method of a Keras model. This argument is mutually exclusive with `keras_model`. Defaults to `None`. custom_objects: Dictionary for cloning customized objects. This is used with classes that are not part of this pip package. For example, if a user maintains a `relu6` class that inherits from `tf.keras.layers.Layer`, then pass `custom_objects={'relu6': relu6}`. Defaults to `None`. model_dir: Directory to save `Estimator` model parameters, graph, summary files for TensorBoard, etc. If unset, a directory will be created with `tempfile.mkdtemp`. config: `RunConfig` to configure the `Estimator`. Allows setting up things in `model_fn` based on configuration such as `num_ps_replicas`, or `model_dir`. Defaults to `None`. If both `config.model_dir` and the `model_dir` argument (above) are specified the `model_dir` **argument** takes precedence. checkpoint_format: Sets the format of the checkpoint saved by the estimator when training. May be `saver` or `checkpoint`, depending on whether to save checkpoints from `tf.compat.v1.train.Saver` or `tf.train.Checkpoint`. The default is `checkpoint`. Estimators use name-based `tf.train.Saver` checkpoints, while Keras models use object-based checkpoints from `tf.train.Checkpoint`. Currently, saving object-based checkpoints from `model_to_estimator` is only supported by Functional and Sequential models. Defaults to 'checkpoint'. metric_names_map: Optional dictionary mapping Keras model output metric names to custom names. This can be used to override the default Keras model output metrics names in a multi-IO model use case and provide custom names for the `eval_metric_ops` in Estimator. The Keras model metric names can be obtained using `model.metrics_names` excluding any loss metrics such as total loss and output losses. For example, if your Keras model has two outputs `out_1` and `out_2`, with `mse` loss and `acc` metric, then `model.metrics_names` will be `['loss', 'out_1_loss', 'out_2_loss', 'out_1_acc', 'out_2_acc']`. The model metric names excluding the loss metrics will be `['out_1_acc', 'out_2_acc']`.
Returns: An Estimator from the given Keras model. Raises: ValueError: If neither keras_model nor keras_model_path was given. ValueError: If both keras_model and keras_model_path were given. ValueError: If the keras_model_path is a GCS URI. ValueError: If keras_model has not been compiled. ValueError: If an invalid checkpoint_format was given." 5696,_BaseFeaturesLayer,tensorflow/tensorflow/python/keras/feature_column/base_feature_layer.py,32,class,"Base class for DenseFeatures and SequenceFeatures. Defines common methods and helpers. Args: feature_columns: An iterable containing the FeatureColumns to use as inputs to your model. expected_column_type: Expected class for provided feature columns. trainable: Boolean, whether the layer's variables will be updated via gradient descent during training. name: Name to give to the DenseFeatures. **kwargs: Keyword arguments to construct a layer. Raises: ValueError: if an item in `feature_columns` doesn't match `expected_column_type`." 5697,DenseFeatures,tensorflow/tensorflow/python/keras/feature_column/dense_features.py,31,class,"A layer that produces a dense `Tensor` based on given `feature_columns`. Generally a single example in training data is described with FeatureColumns. At the first layer of the model, this column-oriented data should be converted to a single `Tensor`. This layer can be called multiple times with different features. This is the V1 version of this layer that uses variable scopes or a partitioner to create variables, which works well with PartitionedVariables. Variable scopes are deprecated in V2, so the V2 version uses name_scopes instead. But currently that lacks support for partitioned variables. Use this if you need partitioned variables. Use the partitioner argument if you have a Keras model and use `tf.compat.v1.keras.estimator.model_to_estimator` for training. Example: ```python price = tf.feature_column.numeric_column('price') keywords_embedded = tf.feature_column.embedding_column( tf.feature_column.categorical_column_with_hash_bucket(""keywords"", 10000), dimension=16) columns = [price, keywords_embedded, ...] partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=4) feature_layer = tf.compat.v1.keras.layers.DenseFeatures( feature_columns=columns, partitioner=partitioner) features = tf.io.parse_example( ..., features=tf.feature_column.make_parse_example_spec(columns)) dense_tensor = feature_layer(features) for units in [128, 64, 32]: dense_tensor = tf.compat.v1.keras.layers.Dense( units, activation='relu')(dense_tensor) prediction = tf.compat.v1.keras.layers.Dense(1)(dense_tensor) ```" 5698,_initialized_session,tensorflow/tensorflow/python/keras/feature_column/dense_features_test.py,46,function, 5699,DenseFeaturesTest,tensorflow/tensorflow/python/keras/feature_column/dense_features_test.py,53,class, 5700,IndicatorColumnTest,tensorflow/tensorflow/python/keras/feature_column/dense_features_test.py,684,class, 5701,EmbeddingColumnTest,tensorflow/tensorflow/python/keras/feature_column/dense_features_test.py,704,class, 5702,SharedEmbeddingColumnTest,tensorflow/tensorflow/python/keras/feature_column/dense_features_test.py,892,class, 5703,DenseFeaturesSerializationTest,tensorflow/tensorflow/python/keras/feature_column/dense_features_test.py,1019,class, 5704,SequenceFeatureColumnsTest,tensorflow/tensorflow/python/keras/feature_column/dense_features_test.py,1084,class,Tests DenseFeatures with sequence feature columns.
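As a usage note for the mutually exclusive `keras_model`/`keras_model_path` arguments documented above, the following hedged sketch (paths are placeholders) saves a compiled model to HDF5 with `save()` and then converts it by path:

```python
# Sketch: converting a Keras model to an Estimator via keras_model_path.
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))])
model.compile(optimizer='sgd', loss='mse')
model.save('/tmp/my_model.h5')  # HDF5 format, as the docstring requires

# Pass the path instead of the object; never both, and not a GCS URI.
estimator = tf.keras.estimator.model_to_estimator(
    keras_model_path='/tmp/my_model.h5')
```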
5705,DenseFeatures,tensorflow/tensorflow/python/keras/feature_column/dense_features_v2.py,29,class,"A layer that produces a dense `Tensor` based on given `feature_columns`. Generally a single example in training data is described with FeatureColumns. At the first layer of the model, this column-oriented data should be converted to a single `Tensor`. This layer can be called multiple times with different features. This is the V2 version of this layer that uses name_scopes to create variables instead of variable_scopes. But this approach currently lacks support for partitioned variables. In that case, use the V1 version instead. Example: ```python price = tf.feature_column.numeric_column('price') keywords_embedded = tf.feature_column.embedding_column( tf.feature_column.categorical_column_with_hash_bucket(""keywords"", 10000), dimension=16) columns = [price, keywords_embedded, ...] feature_layer = tf.keras.layers.DenseFeatures(columns) features = tf.io.parse_example( ..., features=tf.feature_column.make_parse_example_spec(columns)) dense_tensor = feature_layer(features) for units in [128, 64, 32]: dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor) prediction = tf.keras.layers.Dense(1)(dense_tensor) ```" 5706,_initialized_session,tensorflow/tensorflow/python/keras/feature_column/dense_features_v2_test.py,41,function, 5707,DenseFeaturesTest,tensorflow/tensorflow/python/keras/feature_column/dense_features_v2_test.py,48,class, 5708,SequenceFeatures,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column.py,36,class,"A layer for sequence input. All `feature_columns` must be sequence dense columns with the same `sequence_length`. The output of this method can be fed into sequence networks, such as RNN. The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`. `T` is the maximum sequence length for this batch, which could differ from batch to batch. If multiple `feature_columns` are given with `Di` `num_elements` each, their outputs are concatenated. So, the final `Tensor` has shape `[batch_size, T, D0 + D1 + ... + Dn]`. Example: ```python # Behavior of some cells or feature columns may depend on whether we are in # training or inference mode, e.g. applying dropout. training = True rating = sequence_numeric_column('rating') watches = sequence_categorical_column_with_identity( 'watches', num_buckets=1000) watches_embedding = embedding_column(watches, dimension=10) columns = [rating, watches_embedding] sequence_input_layer = SequenceFeatures(columns) features = tf.io.parse_example(..., features=make_parse_example_spec(columns)) sequence_input, sequence_length = sequence_input_layer( features, training=training) sequence_length_mask = tf.sequence_mask(sequence_length) rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size) rnn_layer = tf.keras.layers.RNN(rnn_cell, return_state=True) outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask, training=training) ```" 5709,_assert_all_equal_and_return,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column.py,165,function,Asserts that all tensors are equal and returns the first one.
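To complement the `DenseFeatures` examples above, here is a short, hedged sketch (feature names are assumptions) of the V2 layer used as the first layer of a functional Keras model, fed a dict of feature tensors:

```python
# Sketch: DenseFeatures (V2) as the first layer of a functional Keras model.
import tensorflow as tf

price = tf.feature_column.numeric_column('price')
feature_layer = tf.keras.layers.DenseFeatures([price])

# Feature-column layers take a dict mapping feature names to tensors.
inputs = {'price': tf.keras.Input(shape=(1,), name='price')}
x = feature_layer(inputs)
outputs = tf.keras.layers.Dense(1)(x)

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='sgd', loss='mse')
```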
5710,SequenceFeatureColumnIntegrationTest,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column_integration_test.py,41,class, 5711,_make_sequence_example,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column_integration_test.py,253,function, 5712,_initialized_session,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column_test.py,42,function, 5713,SequenceFeaturesTest,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column_test.py,50,class, 5714,SequenceFeaturesSerializationTest,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column_test.py,573,class, 5715,SequenceFeaturesSavingTest,tensorflow/tensorflow/python/keras/feature_column/sequence_feature_column_test.py,614,class, 5716,populate_deserializable_objects,tensorflow/tensorflow/python/keras/initializers/__init__.py,38,function,"Populates dict ALL_OBJECTS with every built-in initializer. " 5717,serialize,tensorflow/tensorflow/python/keras/initializers/__init__.py,134,function, 5718,deserialize,tensorflow/tensorflow/python/keras/initializers/__init__.py,139,function,Return an `Initializer` object from its config. 5719,get,tensorflow/tensorflow/python/keras/initializers/__init__.py,150,function, 5720,RandomNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v1.py,44,class, 5721,RandomUniform,tensorflow/tensorflow/python/keras/initializers/initializers_v1.py,54,class, 5722,TruncatedNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v1.py,64,class, 5723,LecunNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v1.py,72,class, 5724,LecunUniform,tensorflow/tensorflow/python/keras/initializers/initializers_v1.py,83,class, 5725,HeNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v1.py,94,class, 5726,HeUniform,tensorflow/tensorflow/python/keras/initializers/initializers_v1.py,105,class, 5727,Initializer,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,30,class,"Initializer base class: all Keras initializers inherit from this class. Initializers should implement a `__call__` method with the following signature: ```python def __call__(self, shape, dtype=None): # returns a tensor of shape `shape` and dtype `dtype` # containing values drawn from a distribution of your choice. ``` Optionally, you can also implement the method `get_config` and the class method `from_config` in order to support serialization -- just like with any Keras object. Here's a simple example: a random normal initializer. ```python import tensorflow as tf class ExampleRandomNormal(tf.keras.initializers.Initializer): def __init__(self, mean, stddev): self.mean = mean self.stddev = stddev def __call__(self, shape, dtype=None): return tf.random.normal( shape, mean=self.mean, stddev=self.stddev, dtype=dtype) def get_config(self): # To support serialization return {""mean"": self.mean, ""stddev"": self.stddev} ``` Note that we don't have to implement `from_config` in the example above since the constructor arguments of the class and the keys in the config returned by `get_config` are the same. In this case, the default `from_config` works fine." 5728,Zeros,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,111,class,"Initializer that generates tensors initialized to 0. Also available via the shortcut function `tf.keras.initializers.zeros`.
Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Zeros() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Zeros() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)" 5729,Ones,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,141,class,"Initializer that generates tensors initialized to 1. Also available via the shortcut function `tf.keras.initializers.ones`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Ones() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Ones() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)" 5730,Constant,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,173,class,"Initializer that generates tensors with constant values. Also available via the shortcut function `tf.keras.initializers.constant`. Only scalar values are allowed. The constant value provided must be convertible to the dtype requested when calling the initializer. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Constant(3.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Constant(3.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: value: A Python scalar." 5731,RandomUniform,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,219,class,"Initializer that generates tensors with a uniform distribution. Also available via the shortcut function `tf.keras.initializers.random_uniform`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate (inclusive). maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype." 5732,RandomNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,261,class,"Initializer that generates tensors with a normal distribution. Also available via the shortcut function `tf.keras.initializers.random_normal`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype." 5733,TruncatedNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,302,class,"Initializer that generates a truncated normal distribution. Also available via the shortcut function `tf.keras.initializers.truncated_normal`. 
The values generated are similar to values from a `tf.keras.initializers.RandomNormal` initializer except that values more than two standard deviations from the mean are discarded and re-drawn. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype." 5734,VarianceScaling,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,348,class,"Initializer capable of adapting its scale to the shape of weights tensors. Also available via the shortcut function `tf.keras.initializers.variance_scaling`. With `distribution=""truncated_normal"" or ""untruncated_normal""`, samples are drawn from a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`, where `n` is: - number of input units in the weight tensor, if `mode=""fan_in""` - number of output units, if `mode=""fan_out""` - average of the numbers of input and output units, if `mode=""fan_avg""` With `distribution=""uniform""`, samples are drawn from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.VarianceScaling( ... scale=0.1, mode='fan_in', distribution='uniform') >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.VarianceScaling( ... scale=0.1, mode='fan_in', distribution='uniform') >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: scale: Scaling factor (positive float). mode: One of ""fan_in"", ""fan_out"", ""fan_avg"". distribution: Random distribution to use. One of ""truncated_normal"", ""untruncated_normal"" and ""uniform"". seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype." 5735,Orthogonal,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,403,class,"Initializer that generates an orthogonal matrix. Also available via the shortcut function `tf.keras.initializers.orthogonal`. If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector. The matrix is subsequently reshaped to give a tensor of the desired shape. 
Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Orthogonal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Orthogonal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: multiplicative factor to apply to the orthogonal matrix seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) ([pdf](https://arxiv.org/pdf/1312.6120.pdf))" 5736,Identity,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,455,class,"Initializer that generates the identity matrix. Also available via the shortcut function `tf.keras.initializers.identity`. Only usable for generating 2D matrices. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Identity() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Identity() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the identity matrix." 5737,GlorotUniform,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,492,class,"The Glorot uniform initializer, also called Xavier uniform initializer. Also available via the shortcut function `tf.keras.initializers.glorot_uniform`. Draws samples from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.GlorotUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.GlorotUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))" 5738,GlorotNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,535,class,"The Glorot normal initializer, also called Xavier normal initializer. Also available via the shortcut function `tf.keras.initializers.glorot_normal`. Draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.GlorotNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.GlorotNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))" 5739,LecunNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,579,class,"Lecun normal initializer. Also available via the shortcut function `tf.keras.initializers.lecun_normal`. 
Draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.LecunNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.LecunNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Arguments: seed: A Python integer. Used to seed the random generator. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)" 5740,LecunUniform,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,627,class,"Lecun uniform initializer. Also available via the shortcut function `tf.keras.initializers.lecun_uniform`. Draws samples from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the weight tensor). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.LecunUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.LecunUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Arguments: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)" 5741,HeNormal,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,670,class,"He normal initializer. Also available via the shortcut function `tf.keras.initializers.he_normal`. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Arguments: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))" 5742,HeUniform,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,710,class,"He uniform variance scaling initializer. Also available via the shortcut function `tf.keras.initializers.he_uniform`.
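Similarly, the `stddev = sqrt(2 / fan_in)` formula from the HeNormal entry above can be checked against a large sample. A sketch with an arbitrary `(512, 256)` shape; the match is approximate because the distribution is truncated and the sample is finite:

```python
import math
import tensorflow as tf

# fan_in = 512, so the target standard deviation is sqrt(2 / 512) ~ 0.0625.
init = tf.keras.initializers.HeNormal(seed=0)
values = init(shape=(512, 256))
print(float(tf.math.reduce_std(values)), math.sqrt(2.0 / 512))
```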
Draws samples from a uniform distribution within `[-limit, limit]`, where `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the weight tensor). Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeUniform() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeUniform() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Arguments: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))" 5743,_get_dtype,tensorflow/tensorflow/python/keras/initializers/initializers_v2.py,747,function, 5744,_jvp,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,27,function,Compute the Jacobian of `f` at `primals` multiplied by `tangents`. 5745,_jacfwd,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,35,function,Compute the Jacobian of `f` at `primals` using forward-mode autodiff. 5746,_grad,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,57,function,Return a function which computes the gradient of `f`. 5747,_hvp,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,72,function,Compute a forward-over-back Hessian-vector product. 5748,_vectorize_parameters,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,82,function,"Loop over `params`, providing a one-hot mask to `f` for each." 5749,_forward_over_back_hessian,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,102,function,"Computes the full Hessian matrix for the scalar-valued f(*params). Args: f: A function taking `params` and returning a scalar. params: A possibly nested structure of tensors. use_pfor: If true, uses `tf.vectorized_map` calls instead of looping. dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes (e.g. `tf.float32`) matching the structure of `f`'s returns. Returns: A possibly nested structure of matrix slices corresponding to `params`. Each slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`) in the corresponding element of `params` and `P` is the total number of parameters (`sum_s(p_s)`). The full matrix can be obtained by concatenating along the second axis." 5750,_test_gradients,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,124,function,"Tests forward/backward Jacobians of `f`'s [0, `order`)-order gradients." 5751,ForwardpropTest,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,152,class, 5752,HessianTests,tensorflow/tensorflow/python/keras/integration_test/forwardprop_test.py,266,class, 5753,MiniModel,tensorflow/tensorflow/python/keras/integration_test/function_test.py,24,class,"Minimal model for MNIST. Useful for testing and debugging on slow TPU simulators."
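The `_hvp` and `_forward_over_back_hessian` helpers above compute forward-over-back Hessian-vector products. A minimal sketch of that pattern using the public `tf.autodiff.ForwardAccumulator` and `tf.GradientTape` APIs (the `hvp` function and test values here are illustrative, not the test's own code):

```python
import tensorflow as tf

def hvp(f, primals, tangents):
    # Forward-over-back Hessian-vector product: differentiate the
    # backward-mode gradient in forward mode along `tangents`.
    with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:
        with tf.GradientTape() as tape:
            tape.watch(primals)
            loss = f(primals)
        grads = tape.gradient(loss, primals)
        return acc.jvp(grads)

x = tf.constant([1.0, 2.0])
# Hessian of sum(v**3) is diag(6*v), so the product with e0 is [6., 0.].
print(hvp(lambda v: tf.reduce_sum(v ** 3), x, tf.constant([1.0, 0.0])))
```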
5754,DefunnedMiniModel,tensorflow/tensorflow/python/keras/integration_test/function_test.py,39,class, 5755,ModelWithOptimizer,tensorflow/tensorflow/python/keras/integration_test/function_test.py,46,class, 5756,FunctionTest,tensorflow/tensorflow/python/keras/integration_test/function_test.py,65,class, 5757,AutomaticControlDependenciesTest,tensorflow/tensorflow/python/keras/integration_test/function_test.py,213,class, 5758,_get_big_cnn_model,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,26,function,Creates a test model whose activations are significantly larger than model size. 5759,_get_split_cnn_model,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,45,function,Creates a test model that is split into `num_partitions` smaller models. 5760,_compute_loss,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,68,function, 5761,_limit_gpu_memory,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,74,function,Helper function to limit GPU memory for testing. 5762,_get_dummy_data,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,85,function, 5763,_train_no_recompute,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,91,function,Trains a single large model without gradient checkpointing. 5764,_train_with_recompute,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,111,function,Trains a single large model with gradient checkpointing using tf.recompute_grad. 5765,GradientCheckpointTest,tensorflow/tensorflow/python/keras/integration_test/gradient_checkpoint_test.py,141,class, 5766,TestKerasModelClass,tensorflow/tensorflow/python/keras/integration_test/gradients_test.py,23,class,A simple tensorflow keras Model class definition. 5767,GradientsTest,tensorflow/tensorflow/python/keras/integration_test/gradients_test.py,42,class, 5768,KerasNetworkTFRNNs,tensorflow/tensorflow/python/keras/integration_test/legacy_rnn_test.py,25,class, 5769,KerasNetworkKerasRNNs,tensorflow/tensorflow/python/keras/integration_test/legacy_rnn_test.py,36,class, 5770,LegacyRNNTest,tensorflow/tensorflow/python/keras/integration_test/legacy_rnn_test.py,47,class, 5771,get_test_data,tensorflow/tensorflow/python/keras/integration_test/legacy_rnn_test.py,373,function, 5772,ModuleTest,tensorflow/tensorflow/python/keras/integration_test/module_test.py,22,class, 5773,cycle,tensorflow/tensorflow/python/keras/integration_test/saved_model_test.py,27,function, 5774,_ModelWithOptimizer,tensorflow/tensorflow/python/keras/integration_test/saved_model_test.py,44,class, 5775,_import_and_infer,tensorflow/tensorflow/python/keras/integration_test/saved_model_test.py,62,function,Import a SavedModel into a TF 1.x-style graph and run `signature_key`. 
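`_train_with_recompute` above exercises gradient checkpointing through `tf.recompute_grad`. A minimal sketch of the idea, with a made-up two-layer block: the wrapped block's forward activations are recomputed during the backward pass rather than kept in memory.

```python
import tensorflow as tf

# Sketch: wrap a block of layers in tf.recompute_grad so its forward
# activations are recomputed during backprop instead of being stored.
block = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(256, activation="relu"),
])
block.build((None, 128))
checkpointed = tf.recompute_grad(block)

x = tf.random.normal([8, 128])
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(checkpointed(x))
print([g.shape for g in tape.gradient(loss, block.trainable_variables)])
```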
5776,_run_signature,tensorflow/tensorflow/python/keras/integration_test/saved_model_test.py,70,function, 5777,SaveTest,tensorflow/tensorflow/python/keras/integration_test/saved_model_test.py,85,class, 5778,LoadTest,tensorflow/tensorflow/python/keras/integration_test/saved_model_test.py,121,class, 5779,KerasLoadTest,tensorflow/tensorflow/python/keras/integration_test/saved_model_test.py,180,class, 5780,get_tpu_cluster_resolver,tensorflow/tensorflow/python/keras/integration_test/tpu_strategy_test.py,32,function, 5781,get_tpu_strategy,tensorflow/tensorflow/python/keras/integration_test/tpu_strategy_test.py,41,function, 5782,TpuStrategyTest,tensorflow/tensorflow/python/keras/integration_test/tpu_strategy_test.py,48,class, 5783,VectorizedMapTest,tensorflow/tensorflow/python/keras/integration_test/vectorized_map_test.py,22,class, 5784,VersionAwareLayers,tensorflow/tensorflow/python/keras/layers/__init__.py,275,class,"Utility to be used internally to access layers in a V1/V2-aware fashion. When using layers within the Keras codebase, under the constraint that e.g. `layers.BatchNormalization` should be the `BatchNormalization` version corresponding to the current runtime (TF1 or TF2), do not simply access `layers.BatchNormalization` since it would ignore e.g. an early `compat.v2.disable_v2_behavior()` call. Instead, use an instance of `VersionAwareLayers` (which you can use just like the `layers` module)." 5785,LeakyReLU,tensorflow/tensorflow/python/keras/layers/advanced_activations.py,33,class,"Leaky version of a Rectified Linear Unit. It allows a small gradient when the unit is not active: ``` f(x) = alpha * x if x < 0 f(x) = x if x >= 0 ``` Usage: >>> layer = tf.keras.layers.LeakyReLU() >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [-0.9, -0.3, 0.0, 2.0] >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1) >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [-0.3, -0.1, 0.0, 2.0] Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the batch axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: alpha: Float >= 0. Negative slope coefficient. Defaults to 0.3." 5786,PReLU,tensorflow/tensorflow/python/keras/layers/advanced_activations.py,86,class,"Parametric Rectified Linear Unit. It follows: ``` f(x) = alpha * x for x < 0 f(x) = x for x >= 0 ``` where `alpha` is a learned array with the same shape as x. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: alpha_initializer: Initializer function for the weights. alpha_regularizer: Regularizer for the weights. alpha_constraint: Constraint for the weights. shared_axes: The axes along which to share learnable parameters for the activation function. For example, if the incoming feature maps are from a 2D convolution with output shape `(batch, height, width, channels)`, and you wish to share parameters across space so that each filter only has one set of parameters, set `shared_axes=[1, 2]`." 5787,ELU,tensorflow/tensorflow/python/keras/layers/advanced_activations.py,180,class,"Exponential Linear Unit. It follows: ``` f(x) = alpha * (exp(x) - 1.) for x < 0 f(x) = x for x >= 0 ``` Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model.
Output shape: Same shape as the input. Arguments: alpha: Scale for the negative factor." 5788,ThresholdedReLU,tensorflow/tensorflow/python/keras/layers/advanced_activations.py,221,class,"Thresholded Rectified Linear Unit. It follows: ``` f(x) = x for x > theta f(x) = 0 otherwise ``` Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: theta: Float >= 0. Threshold location of activation." 5789,Softmax,tensorflow/tensorflow/python/keras/layers/advanced_activations.py,263,class,"Softmax activation function. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: axis: Integer, axis along which the softmax normalization is applied." 5790,ReLU,tensorflow/tensorflow/python/keras/layers/advanced_activations.py,297,class,"Rectified Linear Unit activation function. With default values, it returns element-wise `max(x, 0)`. Otherwise, it follows: ``` f(x) = max_value if x >= max_value f(x) = x if threshold <= x < max_value f(x) = negative_slope * (x - threshold) otherwise ``` Usage: >>> layer = tf.keras.layers.ReLU() >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] >>> layer = tf.keras.layers.ReLU(max_value=1.0) >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 1.0] >>> layer = tf.keras.layers.ReLU(negative_slope=1.0) >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [-3.0, -1.0, 0.0, 2.0] >>> layer = tf.keras.layers.ReLU(threshold=1.5) >>> output = layer([-3.0, -1.0, 1.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the batch axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Arguments: max_value: Float >= 0. Maximum activation value. Defaults to None, which means unlimited. negative_slope: Float >= 0. Negative slope coefficient. Defaults to 0. threshold: Float. Threshold value for thresholded activation. Defaults to 0." 5791,AdvancedActivationsTest,tensorflow/tensorflow/python/keras/layers/advanced_activations_test.py,31,class, 5792,Conv,tensorflow/tensorflow/python/keras/layers/convolutional.py,52,class,"Abstract N-D convolution layer (private, used as implementation base). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Note: layer attributes cannot be modified after the layer has been called once (except the `trainable` attribute). Arguments: rank: An integer, the rank of the convolution, e.g. ""2"" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the length of the convolution window. strides: An integer or tuple/list of n integers, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1.
padding: One of `""valid""`, `""same""`, or `""causal""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. `""causal""` results in causal (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, ...)`. dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. groups: A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with `filters / groups` filters. The output is the concatenation of all the `groups` results along the channel axis. Input channels and `filters` must both be divisible by `groups`. activation: Activation function to use. If you don't specify anything, no activation is applied. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` the weights of this layer will be marked as trainable (and listed in `layer.trainable_weights`). name: A string, the name of the layer." 5793,Conv1D,tensorflow/tensorflow/python/keras/layers/convolutional.py,378,class,"1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide an `input_shape` argument (tuple of integers or `None`, e.g. `(10, 128)` for sequences of 10 vectors of 128 dimensions, or `(None, 128)` for variable-length sequences of 128-dimensional vectors). Examples: >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size >>> # is 4. >>> input_shape = (4, 10, 128) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv1D( ... 32, 3, activation='relu', input_shape=input_shape[1:])(x) >>> print(y.shape) (4, 8, 32) >>> # With extended batch shape [4, 7] (e.g.
weather data where batch >>> # dimensions correspond to spatial location and the third dimension >>> # corresponds to time.) >>> input_shape = (4, 7, 10, 128) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv1D( ... 32, 3, activation='relu', input_shape=input_shape[2:])(x) >>> print(y.shape) (4, 7, 8, 32) Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""`, `""same""` or `""causal""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. `""causal""` results in causal (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`. Useful when modeling temporal data where the model should not violate the temporal order. See [WaveNet: A Generative Model for Raw Audio, section 2.1](https://arxiv.org/abs/1609.03499). data_format: A string, one of `channels_last` (default) or `channels_first`. dilation_rate: an integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. groups: A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with `filters / groups` filters. The output is the concatenation of all the `groups` results along the channel axis. Input channels and `filters` must both be divisible by `groups`. activation: Activation function to use. If you don't specify anything, no activation is applied ( see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix ( see `keras.initializers`). bias_initializer: Initializer for the bias vector ( see `keras.initializers`). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector ( see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its ""activation"") ( see `keras.regularizers`). kernel_constraint: Constraint function applied to the kernel matrix ( see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector ( see `keras.constraints`). Input shape: 3+D tensor with shape: `batch_shape + (steps, input_dim)` Output shape: 3+D tensor with shape: `batch_shape + (new_steps, filters)` `steps` value might have changed due to padding or strides. Returns: A tensor of rank 3 representing `activation(conv1d(inputs, kernel) + bias)`. Raises: ValueError: when both `strides > 1` and `dilation_rate > 1`." 5794,Conv2D,tensorflow/tensorflow/python/keras/layers/convolutional.py,519,class,"2D convolution layer (e.g. spatial convolution over images). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. 
Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in `data_format=""channels_last""`. Examples: >>> # The inputs are 28x28 RGB images with `channels_last` and the batch >>> # size is 4. >>> input_shape = (4, 28, 28, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv2D( ... 2, 3, activation='relu', input_shape=input_shape[1:])(x) >>> print(y.shape) (4, 26, 26, 2) >>> # With `dilation_rate` as 2. >>> input_shape = (4, 28, 28, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv2D( ... 2, 3, activation='relu', dilation_rate=2, input_shape=input_shape[1:])(x) >>> print(y.shape) (4, 24, 24, 2) >>> # With `padding` as ""same"". >>> input_shape = (4, 28, 28, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv2D( ... 2, 3, activation='relu', padding=""same"", input_shape=input_shape[1:])(x) >>> print(y.shape) (4, 28, 28, 2) >>> # With extended batch shape [4, 7]: >>> input_shape = (4, 7, 28, 28, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv2D( ... 2, 3, activation='relu', input_shape=input_shape[2:])(x) >>> print(y.shape) (4, 7, 26, 26, 2) Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `channels_last`. dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. groups: A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with `filters / groups` filters. The output is the concatenation of all the `groups` results along the channel axis. Input channels and `filters` must both be divisible by `groups`. activation: Activation function to use. If you don't specify anything, no activation is applied (see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. 
kernel_initializer: Initializer for the `kernel` weights matrix (see `keras.initializers`). bias_initializer: Initializer for the bias vector (see `keras.initializers`). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector (see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its ""activation"") (see `keras.regularizers`). kernel_constraint: Constraint function applied to the kernel matrix (see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector (see `keras.constraints`). Input shape: 4+D tensor with shape: `batch_shape + (channels, rows, cols)` if `data_format='channels_first'` or 4+D tensor with shape: `batch_shape + (rows, cols, channels)` if `data_format='channels_last'`. Output shape: 4+D tensor with shape: `batch_shape + (filters, new_rows, new_cols)` if `data_format='channels_first'` or 4+D tensor with shape: `batch_shape + (new_rows, new_cols, filters)` if `data_format='channels_last'`. `rows` and `cols` values might have changed due to padding. Returns: A tensor of rank 4+ representing `activation(conv2d(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is `""causal""`. ValueError: when both `strides > 1` and `dilation_rate > 1`." 5795,Conv3D,tensorflow/tensorflow/python/keras/layers/convolutional.py,678,class,"3D convolution layer (e.g. spatial convolution over volumes). This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs. If `use_bias` is True, a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes with a single channel, in `data_format=""channels_last""`. Examples: >>> # The inputs are 28x28x28 volumes with a single channel, and the >>> # batch size is 4 >>> input_shape =(4, 28, 28, 28, 1) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv3D( ... 2, 3, activation='relu', input_shape=input_shape[1:])(x) >>> print(y.shape) (4, 26, 26, 26, 2) >>> # With extended batch shape [4, 7], e.g. a batch of 4 videos of 3D frames, >>> # with 7 frames per video. >>> input_shape = (4, 7, 28, 28, 28, 1) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.Conv3D( ... 2, 3, activation='relu', input_shape=input_shape[2:])(x) >>> print(y.shape) (4, 7, 26, 26, 26, 2) Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along each spatial dimension. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. 
data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `batch_shape + (spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `batch_shape + (channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. groups: A positive integer specifying the number of groups in which the input is split along the channel axis. Each group is convolved separately with `filters / groups` filters. The output is the concatenation of all the `groups` results along the channel axis. Input channels and `filters` must both be divisible by `groups`. activation: Activation function to use. If you don't specify anything, no activation is applied (see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix (see `keras.initializers`). bias_initializer: Initializer for the bias vector (see `keras.initializers`). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector (see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its ""activation"") (see `keras.regularizers`). kernel_constraint: Constraint function applied to the kernel matrix (see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector (see `keras.constraints`). Input shape: 5+D tensor with shape: `batch_shape + (channels, conv_dim1, conv_dim2, conv_dim3)` if data_format='channels_first' or 5+D tensor with shape: `batch_shape + (conv_dim1, conv_dim2, conv_dim3, channels)` if data_format='channels_last'. Output shape: 5+D tensor with shape: `batch_shape + (filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if data_format='channels_first' or 5+D tensor with shape: `batch_shape + (new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if data_format='channels_last'. `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have changed due to padding. Returns: A tensor of rank 5+ representing `activation(conv3d(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is ""causal"". ValueError: when both `strides > 1` and `dilation_rate > 1`." 5796,Conv1DTranspose,tensorflow/tensorflow/python/keras/layers/convolutional.py,826,class,"Transposed convolution layer (sometimes called Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. 
`input_shape=(128, 3)` for data with 128 time steps and 3 channels. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer length of the 1D convolution window. strides: An integer specifying the stride of the convolution along the time dimension. Specifying a stride value != 1 is incompatible with specifying a `dilation_rate` value != 1. Defaults to 1. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. output_padding: An integer specifying the amount of padding along the time dimension of the output tensor. The amount of output padding must be lower than the stride. If set to `None` (default), the output shape is inferred. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, length, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, length)`. dilation_rate: an integer, specifying the dilation rate to use for dilated convolution. Currently, specifying a `dilation_rate` value != 1 is incompatible with specifying a stride value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied ( see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix ( see `keras.initializers`). bias_initializer: Initializer for the bias vector ( see `keras.initializers`). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector ( see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its ""activation"") (see `keras.regularizers`). kernel_constraint: Constraint function applied to the kernel matrix ( see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector ( see `keras.constraints`). Input shape: 3D tensor with shape: `(batch_size, steps, channels)` Output shape: 3D tensor with shape: `(batch_size, new_steps, filters)` If `output_padding` is specified: ``` new_timesteps = ((timesteps - 1) * strides + kernel_size - 2 * padding + output_padding) ``` Returns: A tensor of rank 3 representing `activation(conv1dtranspose(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is ""causal"". ValueError: when both `strides` > 1 and `dilation_rate` > 1. References: - [A guide to convolution arithmetic for deep learning]( https://arxiv.org/abs/1603.07285v1) - [Deconvolutional Networks]( https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)" 5797,Conv2DTranspose,tensorflow/tensorflow/python/keras/layers/convolutional.py,1072,class,"Transposed convolution layer (sometimes called Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. 
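The `new_timesteps` formula quoted in the Conv1DTranspose entry above can be confirmed on a concrete shape. A sketch with arbitrary sizes; with `padding="valid"` the padding term is 0 and there is no `output_padding`:

```python
import tensorflow as tf

# new_timesteps = (10 - 1) * 2 + 3 = 21 for strides=2, kernel_size=3.
x = tf.random.normal([4, 10, 8])
y = tf.keras.layers.Conv1DTranspose(
    filters=16, kernel_size=3, strides=2, padding="valid")(x)
print(y.shape)  # (4, 21, 16)
```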
When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in `data_format=""channels_last""`. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. output_padding: An integer or tuple/list of 2 integers, specifying the amount of padding along the height and width of the output tensor. Can be a single integer to specify the same value for all spatial dimensions. The amount of output padding along a given dimension must be lower than the stride along that same dimension. If set to `None` (default), the output shape is inferred. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". dilation_rate: an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied ( see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix ( see `keras.initializers`). bias_initializer: Initializer for the bias vector ( see `keras.initializers`). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector ( see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its ""activation"") (see `keras.regularizers`). kernel_constraint: Constraint function applied to the kernel matrix ( see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector ( see `keras.constraints`). Input shape: 4D tensor with shape: `(batch_size, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch_size, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'. 
`rows` and `cols` values might have changed due to padding. If `output_padding` is specified: ``` new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] + output_padding[0]) new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] + output_padding[1]) ``` Returns: A tensor of rank 4 representing `activation(conv2dtranspose(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is ""causal"". ValueError: when both `strides` > 1 and `dilation_rate` > 1. References: - [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1) - [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)" 5798,Conv3DTranspose,tensorflow/tensorflow/python/keras/layers/convolutional.py,1375,class,"Transposed convolution layer (sometimes called Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. When using this layer as the first layer in a model, provide the keyword argument `input_shape` (tuple of integers, does not include the sample axis), e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels if `data_format=""channels_last""`. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. output_padding: An integer or tuple/list of 3 integers, specifying the amount of padding along the depth, height, and width. Can be a single integer to specify the same value for all spatial dimensions. The amount of output padding along a given dimension must be lower than the stride along that same dimension. If set to `None` (default), the output shape is inferred. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, depth, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". dilation_rate: an integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function to use. 
If you don't specify anything, no activation is applied ( see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix ( see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector ( see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its ""activation"") ( see `keras.regularizers`). kernel_constraint: Constraint function applied to the kernel matrix ( see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector ( see `keras.constraints`). Input shape: 5D tensor with shape: `(batch_size, channels, depth, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(batch_size, depth, rows, cols, channels)` if data_format='channels_last'. Output shape: 5D tensor with shape: `(batch_size, filters, new_depth, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(batch_size, new_depth, new_rows, new_cols, filters)` if data_format='channels_last'. `depth`, `rows` and `cols` values might have changed due to padding. If `output_padding` is specified: ``` new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0] + output_padding[0]) new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1] + output_padding[1]) new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2] + output_padding[2]) ``` Returns: A tensor of rank 5 representing `activation(conv3dtranspose(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is ""causal"". ValueError: when both `strides` > 1 and `dilation_rate` > 1. References: - [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1) - [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)" 5799,SeparableConv,tensorflow/tensorflow/python/keras/layers/convolutional.py,1684,class,"Abstract base layer for separable nD convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: rank: An integer, the rank of the convolution, e.g. ""2"" for 2D convolution. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch_size, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, ...)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function to use. If you don't specify anything, no activation is applied ( see `keras.activations`). use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` the weights of this layer will be marked as trainable (and listed in `layer.trainable_weights`). name: A string, the name of the layer." 5800,SeparableConv1D,tensorflow/tensorflow/python/keras/layers/convolutional.py,1894,class,"Depthwise separable 1D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""`, `""same""`, or `""causal""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. `""causal""` results in causal (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
`channels_last` corresponds to inputs with shape `(batch_size, length, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function to use. If you don't specify anything, no activation is applied ( see `keras.activations`). use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel ( see `keras.initializers`). pointwise_initializer: An initializer for the pointwise convolution kernel ( see `keras.initializers`). bias_initializer: An initializer for the bias vector. If None, the default initializer will be used (see `keras.initializers`). depthwise_regularizer: Optional regularizer for the depthwise convolution kernel (see `keras.regularizers`). pointwise_regularizer: Optional regularizer for the pointwise convolution kernel (see `keras.regularizers`). bias_regularizer: Optional regularizer for the bias vector ( see `keras.regularizers`). activity_regularizer: Optional regularizer function for the output ( see `keras.regularizers`). depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training ( see `keras.constraints`). pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer` ( see `keras.constraints`). bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer` ( see `keras.constraints`). trainable: Boolean, if `True` the weights of this layer will be marked as trainable (and listed in `layer.trainable_weights`). name: A string, the name of the layer. Input shape: 3D tensor with shape: `(batch_size, channels, steps)` if data_format='channels_first' or 3D tensor with shape: `(batch_size, steps, channels)` if data_format='channels_last'. Output shape: 3D tensor with shape: `(batch_size, filters, new_steps)` if data_format='channels_first' or 3D tensor with shape: `(batch_size, new_steps, filters)` if data_format='channels_last'. `new_steps` value might have changed due to padding or strides. Returns: A tensor of rank 3 representing `activation(separableconv1d(inputs, kernel) + bias)`. Raises: ValueError: when both `strides` > 1 and `dilation_rate` > 1." 5801,SeparableConv2D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2074,class,"Depthwise separable 2D convolution. Separable convolutions consist of first performing a depthwise spatial convolution (which acts on each input channel separately) followed by a pointwise convolution which mixes the resulting output channels. The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step.
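To make the depthwise/pointwise factorization described above concrete, here is a parameter-count comparison between a full 3x3 convolution and its separable counterpart (a sketch; the channel sizes are arbitrary):

```python
import tensorflow as tf

# A full 3x3 conv from 64 to 128 channels needs 3*3*64*128 = 73728 weights;
# the separable version needs 3*3*64 (depthwise) + 64*128 (pointwise)
# = 8768, with depth_multiplier=1 and no biases.
inp = tf.keras.Input(shape=(32, 32, 64))
full = tf.keras.layers.Conv2D(128, 3, use_bias=False)
sep = tf.keras.layers.SeparableConv2D(128, 3, use_bias=False)
_ = full(inp), sep(inp)  # build both layers
print(full.count_params(), sep.count_params())  # 73728 8768
```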
Intuitively, separable convolutions can be understood as a way to factorize a convolution kernel into two smaller kernels, or as an extreme version of an Inception block. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. activation: Activation function to use. If you don't specify anything, no activation is applied ( see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. depthwise_initializer: Initializer for the depthwise kernel matrix ( see `keras.initializers`). pointwise_initializer: Initializer for the pointwise kernel matrix ( see `keras.initializers`). bias_initializer: Initializer for the bias vector ( see `keras.initializers`). depthwise_regularizer: Regularizer function applied to the depthwise kernel matrix (see `keras.regularizers`). pointwise_regularizer: Regularizer function applied to the pointwise kernel matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector ( see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its ""activation"") ( see `keras.regularizers`). depthwise_constraint: Constraint function applied to the depthwise kernel matrix ( see `keras.constraints`). pointwise_constraint: Constraint function applied to the pointwise kernel matrix ( see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector ( see `keras.constraints`). Input shape: 4D tensor with shape: `(batch_size, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(batch_size, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'. 
`rows` and `cols` values might have changed due to padding. Returns: A tensor of rank 4 representing `activation(separableconv2d(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is ""causal"". ValueError: when both `strides` > 1 and `dilation_rate` > 1." 5802,DepthwiseConv2D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2244,class,"Depthwise 2D convolution. Depthwise convolution consists of performing just the first step of a depthwise separable convolution (a spatial convolution which acts on each input channel separately). The `depth_multiplier` argument controls how many output channels are generated per input channel in the depthwise step. Arguments: kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `filters_in * depth_multiplier`. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (see `keras.activations`). use_bias: Boolean, whether the layer uses a bias vector. depthwise_initializer: Initializer for the depthwise kernel matrix (see `keras.initializers`). bias_initializer: Initializer for the bias vector (see `keras.initializers`). depthwise_regularizer: Regularizer function applied to the depthwise kernel matrix (see `keras.regularizers`). bias_regularizer: Regularizer function applied to the bias vector (see `keras.regularizers`). activity_regularizer: Regularizer function applied to the output of the layer (its 'activation') (see `keras.regularizers`). depthwise_constraint: Constraint function applied to the depthwise kernel matrix (see `keras.constraints`). bias_constraint: Constraint function applied to the bias vector (see `keras.constraints`). Input shape: 4D tensor with shape: `[batch_size, channels, rows, cols]` if data_format='channels_first' or 4D tensor with shape: `[batch_size, rows, cols, channels]` if data_format='channels_last'.
Output shape: 4D tensor with shape: `[batch_size, channels * depth_multiplier, new_rows, new_cols]` if data_format='channels_first' or 4D tensor with shape: `[batch_size, new_rows, new_cols, channels * depth_multiplier]` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. Returns: A tensor of rank 4 representing `activation(depthwiseconv2d(inputs, kernel) + bias)`. Raises: ValueError: if `padding` is ""causal"". ValueError: when both `strides` > 1 and `dilation_rate` > 1." 5803,UpSampling1D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2461,class,"Upsampling layer for 1D inputs. Repeats each temporal step `size` times along the time axis. Examples: >>> input_shape = (2, 2, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[ 0 1 2] [ 3 4 5]] [[ 6 7 8] [ 9 10 11]]] >>> y = tf.keras.layers.UpSampling1D(size=2)(x) >>> print(y) tf.Tensor( [[[ 0 1 2] [ 0 1 2] [ 3 4 5] [ 3 4 5]] [[ 6 7 8] [ 6 7 8] [ 9 10 11] [ 9 10 11]]], shape=(2, 4, 3), dtype=int64) Arguments: size: Integer. Upsampling factor. Input shape: 3D tensor with shape: `(batch_size, steps, features)`. Output shape: 3D tensor with shape: `(batch_size, upsampled_steps, features)`." 5804,UpSampling2D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2518,class,"Upsampling layer for 2D inputs. Repeats the rows and columns of the data by `size[0]` and `size[1]` respectively. Examples: >>> input_shape = (2, 2, 1, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[[ 0 1 2]] [[ 3 4 5]]] [[[ 6 7 8]] [[ 9 10 11]]]] >>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x) >>> print(y) tf.Tensor( [[[[ 0 1 2] [ 0 1 2]] [[ 3 4 5] [ 3 4 5]]] [[[ 6 7 8] [ 6 7 8]] [[ 9 10 11] [ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64) Arguments: size: Int, or tuple of 2 integers. The upsampling factors for rows and columns. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". interpolation: A string, one of `nearest` or `bilinear`. Input shape: 4D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, rows, cols, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, upsampled_rows, upsampled_cols, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, upsampled_rows, upsampled_cols)`" 5805,UpSampling3D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2622,class,"Upsampling layer for 3D inputs. Repeats the 1st, 2nd and 3rd dimensions of the data by `size[0]`, `size[1]` and `size[2]` respectively. Examples: >>> input_shape = (2, 1, 2, 1, 3) >>> x = tf.constant(1, shape=input_shape) >>> y = tf.keras.layers.UpSampling3D(size=2)(x) >>> print(y.shape) (2, 2, 4, 2, 3) Arguments: size: Int, or tuple of 3 integers. The upsampling factors for dim1, dim2 and dim3. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: 5D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, dim1, dim2, dim3, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, dim1, dim2, dim3)` Output shape: 5D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`" 5806,ZeroPadding1D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2703,class,"Zero-padding layer for 1D input (e.g. temporal sequence). Examples: >>> input_shape = (2, 2, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[ 0 1 2] [ 3 4 5]] [[ 6 7 8] [ 9 10 11]]] >>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x) >>> print(y) tf.Tensor( [[[ 0 0 0] [ 0 0 0] [ 0 1 2] [ 3 4 5] [ 0 0 0] [ 0 0 0]] [[ 0 0 0] [ 0 0 0] [ 6 7 8] [ 9 10 11] [ 0 0 0] [ 0 0 0]]], shape=(2, 6, 3), dtype=int64) Arguments: padding: Int, or tuple of int (length 2), or dictionary. - If int: How many zeros to add at the beginning and end of the padding dimension (axis 1). - If tuple of int (length 2): How many zeros to add at the beginning and the end of the padding dimension (`(left_pad, right_pad)`). Input shape: 3D tensor with shape `(batch_size, axis_to_pad, features)` Output shape: 3D tensor with shape `(batch_size, padded_axis, features)`" 5807,ZeroPadding2D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2769,class,"Zero-padding layer for 2D input (e.g. picture). This layer can add rows and columns of zeros at the top, bottom, left and right side of an image tensor. Examples: >>> input_shape = (1, 1, 2, 2) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[[0 1] [2 3]]]] >>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x) >>> print(y) tf.Tensor( [[[[0 0] [0 0] [0 0] [0 0]] [[0 0] [0 1] [2 3] [0 0]] [[0 0] [0 0] [0 0] [0 0]]]], shape=(1, 3, 4, 2), dtype=int64) Arguments: padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric padding values for height and width: `(symmetric_height_pad, symmetric_width_pad)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_pad, bottom_pad), (left_pad, right_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". 
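In addition to the symmetric example above, a sketch of the asymmetric tuple-of-tuples form (the tiny input and padding amounts here are arbitrary assumptions, shown only for the shape arithmetic):

```python
import tensorflow as tf

x = tf.reshape(tf.range(4), (1, 2, 2, 1))
# Pad 1 row on top, 0 on the bottom; 0 columns on the left, 2 on the right.
y = tf.keras.layers.ZeroPadding2D(padding=((1, 0), (0, 2)))(x)
print(y.shape)  # (1, 3, 4, 1): rows 2+1+0, cols 2+0+2
```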
Input shape: 4D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, rows, cols, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, padded_rows, padded_cols, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, padded_rows, padded_cols)`" 5808,ZeroPadding3D,tensorflow/tensorflow/python/keras/layers/convolutional.py,2894,class,"Zero-padding layer for 3D data (spatial or spatio-temporal). Examples: >>> input_shape = (1, 1, 2, 2, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> y = tf.keras.layers.ZeroPadding3D(padding=2)(x) >>> print(y.shape) (1, 5, 6, 6, 3) Arguments: padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric padding is applied to all three spatial dimensions. - If tuple of 3 ints: interpreted as three different symmetric padding values for the three spatial dimensions: `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`. - If tuple of 3 tuples of 2 ints: interpreted as `((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: 5D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad, depth)` - If `data_format` is `""channels_first""`: `(batch_size, depth, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)` Output shape: 5D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, first_padded_axis, second_padded_axis, third_padded_axis, depth)` - If `data_format` is `""channels_first""`: `(batch_size, depth, first_padded_axis, second_padded_axis, third_padded_axis)`" 5809,Cropping1D,tensorflow/tensorflow/python/keras/layers/convolutional.py,3020,class,"Cropping layer for 1D input (e.g. temporal sequence). It crops along the time dimension (axis 1). Examples: >>> input_shape = (2, 3, 2) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> print(x) [[[ 0 1] [ 2 3] [ 4 5]] [[ 6 7] [ 8 9] [10 11]]] >>> y = tf.keras.layers.Cropping1D(cropping=1)(x) >>> print(y) tf.Tensor( [[[2 3]] [[8 9]]], shape=(2, 1, 2), dtype=int64) Arguments: cropping: Int or tuple of int (length 2). How many units should be trimmed off at the beginning and end of the cropping dimension (axis 1). If a single int is provided, the same value will be used for both. Input shape: 3D tensor with shape `(batch_size, axis_to_crop, features)` Output shape: 3D tensor with shape `(batch_size, cropped_axis, features)`" 5810,Cropping2D,tensorflow/tensorflow/python/keras/layers/convolutional.py,3081,class,"Cropping layer for 2D input (e.g. picture). It crops along spatial dimensions, i.e. height and width.
Examples: >>> input_shape = (2, 28, 28, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x) >>> print(y.shape) (2, 24, 20, 3) Arguments: cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric cropping is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric cropping values for height and width: `(symmetric_height_crop, symmetric_width_crop)`. - If tuple of 2 tuples of 2 ints: interpreted as `((top_crop, bottom_crop), (left_crop, right_crop))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: 4D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, rows, cols, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, rows, cols)` Output shape: 4D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, cropped_rows, cropped_cols, channels)` - If `data_format` is `""channels_first""`: `(batch_size, channels, cropped_rows, cropped_cols)`" 5811,Cropping3D,tensorflow/tensorflow/python/keras/layers/convolutional.py,3208,class,"Cropping layer for 3D data (e.g. spatial or spatio-temporal). Examples: >>> input_shape = (2, 28, 28, 10, 3) >>> x = np.arange(np.prod(input_shape)).reshape(input_shape) >>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x) >>> print(y.shape) (2, 24, 20, 6, 3) Arguments: cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric cropping is applied to depth, height, and width. - If tuple of 3 ints: interpreted as three different symmetric cropping values for depth, height, and width: `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`. - If tuple of 3 tuples of 2 ints: interpreted as `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop))` data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"".
Input shape: 5D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop, depth)` - If `data_format` is `""channels_first""`: `(batch_size, depth, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop)` Output shape: 5D tensor with shape: - If `data_format` is `""channels_last""`: `(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis, depth)` - If `data_format` is `""channels_first""`: `(batch_size, depth, first_cropped_axis, second_cropped_axis, third_cropped_axis)`" 5812,ConvRNN2D,tensorflow/tensorflow/python/keras/layers/convolutional_recurrent.py,40,class,"Base class for convolutional-recurrent layers. Arguments: cell: An RNN cell instance. An RNN cell is a class that has: - a `call(input_at_t, states_at_t)` method, returning `(output_at_t, states_at_t_plus_1)`. The call method of the cell can also take the optional argument `constants`, see section ""Note on passing external constants"" below. - a `state_size` attribute. This can be a single integer (single state) in which case it is the number of channels of the recurrent state (which should be the same as the number of channels of the cell output). This can also be a list/tuple of integers (one size per state). In this case, the first entry (`state_size[0]`) should be the same as the size of the cell output. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. input_shape: Use this argument to specify the shape of the input when this layer is the first one in a model. Call arguments: inputs: A 5D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is for use with cells that use dropout. initial_state: List of initial state tensors to be passed to the first call of the cell. constants: List of constant tensors to be passed to the cell at each timestep. Input shape: 5D tensor with shape: `(samples, timesteps, channels, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, rows, cols, channels)` if data_format='channels_last'. Output shape: - If `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. - If `return_sequences`: 5D tensor with shape: `(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'. - Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
Masking: This layer supports masking for input data with a variable number of timesteps. Note on using statefulness in RNNs: You can set RNN layers to be 'stateful', which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches. To enable statefulness: - Specify `stateful=True` in the layer constructor. - Specify a fixed batch size for your model, by passing - If sequential model: `batch_input_shape=(...)` to the first layer in your model. - If functional model with 1 or more Input layers: `batch_shape=(...)` to all the first layers in your model. This is the expected shape of your inputs *including the batch size*. It should be a tuple of integers, e.g. `(32, 10, 100, 100, 32)`. Note that the number of rows and columns should be specified too. - Specify `shuffle=False` when calling fit(). To reset the states of your model, call `.reset_states()` on either a specific layer, or on your entire model. Note on specifying the initial state of RNNs: You can specify the initial state of RNN layers symbolically by calling them with the keyword argument `initial_state`. The value of `initial_state` should be a tensor or list of tensors representing the initial state of the RNN layer. You can specify the initial state of RNN layers numerically by calling `reset_states` with the keyword argument `states`. The value of `states` should be a numpy array or list of numpy arrays representing the initial state of the RNN layer. Note on passing external constants to RNNs: You can pass ""external"" constants to the cell using the `constants` keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This requires that the `cell.call` method accepts the same keyword argument `constants`. Such constants can be used to condition the cell transformation on additional static inputs (not changing over time), a.k.a. an attention mechanism." 5813,ConvLSTM2DCell,tensorflow/tensorflow/python/keras/layers/convolutional_recurrent.py,424,class,"Cell class for the ConvLSTM2D layer. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the dimensions of the convolution window. strides: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. use_bias: Boolean, whether the layer uses a bias vector. 
kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Use in combination with `bias_initializer=""zeros""`. This is recommended in [Jozefowicz et al., 2015]( http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Call arguments: inputs: A 4D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used." 5814,ConvLSTM2D,tensorflow/tensorflow/python/keras/layers/convolutional_recurrent.py,700,class,"Convolutional LSTM. It is similar to an LSTM layer, but the input transformations and recurrent transformations are both convolutional. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of n integers, specifying the dimensions of the convolution window. strides: An integer or tuple/list of n integers, specifying the strides of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, time, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, time, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". dilation_rate: An integer or tuple/list of n integers, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function to use. By default hyperbolic tangent activation function is applied (`tanh(x)`). recurrent_activation: Activation function to use for the recurrent step. use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. 
recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Use in combination with `bias_initializer=""zeros""`. This is recommended in [Jozefowicz et al., 2015](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. (default False) return_state: Boolean. Whether to return the last state in addition to the output. (default False) go_backwards: Boolean (default False). If True, process the input sequence backwards. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Call arguments: inputs: A 5D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` are set. initial_state: List of initial state tensors to be passed to the first call of the cell. Input shape: - If data_format='channels_first' 5D tensor with shape: `(samples, time, channels, rows, cols)` - If data_format='channels_last' 5D tensor with shape: `(samples, time, rows, cols, channels)` Output shape: - If `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. - If `return_sequences`: 5D tensor with shape: `(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'. - Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. Raises: ValueError: in case of invalid constructor arguments. References: - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1) (the current implementation does not include the feedback loop on the cell's output)."
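As a concrete illustration of the ConvLSTM2D interface documented above, a minimal sketch (the filter counts and the synthetic video shape are arbitrary assumptions, not from the source):

```python
import tensorflow as tf

# Video-shaped input: (time, rows, cols, channels) per sample,
# using the default channels_last data format.
inputs = tf.keras.Input(shape=(10, 40, 40, 1))
# return_sequences=True keeps the time axis: (None, 10, 40, 40, 16).
x = tf.keras.layers.ConvLSTM2D(
    filters=16, kernel_size=(3, 3), padding='same',
    return_sequences=True)(inputs)
# The default return_sequences=False collapses time: (None, 40, 40, 8).
x = tf.keras.layers.ConvLSTM2D(
    filters=8, kernel_size=(3, 3), padding='same')(x)
model = tf.keras.Model(inputs, x)
```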
5815,ConvLSTMTest,tensorflow/tensorflow/python/keras/layers/convolutional_recurrent_test.py,32,class, 5816,Conv1DTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,38,class, 5817,Conv2DTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,175,class, 5818,Conv3DTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,283,class, 5819,GroupedConvTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,418,class, 5820,Conv1DTransposeTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,470,class, 5821,Conv3DTransposeTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,503,class, 5822,ConvSequentialTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,536,class, 5823,ZeroPaddingTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,596,class, 5824,UpSamplingTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,832,class, 5825,CroppingTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,981,class, 5826,DepthwiseConv2DTest,tensorflow/tensorflow/python/keras/layers/convolutional_test.py,1123,class, 5827,Conv2DTransposeTest,tensorflow/tensorflow/python/keras/layers/convolutional_transpose_test.py,31,class, 5828,Conv3DTransposeTest,tensorflow/tensorflow/python/keras/layers/convolutional_transpose_test.py,121,class, 5829,Masking,tensorflow/tensorflow/python/keras/layers/core.py,67,class,"Masks a sequence by using a mask value to skip timesteps. For each timestep in the input tensor (dimension #1 in the tensor), if all values in the input tensor at that timestep are equal to `mask_value`, then the timestep will be masked (skipped) in all downstream layers (as long as they support masking). If any downstream layer does not support masking yet receives such an input mask, an exception will be raised. Example: Consider a Numpy data array `x` of shape `(samples, timesteps, features)`, to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you lack data for these timesteps. You can: - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.` - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer: ```python samples, timesteps, features = 32, 10, 8 inputs = np.random.random([samples, timesteps, features]).astype(np.float32) inputs[:, 3, :] = 0. inputs[:, 5, :] = 0. model = tf.keras.models.Sequential() model.add(tf.keras.layers.Masking(mask_value=0., input_shape=(timesteps, features))) model.add(tf.keras.layers.LSTM(32)) output = model(inputs) # The time step 3 and 5 will be skipped from LSTM calculation. ``` See [the masking and padding guide]( https://www.tensorflow.org/guide/keras/masking_and_padding) for more details." 5830,Dropout,tensorflow/tensorflow/python/keras/layers/core.py,134,class,"Applies Dropout to the input. The Dropout layer randomly sets input units to 0 with a frequency of `rate` at each step during training time, which helps prevent overfitting. Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over all inputs is unchanged. Note that the Dropout layer only applies when `training` is set to True such that no values are dropped during inference. When using `model.fit`, `training` will be appropriately set to True automatically, and in other contexts, you can set the kwarg explicitly to True when calling the layer. (This is in contrast to setting `trainable=False` for a Dropout layer. 
`trainable` does not affect the layer's behavior, as Dropout does not have any variables/weights that can be frozen during training.) >>> tf.random.set_seed(0) >>> layer = tf.keras.layers.Dropout(.2, input_shape=(2,)) >>> data = np.arange(10).reshape(5, 2).astype(np.float32) >>> print(data) [[0. 1.] [2. 3.] [4. 5.] [6. 7.] [8. 9.]] >>> outputs = layer(data, training=True) >>> print(outputs) tf.Tensor( [[ 0. 1.25] [ 2.5 3.75] [ 5. 6.25] [ 7.5 8.75] [10. 0. ]], shape=(5, 2), dtype=float32) Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. noise_shape: 1D integer tensor representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape `(batch_size, timesteps, features)` and you want the dropout mask to be the same for all timesteps, you can use `noise_shape=(batch_size, 1, features)`. seed: A Python integer to use as random seed. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing)." 5831,SpatialDropout1D,tensorflow/tensorflow/python/keras/layers/core.py,235,class,"Spatial 1D version of Dropout. This version performs the same function as Dropout; however, it drops entire 1D feature maps instead of individual elements. If adjacent frames within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout1D will help promote independence between feature maps and should be used instead. Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. Call arguments: inputs: A 3D tensor. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: 3D tensor with shape: `(samples, timesteps, channels)` Output shape: Same as input. References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)" 5832,SpatialDropout2D,tensorflow/tensorflow/python/keras/layers/core.py,277,class,"Spatial 2D version of Dropout. This version performs the same function as Dropout; however, it drops entire 2D feature maps instead of individual elements. If adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout2D will help promote independence between feature maps and should be used instead. Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension (the depth) is at index 1, in 'channels_last' mode it is at index 3. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Call arguments: inputs: A 4D tensor. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape: Same as input. References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)" 5833,SpatialDropout3D,tensorflow/tensorflow/python/keras/layers/core.py,336,class,"Spatial 3D version of Dropout. This version performs the same function as Dropout; however, it drops entire 3D feature maps instead of individual elements. If adjacent voxels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout3D will help promote independence between feature maps and should be used instead. Arguments: rate: Float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension (the depth) is at index 1, in 'channels_last' mode it is at index 4. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Call arguments: inputs: A 5D tensor. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: 5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first' or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'. Output shape: Same as input. References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)" 5834,Activation,tensorflow/tensorflow/python/keras/layers/core.py,394,class,"Applies an activation function to an output. Arguments: activation: Activation function, such as `tf.nn.relu`, or string name of built-in activation function, such as ""relu"". Usage: >>> layer = tf.keras.layers.Activation('relu') >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] >>> layer = tf.keras.layers.Activation(tf.nn.relu) >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [0.0, 0.0, 0.0, 2.0] Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the batch axis) when using this layer as the first layer in a model. Output shape: Same shape as input." 5835,Reshape,tensorflow/tensorflow/python/keras/layers/core.py,439,class,"Layer that reshapes inputs into the given shape. Input shape: Arbitrary, although all dimensions in the input shape must be known/fixed. Use the keyword argument `input_shape` (tuple of integers, does not include the samples/batch size axis) when using this layer as the first layer in a model. Output shape: `(batch_size,) + target_shape` Example: >>> # as first layer in a Sequential model >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Reshape((3, 4), input_shape=(12,))) >>> # model.output_shape == (None, 3, 4), `None` is the batch size. >>> model.output_shape (None, 3, 4) >>> # as intermediate layer in a Sequential model >>> model.add(tf.keras.layers.Reshape((6, 2))) >>> model.output_shape (None, 6, 2) >>> # also supports shape inference using `-1` as dimension >>> model.add(tf.keras.layers.Reshape((-1, 2, 2))) >>> model.output_shape (None, 3, 2, 2)" 5836,Permute,tensorflow/tensorflow/python/keras/layers/core.py,554,class,"Permutes the dimensions of the input according to a given pattern. Useful e.g. for connecting RNNs and convnets.
Example: ```python model = Sequential() model.add(Permute((2, 1), input_shape=(10, 64))) # now: model.output_shape == (None, 64, 10) # note: `None` is the batch dimension ``` Arguments: dims: Tuple of integers. Permutation pattern does not include the samples dimension. Indexing starts at 1. For instance, `(2, 1)` permutes the first and second dimensions of the input. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same as the input shape, but with the dimensions re-ordered according to the specified pattern." 5837,Flatten,tensorflow/tensorflow/python/keras/layers/core.py,612,class,"Flattens the input. Does not affect the batch size. Note: If inputs are shaped `(batch,)` without a feature axis, then flattening adds an extra channel dimension and output shape is `(batch, 1)`. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, ...)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Example: >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Conv2D(64, 3, 3, input_shape=(3, 32, 32))) >>> model.output_shape (None, 1, 10, 64) >>> model.add(Flatten()) >>> model.output_shape (None, 640)" 5838,RepeatVector,tensorflow/tensorflow/python/keras/layers/core.py,700,class,"Repeats the input n times. Example: ```python model = Sequential() model.add(Dense(32, input_dim=32)) # now: model.output_shape == (None, 32) # note: `None` is the batch dimension model.add(RepeatVector(3)) # now: model.output_shape == (None, 3, 32) ``` Arguments: n: Integer, repetition factor. Input shape: 2D tensor of shape `(num_samples, features)`. Output shape: 3D tensor of shape `(num_samples, n, features)`." 5839,Lambda,tensorflow/tensorflow/python/keras/layers/core.py,744,class,"Wraps arbitrary expressions as a `Layer` object. The `Lambda` layer exists so that arbitrary TensorFlow functions can be used when constructing `Sequential` and Functional API models. `Lambda` layers are best suited for simple operations or quick experimentation. For more advanced use cases, follow [this guide](https://www.tensorflow.org/guide/keras/custom_layers_and_models) for subclassing `tf.keras.layers.Layer`. The main reason to subclass `tf.keras.layers.Layer` instead of using a `Lambda` layer is saving and inspecting a Model. `Lambda` layers are saved by serializing the Python bytecode, whereas subclassed Layers can be saved via overriding their `get_config` method. Overriding `get_config` improves the portability of Models. Models that rely on subclassed Layers are also often easier to visualize and reason about. Examples: ```python # add a x -> x^2 layer model.add(Lambda(lambda x: x ** 2)) ``` ```python # add a layer that returns the concatenation # of the positive part of the input and # the opposite of the negative part def antirectifier(x): x -= K.mean(x, axis=1, keepdims=True) x = K.l2_normalize(x, axis=1) pos = K.relu(x) neg = K.relu(-x) return K.concatenate([pos, neg], axis=1) model.add(Lambda(antirectifier)) ``` Variables: While it is possible to use Variables with Lambda layers, this practice is discouraged as it can easily lead to bugs. 
For instance, consider the following layer: ```python scale = tf.Variable(1.) scale_layer = tf.keras.layers.Lambda(lambda x: x * scale) ``` Because scale_layer does not directly track the `scale` variable, it will not appear in `scale_layer.trainable_weights` and will therefore not be trained if `scale_layer` is used in a Model. A better pattern is to write a subclassed Layer: ```python class ScaleLayer(tf.keras.layers.Layer): def __init__(self): super(ScaleLayer, self).__init__() self.scale = tf.Variable(1.) def call(self, inputs): return inputs * self.scale ``` In general, Lambda layers can be convenient for simple stateless computation, but anything more complex should use a subclass Layer instead. Arguments: function: The function to be evaluated. Takes input tensor as first argument. output_shape: Expected output shape from function. This argument can be inferred if not explicitly provided. Can be a tuple or function. If a tuple, it only specifies the first dimension onward; sample dimension is assumed either the same as the input: `output_shape = (input_shape[0], ) + output_shape` or, the input is `None` and the sample dimension is also `None`: `output_shape = (None, ) + output_shape` If a function, it specifies the entire shape as a function of the input shape: `output_shape = f(input_shape)` mask: Either None (indicating no masking) or a callable with the same signature as the `compute_mask` layer method, or a tensor that will be returned as output mask regardless of what the input is. arguments: Optional dictionary of keyword arguments to be passed to the function. Input shape: Arbitrary. Use the keyword argument input_shape (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Specified by `output_shape` argument" 5840,Dense,tensorflow/tensorflow/python/keras/layers/core.py,1067,class,"Just your regular densely-connected NN layer. `Dense` implements the operation: `output = activation(dot(input, kernel) + bias)` where `activation` is the element-wise activation function passed as the `activation` argument, `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only applicable if `use_bias` is `True`). Note: If the input to the layer has a rank greater than 2, then `Dense` computes the dot product between the `inputs` and the `kernel` along the last axis of the `inputs` and axis 1 of the `kernel` (using `tf.tensordot`). For example, if input has dimensions `(batch_size, d0, d1)`, then we create a `kernel` with shape `(d1, units)`, and the `kernel` operates along axis 2 of the `input`, on every sub-tensor of shape `(1, 1, d1)` (there are `batch_size * d0` such sub-tensors). The output in this case will have shape `(batch_size, d0, units)`. Besides, layer attributes cannot be modified after the layer has been called once (except the `trainable` attribute). Example: >>> # Create a `Sequential` model and add a Dense layer as the first layer. >>> model = tf.keras.models.Sequential() >>> model.add(tf.keras.Input(shape=(16,))) >>> model.add(tf.keras.layers.Dense(32, activation='relu')) >>> # Now the model will take as input arrays of shape (None, 16) >>> # and output arrays of shape (None, 32). >>> # Note that after the first layer, you don't need to specify >>> # the size of the input anymore: >>> model.add(tf.keras.layers.Dense(32)) >>> model.output_shape (None, 32) Arguments: units: Positive integer, dimensionality of the output space. 
activation: Activation function to use. If you don't specify anything, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the `kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common situation would be a 2D input with shape `(batch_size, input_dim)`. Output shape: N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input with shape `(batch_size, input_dim)`, the output would have shape `(batch_size, units)`." 5841,ActivityRegularization,tensorflow/tensorflow/python/keras/layers/core.py,1237,class,"Layer that applies an update to the cost function based on input activity. Arguments: l1: L1 regularization factor (positive float). l2: L2 regularization factor (positive float). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input." 5842,TFOpLambda,tensorflow/tensorflow/python/keras/layers/core.py,1269,class,"Wraps TF API symbols in a `Layer` object. It is inserted by the Functional API construction whenever users call a supported TF symbol on KerasTensors. Like Lambda layers, this layer tries to raise warnings when it detects users explicitly use variables in the call. (To let them know that the layer will not capture the variables). This is useful in the case where users do something like: x = keras.Input(...) y = tf.Variable(...) out = x * y" 5843,KerasOpDispatcher,tensorflow/tensorflow/python/keras/layers/core.py,1416,class,A global dispatcher that allows building a functional model with TF Ops. 5844,_slice_to_dict,tensorflow/tensorflow/python/keras/layers/core.py,1431,function, 5845,_dict_to_slice,tensorflow/tensorflow/python/keras/layers/core.py,1437,function, 5846,SlicingOpLambda,tensorflow/tensorflow/python/keras/layers/core.py,1443,class,"Wraps TF API symbols in a `Layer` object. It is inserted by the Functional API construction whenever users call a supported TF symbol on KerasTensors. Like Lambda layers, this layer tries to raise warnings when it detects users explicitly use variables in the call. (To let them know that the layer will not capture the variables). This is useful in the case where users do something like: x = keras.Input(...) y = tf.Variable(...) out = x * y" 5847,TFSlicingOpDispatcher,tensorflow/tensorflow/python/keras/layers/core.py,1499,class,A global dispatcher that allows building a functional model with TF Ops.
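To make the TFOpLambda dispatch described above concrete, a minimal sketch (the exact layer class name surfaced by `model.layers` is an assumption and may vary across TF versions):

```python
import tensorflow as tf

# Calling a supported TF symbol on a KerasTensor makes the Functional API
# wrap the op in a TFOpLambda layer automatically.
x = tf.keras.Input(shape=(8,))
y = tf.abs(x)  # dispatched through a TFOpLambda layer under the hood
model = tf.keras.Model(x, y)
print([type(layer).__name__ for layer in model.layers])
# e.g. ['InputLayer', 'TFOpLambda']
```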
5848,DropoutLayersTest,tensorflow/tensorflow/python/keras/layers/core_test.py,42,class, 5849,LambdaLayerTest,tensorflow/tensorflow/python/keras/layers/core_test.py,98,class, 5850,TestStatefulLambda,tensorflow/tensorflow/python/keras/layers/core_test.py,285,class, 5851,CoreLayersTest,tensorflow/tensorflow/python/keras/layers/core_test.py,374,class, 5852,_CuDNNRNN,tensorflow/tensorflow/python/keras/layers/cudnn_recurrent.py,37,class,"Private base class for CuDNNGRU and CuDNNLSTM layers. Arguments: return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. time_major: Boolean (default False). If true, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`." 5853,CuDNNGRU,tensorflow/tensorflow/python/keras/layers/cudnn_recurrent.py,162,class,"Fast GRU implementation backed by cuDNN. More information about cuDNN can be found on the [NVIDIA developer website](https://developer.nvidia.com/cudnn). Can only be run on GPU. Arguments: units: Positive integer, dimensionality of the output space. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch." 5854,CuDNNLSTM,tensorflow/tensorflow/python/keras/layers/cudnn_recurrent.py,342,class,"Fast LSTM implementation backed by cuDNN. More information about cuDNN can be found on the [NVIDIA developer website](https://developer.nvidia.com/cudnn). Can only be run on GPU. Arguments: units: Positive integer, dimensionality of the output space. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer=""zeros""`. 
This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch." 5855,CuDNNTest,tensorflow/tensorflow/python/keras/layers/cudnn_recurrent_test.py,40,class, 5856,CuDNNGraphOnlyTest,tensorflow/tensorflow/python/keras/layers/cudnn_recurrent_test.py,162,class, 5857,CuDNNV1OnlyTest,tensorflow/tensorflow/python/keras/layers/cudnn_recurrent_test.py,249,class, 5858,BaseDenseAttention,tensorflow/tensorflow/python/keras/layers/dense_attention.py,38,class,"Base Attention class for Dense networks. This class is suitable for Dense or CNN networks, and not for RNN networks. Implementations of attention mechanisms should inherit from this class, and reuse the `apply_attention_scores()` method. Args: causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such that position `i` cannot attend to positions `j > i`. This prevents the flow of information from the future towards the past. dropout: Float between 0 and 1. Fraction of the units to drop for the attention scores. Call Arguments: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. * value: Value `Tensor` of shape `[batch_size, Tv, dim]`. * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not given, will use `value` for both `key` and `value`, which is the most common case. mask: List of the following tensors: * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`. If given, the output will be zero at the positions where `mask==False`. * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`. If given, will apply the mask such that values at positions where `mask==False` do not contribute to the result. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (no dropout). Output shape: Attention outputs of shape `[batch_size, Tq, dim]`." 5859,Attention,tensorflow/tensorflow/python/keras/layers/dense_attention.py,210,class,"Dot-product attention layer, a.k.a. Luong-style attention. Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of shape `[batch_size, Tv, dim]` and `key` tensor of shape `[batch_size, Tv, dim]`. The calculation follows the steps: 1.
Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot product: `scores = tf.matmul(query, key, transpose_b=True)`. 2. Use scores to calculate a distribution with shape `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`. 3. Use `distribution` to create a linear combination of `value` with shape `[batch_size, Tq, dim]`: `return tf.matmul(distribution, value)`. Args: use_scale: If `True`, will create a scalar variable to scale the attention scores. causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such that position `i` cannot attend to positions `j > i`. This prevents the flow of information from the future towards the past. dropout: Float between 0 and 1. Fraction of the units to drop for the attention scores. Call Arguments: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. * value: Value `Tensor` of shape `[batch_size, Tv, dim]`. * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not given, will use `value` for both `key` and `value`, which is the most common case. mask: List of the following tensors: * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`. If given, the output will be zero at the positions where `mask==False`. * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`. If given, will apply the mask such that values at positions where `mask==False` do not contribute to the result. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (no dropout). Output shape: Attention outputs of shape `[batch_size, Tq, dim]`. The meaning of `query`, `value` and `key` depends on the application. In the case of text similarity, for example, `query` is the sequence embeddings of the first piece of text and `value` is the sequence embeddings of the second piece of text. `key` is usually the same tensor as `value`. Here is a code example for using `Attention` in a CNN+Attention network: ```python # Variable-length int sequences. query_input = tf.keras.Input(shape=(None,), dtype='int32') value_input = tf.keras.Input(shape=(None,), dtype='int32') # Embedding lookup. token_embedding = tf.keras.layers.Embedding(input_dim=1000, output_dim=64) # Query embeddings of shape [batch_size, Tq, dimension]. query_embeddings = token_embedding(query_input) # Value embeddings of shape [batch_size, Tv, dimension]. value_embeddings = token_embedding(value_input) # CNN layer. cnn_layer = tf.keras.layers.Conv1D( filters=100, kernel_size=4, # Use 'same' padding so outputs have the same shape as inputs. padding='same') # Query encoding of shape [batch_size, Tq, filters]. query_seq_encoding = cnn_layer(query_embeddings) # Value encoding of shape [batch_size, Tv, filters]. value_seq_encoding = cnn_layer(value_embeddings) # Query-value attention of shape [batch_size, Tq, filters]. query_value_attention_seq = tf.keras.layers.Attention()( [query_seq_encoding, value_seq_encoding]) # Reduce over the sequence axis to produce encodings of shape # [batch_size, filters]. query_encoding = tf.keras.layers.GlobalAveragePooling1D()( query_seq_encoding) query_value_attention = tf.keras.layers.GlobalAveragePooling1D()( query_value_attention_seq) # Concatenate query and document encodings to produce a DNN input layer. input_layer = tf.keras.layers.Concatenate()( [query_encoding, query_value_attention]) # Add DNN layers, and create Model. # ...
```" 5860,AdditiveAttention,tensorflow/tensorflow/python/keras/layers/dense_attention.py,344,class,"Additive attention layer, a.k.a. Bahdanau-style attention. Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor of shape `[batch_size, Tv, dim]` and `key` tensor of shape `[batch_size, Tv, dim]`. The calculation follows the steps: 1. Reshape `query` and `value` into shapes `[batch_size, Tq, 1, dim]` and `[batch_size, 1, Tv, dim]` respectively. 2. Calculate scores with shape `[batch_size, Tq, Tv]` as a non-linear sum: `scores = tf.reduce_sum(tf.tanh(query + value), axis=-1)` 3. Use scores to calculate a distribution with shape `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`. 4. Use `distribution` to create a linear combination of `value` with shape `batch_size, Tq, dim]`: `return tf.matmul(distribution, value)`. Args: use_scale: If `True`, will create a variable to scale the attention scores. causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such that position `i` cannot attend to positions `j > i`. This prevents the flow of information from the future towards the past. dropout: Float between 0 and 1. Fraction of the units to drop for the attention scores. Call Arguments: inputs: List of the following tensors: * query: Query `Tensor` of shape `[batch_size, Tq, dim]`. * value: Value `Tensor` of shape `[batch_size, Tv, dim]`. * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not given, will use `value` for both `key` and `value`, which is the most common case. mask: List of the following tensors: * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`. If given, the output will be zero at the positions where `mask==False`. * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`. If given, will apply the mask such that values at positions where `mask==False` do not contribute to the result. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (no dropout). Output shape: Attention outputs of shape `[batch_size, Tq, dim]`. The meaning of `query`, `value` and `key` depend on the application. In the case of text similarity, for example, `query` is the sequence embeddings of the first piece of text and `value` is the sequence embeddings of the second piece of text. `key` is usually the same tensor as `value`. Here is a code example for using `AdditiveAttention` in a CNN+Attention network: ```python # Variable-length int sequences. query_input = tf.keras.Input(shape=(None,), dtype='int32') value_input = tf.keras.Input(shape=(None,), dtype='int32') # Embedding lookup. token_embedding = tf.keras.layers.Embedding(max_tokens, dimension) # Query embeddings of shape [batch_size, Tq, dimension]. query_embeddings = token_embedding(query_input) # Value embeddings of shape [batch_size, Tv, dimension]. value_embeddings = token_embedding(value_input) # CNN layer. cnn_layer = tf.keras.layers.Conv1D( filters=100, kernel_size=4, # Use 'same' padding so outputs have the same shape as inputs. padding='same') # Query encoding of shape [batch_size, Tq, filters]. query_seq_encoding = cnn_layer(query_embeddings) # Value encoding of shape [batch_size, Tv, filters]. value_seq_encoding = cnn_layer(value_embeddings) # Query-value attention of shape [batch_size, Tq, filters]. query_value_attention_seq = tf.keras.layers.AdditiveAttention()( [query_seq_encoding, value_seq_encoding]) # Reduce over the sequence axis to produce encodings of shape # [batch_size, filters]. 
query_encoding = tf.keras.layers.GlobalAveragePooling1D()( query_seq_encoding) query_value_attention = tf.keras.layers.GlobalAveragePooling1D()( query_value_attention_seq) # Concatenate query and document encodings to produce a DNN input layer. input_layer = tf.keras.layers.Concatenate()( [query_encoding, query_value_attention]) # Add DNN layers, and create Model. # ... ```" 5861,_lower_triangular_mask,tensorflow/tensorflow/python/keras/layers/dense_attention.py,489,function,Creates a lower-triangular boolean mask over the last 2 dimensions. 5862,_merge_masks,tensorflow/tensorflow/python/keras/layers/dense_attention.py,498,function, 5863,BaseDenseAttentionTest,tensorflow/tensorflow/python/keras/layers/dense_attention_test.py,34,class, 5864,AttentionTest,tensorflow/tensorflow/python/keras/layers/dense_attention_test.py,155,class, 5865,AdditiveAttentionTest,tensorflow/tensorflow/python/keras/layers/dense_attention_test.py,472,class, 5866,LowerTriangularMaskTest,tensorflow/tensorflow/python/keras/layers/dense_attention_test.py,718,class, 5867,EinsumDense,tensorflow/tensorflow/python/keras/layers/einsum_dense.py,34,class,"A layer that uses tf.einsum as the backing computation. This layer can perform einsum calculations of arbitrary dimensionality. Arguments: equation: An equation describing the einsum to perform. This equation must be a valid einsum string of the form `ab,bc->ac`, `...ab,bc->...ac`, or `ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum axis expression sequence. output_shape: The expected shape of the output tensor (excluding the batch dimension and any dimensions represented by ellipses). You can specify None for any dimension that is unknown or can be inferred from the input shape. activation: Activation function to use. If you don't specify anything, no activation is applied (that is, a ""linear"" activation: `a(x) = x`). bias_axes: A string containing the output dimension(s) to apply a bias to. Each character in the `bias_axes` string should correspond to a character in the output portion of the `equation` string. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the `kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. Examples: **Biased dense layer with einsums** This example shows how to instantiate a standard Keras dense layer using einsum operations. This example is equivalent to `tf.keras.layers.Dense(64, use_bias=True)`. >>> layer = EinsumDense(""ab,bc->ac"", output_shape=64, bias_axes=""c"") >>> input_tensor = tf.keras.Input(shape=[32]) >>> output_tensor = layer(input_tensor) >>> output_tensor <... shape=(None, 64) dtype=...> **Applying a dense layer to a sequence** This example shows how to instantiate a layer that applies the same dense operation to every element in a sequence. Here, the 'output_shape' has two values (since there are two non-batch dimensions in the output); the first dimension in the output_shape is `None`, because the sequence dimension `b` has an unknown shape. >>> layer = EinsumDense(""abc,cd->abd"", ... output_shape=(None, 64), ...
bias_axes=""d"") >>> input_tensor = tf.keras.Input(shape=[32, 128]) >>> output_tensor = layer(input_tensor) >>> output_tensor <... shape=(None, 32, 64) dtype=...> **Applying a dense layer to a sequence using ellipses** This example shows how to instantiate a layer that applies the same dense operation to every element in a sequence, but uses the ellipsis notation instead of specifying the batch and sequence dimensions. Because we are using ellipsis notation and have specified only one axis, the output_shape arg is a single value. When instantiated in this way, the layer can handle any number of sequence dimensions - including the case where no sequence dimension exists. >>> layer = EinsumDense(""...x,xy->...y"", output_shape=64, bias_axes=""y"") >>> input_tensor = tf.keras.Input(shape=[32, 128]) >>> output_tensor = layer(input_tensor) >>> output_tensor <... shape=(None, 32, 64) dtype=...>" 5868,_analyze_einsum_string,tensorflow/tensorflow/python/keras/layers/einsum_dense.py,209,function,Analyzes an einsum string to determine the required weight shape. 5869,_analyze_split_string,tensorflow/tensorflow/python/keras/layers/einsum_dense.py,240,function,Analyze an pre-split einsum string to find the weight shape. 5870,TestEinsumDenseLayer,tensorflow/tensorflow/python/keras/layers/einsum_dense_test.py,225,class, 5871,TestEinsumLayerAPI,tensorflow/tensorflow/python/keras/layers/einsum_dense_test.py,258,class, 5872,Embedding,tensorflow/tensorflow/python/keras/layers/embeddings.py,36,class,"Turns positive integers (indexes) into dense vectors of fixed size. e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]` This layer can only be used as the first layer in a model. Example: >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Embedding(1000, 64, input_length=10)) >>> # The model will take as input an integer matrix of size (batch, >>> # input_length), and the largest integer (i.e. word index) in the input >>> # should be no larger than 999 (vocabulary size). >>> # Now model.output_shape is (None, 10, 64), where `None` is the batch >>> # dimension. >>> input_array = np.random.randint(1000, size=(32, 10)) >>> model.compile('rmsprop', 'mse') >>> output_array = model.predict(input_array) >>> print(output_array.shape) (32, 10, 64) Arguments: input_dim: Integer. Size of the vocabulary, i.e. maximum integer index + 1. output_dim: Integer. Dimension of the dense embedding. embeddings_initializer: Initializer for the `embeddings` matrix (see `keras.initializers`). embeddings_regularizer: Regularizer function applied to the `embeddings` matrix (see `keras.regularizers`). embeddings_constraint: Constraint function applied to the `embeddings` matrix (see `keras.constraints`). mask_zero: Boolean, whether or not the input value 0 is a special ""padding"" value that should be masked out. This is useful when using recurrent layers which may take variable length input. If this is `True`, then all subsequent layers in the model need to support masking or an exception will be raised. If mask_zero is set to True, as a consequence, index 0 cannot be used in the vocabulary (input_dim should equal size of vocabulary + 1). input_length: Length of input sequences, when it is constant. This argument is required if you are going to connect `Flatten` then `Dense` layers upstream (without it, the shape of the dense outputs cannot be computed). Input shape: 2D tensor with shape: `(batch_size, input_length)`. Output shape: 3D tensor with shape: `(batch_size, input_length, output_dim)`." 
5873,EmbeddingTest,tensorflow/tensorflow/python/keras/layers/embeddings_test.py,37,class, 5874,GRULayerTest,tensorflow/tensorflow/python/keras/layers/gru_test.py,38,class, 5875,GRULayerGenericTest,tensorflow/tensorflow/python/keras/layers/gru_test.py,230,class, 5876,GRUV2Test,tensorflow/tensorflow/python/keras/layers/gru_v2_test.py,63,class, 5877,GRULayerGradientTapeTest,tensorflow/tensorflow/python/keras/layers/gru_v2_test.py,644,class, 5878,GRUGraphRewriteTest,tensorflow/tensorflow/python/keras/layers/gru_v2_test.py,672,class, 5879,RandomFourierFeatures,tensorflow/tensorflow/python/keras/layers/kernelized.py,40,class,"Layer that projects its inputs into a random feature space. This layer implements a mapping from input space to a space with `output_dim` dimensions, which approximates shift-invariant kernels. A kernel function `K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for some function `k`. Many popular Radial Basis Functions (RBF), including Gaussian and Laplacian kernels, are shift-invariant. The implementation of this layer is based on the following paper: [""Random Features for Large-Scale Kernel Machines""]( https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf) by Ali Rahimi and Ben Recht. The distribution from which the parameters of the random features map (layer) are sampled determines which shift-invariant kernel the layer approximates (see paper for more details). You can use the distribution of your choice. The layer supports out-of-the-box approximations of the following two RBF kernels: - Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))` - Laplacian: `K(x, y) = exp(-abs(x - y) / scale)` **Note:** Unlike what is described in the paper and unlike what is used in the Scikit-Learn implementation, the output of this layer does not apply the `sqrt(2 / D)` normalization factor. **Usage:** Typically, this layer is used to ""kernelize"" linear models by applying a non-linear transformation (this layer) to the input features and then training a linear model on top of the transformed features. Depending on the loss function of the linear model, the composition of this layer and the linear model results in models that are equivalent (up to approximation) to kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss), kernel linear regression (for squared loss), etc. Examples: A kernel multinomial logistic regression model with Gaussian kernel for MNIST: ```python model = keras.Sequential([ keras.Input(shape=(784,)), RandomFourierFeatures( output_dim=4096, scale=10., kernel_initializer='gaussian'), layers.Dense(units=10, activation='softmax'), ]) model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'] ) ``` A quasi-SVM classifier for MNIST: ```python model = keras.Sequential([ keras.Input(shape=(784,)), RandomFourierFeatures( output_dim=4096, scale=10., kernel_initializer='gaussian'), layers.Dense(units=10), ]) model.compile( optimizer='adam', loss='hinge', metrics=['categorical_accuracy'] ) ``` To use another kernel, just replace the layer creation line with: ```python random_features_layer = RandomFourierFeatures( output_dim=500, kernel_initializer=..., scale=..., ...) ``` Arguments: output_dim: Positive integer, the dimension of the layer's output, i.e., the number of random features used to approximate the kernel. kernel_initializer: Determines the distribution of the parameters of the random features map (and therefore the kernel approximated by the layer).
It can be either a string identifier or a Keras `Initializer` instance. Currently only 'gaussian' and 'laplacian' are supported string identifiers (case insensitive). Note that the kernel matrix is not trainable. scale: For Gaussian and Laplacian kernels, this corresponds to a scaling factor of the corresponding kernel approximated by the layer (see concrete definitions above). When provided, it should be a positive float. If None, a default value is used: if the kernel initializer is set to ""gaussian"", `scale` defaults to `sqrt(input_dim / 2)`, otherwise, it defaults to 1.0. Both the approximation error of the kernel and the classification quality are sensitive to this parameter. If `trainable` is set to `True`, this parameter is learned end-to-end during training and the provided value serves as the initial value. **Note:** When features from this layer are fed to a linear model, by making `scale` trainable, the resulting optimization problem is no longer convex (even if the loss function used by the linear model is convex). trainable: Whether the scaling parameter of the layer should be trainable. Defaults to `False`. name: String, name to use for this layer." 5880,_get_random_features_initializer,tensorflow/tensorflow/python/keras/layers/kernelized.py,250,function,Returns Initializer object for random features. 5881,_get_default_scale,tensorflow/tensorflow/python/keras/layers/kernelized.py,273,function, 5882,_exact_gaussian,tensorflow/tensorflow/python/keras/layers/kernelized_test.py,53,function, 5883,_exact_laplacian,tensorflow/tensorflow/python/keras/layers/kernelized_test.py,58,function, 5884,RandomFourierFeaturesTest,tensorflow/tensorflow/python/keras/layers/kernelized_test.py,64,class, 5885,LayersTest,tensorflow/tensorflow/python/keras/layers/layers_test.py,27,class, 5886,LocallyConnected1D,tensorflow/tensorflow/python/keras/layers/local.py,36,class,"Locally-connected layer for 1D inputs. The `LocallyConnected1D` layer works similarly to the `Conv1D` layer, except that weights are unshared, that is, a different set of filters is applied at each different patch of the input. Note: layer attributes cannot be modified after the layer has been called once (except the `trainable` attribute). Example: ```python # apply an unshared weight convolution 1d of length 3 to a sequence with # 10 timesteps, with 64 output filters model = Sequential() model.add(LocallyConnected1D(64, 3, input_shape=(10, 32))) # now model.output_shape == (None, 8, 64) # add a new conv1d on top model.add(LocallyConnected1D(32, 3)) # now model.output_shape == (None, 6, 32) ``` Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: Currently only supports `""valid""` (case-insensitive). `""same""` may be supported in the future. `""valid""` means no padding. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`.
It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". activation: Activation function to use. If you don't specify anything, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. implementation: implementation mode, either `1`, `2`, or `3`. `1` loops over input spatial locations to perform the forward pass. It is memory-efficient but performs a lot of (small) ops. `2` stores layer weights in a dense but sparsely-populated 2D matrix and implements the forward pass as a single matrix-multiply. It uses a lot of RAM but performs few (large) ops. `3` stores layer weights in a sparse tensor and implements the forward pass as a single sparse matrix-multiply. How to choose: `1`: large, dense models, `2`: small models, `3`: large, sparse models, where ""large"" stands for large input/output activations (i.e. many `filters`, `input_filters`, large `input_size`, `output_size`), and ""sparse"" stands for few connections between inputs and outputs, i.e. small ratio `filters * input_filters * kernel_size / (input_size * strides)`, where inputs to and outputs of the layer are assumed to have shapes `(input_size, input_filters)`, `(output_size, filters)` respectively. It is recommended to benchmark each in the setting of interest to pick the most efficient one (in terms of speed and memory usage). Correct choice of implementation can lead to dramatic speed improvements (e.g. 50X), potentially at the expense of RAM. Also, only `padding=""valid""` is supported by `implementation=1`. Input shape: 3D tensor with shape: `(batch_size, steps, input_dim)` Output shape: 3D tensor with shape: `(batch_size, new_steps, filters)` `steps` value might have changed due to padding or strides.
kernel_size: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. padding: Currently only supports `""valid""` (case-insensitive). `""same""` will be supported in the future. `""valid""` means no padding. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". activation: Activation function to use. If you don't specify anything, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the kernel matrix. bias_constraint: Constraint function applied to the bias vector. implementation: implementation mode, either `1`, `2`, or `3`. `1` loops over input spatial locations to perform the forward pass. It is memory-efficient but performs a lot of (small) ops. `2` stores layer weights in a dense but sparsely-populated 2D matrix and implements the forward pass as a single matrix-multiply. It uses a lot of RAM but performs few (large) ops. `3` stores layer weights in a sparse tensor and implements the forward pass as a single sparse matrix-multiply. How to choose: `1`: large, dense models, `2`: small models, `3`: large, sparse models, where ""large"" stands for large input/output activations (i.e. many `filters`, `input_filters`, large `np.prod(input_size)`, `np.prod(output_size)`), and ""sparse"" stands for few connections between inputs and outputs, i.e. small ratio `filters * input_filters * np.prod(kernel_size) / (np.prod(input_size) * np.prod(strides))`, where inputs to and outputs of the layer are assumed to have shapes `input_size + (input_filters,)`, `output_size + (filters,)` respectively. It is recommended to benchmark each in the setting of interest to pick the most efficient one (in terms of speed and memory usage). Correct choice of implementation can lead to dramatic speed improvements (e.g. 50X), potentially at the expense of RAM. Also, only `padding=""valid""` is supported by `implementation=1`. Input shape: 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding."
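To make the "unshared weights" point concrete, here is a small sketch (shapes chosen arbitrarily for illustration) that counts the parameters of a `LocallyConnected1D` layer with the default `implementation=1`; the comments spell out the arithmetic behind the count.

```python
import tensorflow as tf

# Unshared 1D convolution: 10 timesteps, 2 input channels, 'valid' padding.
layer = tf.keras.layers.LocallyConnected1D(
    filters=4, kernel_size=3, input_shape=(10, 2))
model = tf.keras.Sequential([layer])

print(model.output_shape)  # (None, 8, 4); 8 = 10 - 3 + 1 output positions

# Each of the 8 output positions has its own kernel and bias, so:
#   kernel: 8 positions * (3 steps * 2 channels) * 4 filters = 192
#   bias:   8 positions * 4 filters                          = 32
print(model.count_params())  # 224
```

A shared-weight `Conv1D` with the same window would need only 3 * 2 * 4 + 4 = 28 parameters, which is the trade-off the docstrings above describe: local connectivity without weight sharing costs a factor of `output_size` in parameters.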
5888,get_locallyconnected_mask,tensorflow/tensorflow/python/keras/layers/local.py,666,function,"Return a mask representing connectivity of a locally-connected operation. This method returns a masking numpy array of 0s and 1s (of type `np.float32`) that, when element-wise multiplied with a fully-connected weight tensor, masks out the weights between disconnected input-output pairs and thus implements local connectivity through a sparse fully-connected weight tensor. Assume an unshared convolution with given parameters is applied to an input having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an output with spatial shape `(d_out1, ..., d_outN)` (determined by layer parameters such as `strides`). This method returns a mask which can be broadcast-multiplied (element-wise) with a 2*(N+1)-D weight matrix (equivalent to a fully-connected layer between (N+1)-D activations (N spatial + 1 channel dimensions for input and output)) to make it perform an unshared convolution with given `kernel_shape`, `strides`, `padding` and `data_format`. Arguments: input_shape: tuple of size N: `(d_in1, ..., d_inN)` spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `""same""` or `""valid""`. data_format: a string, `""channels_first""` or `""channels_last""`. Returns: a `np.float32`-type `np.ndarray` of shape `(1, d_in1, ..., d_inN, 1, d_out1, ..., d_outN)` if `data_format == ""channels_first""`, or `(d_in1, ..., d_inN, 1, d_out1, ..., d_outN, 1)` if `data_format == ""channels_last""`. Raises: ValueError: if `data_format` is neither `""channels_first""` nor `""channels_last""`." 5889,local_conv_matmul,tensorflow/tensorflow/python/keras/layers/local.py,729,function,"Apply N-D convolution with un-shared weights using a single matmul call. This method outputs `inputs . (kernel * kernel_mask)` (with `.` standing for matrix-multiply and `*` for element-wise multiply) and requires a precomputed `kernel_mask` to zero-out weights in `kernel` and hence perform the same operation as a convolution with un-shared (the remaining entries in `kernel`) weights. It also does the necessary reshapes to make `inputs` and `kernel` 2-D and `output` (N+2)-D. Arguments: inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ..., d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`. kernel: the unshared weights for N-D convolution, an (N+2)-D tensor of shape: `(d_in1, ..., d_inN, channels_in, d_out2, ..., d_outN, channels_out)` or `(channels_in, d_in1, ..., d_inN, channels_out, d_out2, ..., d_outN)`, with the ordering of channels and spatial dimensions matching that of the input. Each entry is the weight between a particular input and output location, similarly to a fully-connected weight matrix. kernel_mask: a float 0/1 mask tensor of shape: `(d_in1, ..., d_inN, 1, d_out2, ..., d_outN, 1)` or `(1, d_in1, ..., d_inN, 1, d_out2, ..., d_outN)`, with the ordering of singleton and spatial dimensions matching that of the input. Mask represents the connectivity pattern of the layer and is precomputed elsewhere based on layer parameters: stride, padding, and the receptive field shape. output_shape: a tuple of (N+2) elements representing the output shape: `(batch_size, channels_out, d_out1, ..., d_outN)` or `(batch_size, d_out1, ..., d_outN, channels_out)`, with the ordering of channels and spatial dimensions matching that of the input.
Returns: Output (N+2)-D tensor with shape `output_shape`." 5890,local_conv_sparse_matmul,tensorflow/tensorflow/python/keras/layers/local.py,783,function,"Apply N-D convolution with un-shared weights using a single sparse matmul. This method outputs `inputs . tf.sparse.SparseTensor(indices=kernel_idxs, values=kernel, dense_shape=kernel_shape)`, with `.` standing for matrix-multiply. It also reshapes `inputs` to 2-D and `output` to (N+2)-D. Arguments: inputs: (N+2)-D tensor with shape `(batch_size, channels_in, d_in1, ..., d_inN)` or `(batch_size, d_in1, ..., d_inN, channels_in)`. kernel: a 1-D tensor with shape `(len(kernel_idxs),)` containing all the weights of the layer. kernel_idxs: a list of integer tuples representing indices in a sparse matrix performing the un-shared convolution as a matrix-multiply. kernel_shape: a tuple `(input_size, output_size)`, where `input_size = channels_in * d_in1 * ... * d_inN` and `output_size = channels_out * d_out1 * ... * d_outN`. output_shape: a tuple of (N+2) elements representing the output shape: `(batch_size, channels_out, d_out1, ..., d_outN)` or `(batch_size, d_out1, ..., d_outN, channels_out)`, with the ordering of channels and spatial dimensions matching that of the input. Returns: Output (N+2)-D dense tensor with shape `output_shape`." 5891,make_2d,tensorflow/tensorflow/python/keras/layers/local.py,821,function,"Reshapes an N-dimensional tensor into a 2D tensor. Dimensions before (excluding) and after (including) `split_dim` are grouped together. Arguments: tensor: a tensor of shape `(d0, ..., d(N-1))`. split_dim: an integer from 1 to N-1, index of the dimension to group dimensions before (excluding) and after (including). Returns: Tensor of shape `(d0 * ... * d(split_dim-1), d(split_dim) * ... * d(N-1))`." 5892,LocallyConnected1DLayersTest,tensorflow/tensorflow/python/keras/layers/local_test.py,84,class, 5893,LocallyConnected2DLayersTest,tensorflow/tensorflow/python/keras/layers/local_test.py,163,class, 5894,LocallyConnectedImplementationModeTest,tensorflow/tensorflow/python/keras/layers/local_test.py,270,class, 5895,get_inputs,tensorflow/tensorflow/python/keras/layers/local_test.py,402,function, 5896,xent,tensorflow/tensorflow/python/keras/layers/local_test.py,423,function, 5897,get_model,tensorflow/tensorflow/python/keras/layers/local_test.py,433,function, 5898,copy_lc_weights_2_to_1,tensorflow/tensorflow/python/keras/layers/local_test.py,471,function, 5899,copy_lc_weights_2_to_3,tensorflow/tensorflow/python/keras/layers/local_test.py,512,function, 5900,copy_model_weights,tensorflow/tensorflow/python/keras/layers/local_test.py,529,function, 5901,LSTMLayerTest,tensorflow/tensorflow/python/keras/layers/lstm_test.py,37,class, 5902,LSTMV2Test,tensorflow/tensorflow/python/keras/layers/lstm_v2_test.py,64,class, 5903,LSTMGraphRewriteTest,tensorflow/tensorflow/python/keras/layers/lstm_v2_test.py,846,class, 5904,LSTMPerformanceTest,tensorflow/tensorflow/python/keras/layers/lstm_v2_test.py,987,class, 5905,_Merge,tensorflow/tensorflow/python/keras/layers/merge.py,33,class,"Generic merge layer for elementwise merge functions. Used to implement `Sum`, `Average`, etc." 5906,Add,tensorflow/tensorflow/python/keras/layers/merge.py,221,class,"Layer that adds a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). 
Examples: >>> input_shape = (2, 3, 4) >>> x1 = tf.random.normal(input_shape) >>> x2 = tf.random.normal(input_shape) >>> y = tf.keras.layers.Add()([x1, x2]) >>> print(y.shape) (2, 3, 4) Used in a functional model: >>> input1 = tf.keras.layers.Input(shape=(16,)) >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) >>> input2 = tf.keras.layers.Input(shape=(32,)) >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) >>> # equivalent to `added = tf.keras.layers.add([x1, x2])` >>> added = tf.keras.layers.Add()([x1, x2]) >>> out = tf.keras.layers.Dense(4)(added) >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)" 5907,Subtract,tensorflow/tensorflow/python/keras/layers/merge.py,258,class,"Layer that subtracts two inputs. It takes as input a list of tensors of size 2, both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]), also of the same shape. Examples: ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) # Equivalent to subtracted = keras.layers.subtract([x1, x2]) subtracted = keras.layers.Subtract()([x1, x2]) out = keras.layers.Dense(4)(subtracted) model = keras.models.Model(inputs=[input1, input2], outputs=out) ```" 5908,Multiply,tensorflow/tensorflow/python/keras/layers/merge.py,297,class,"Layer that multiplies (element-wise) a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1), ... np.arange(5, 10).reshape(5, 1)]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> multiplied = tf.keras.layers.Multiply()([x1, x2]) >>> multiplied.shape TensorShape([5, 8])" 5909,Average,tensorflow/tensorflow/python/keras/layers/merge.py,327,class,"Layer that averages a list of inputs element-wise. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). Example: >>> x1 = np.ones((2, 2)) >>> x2 = np.zeros((2, 2)) >>> y = tf.keras.layers.Average()([x1, x2]) >>> y.numpy().tolist() [[0.5, 0.5], [0.5, 0.5]] Usage in a functional model: >>> input1 = tf.keras.layers.Input(shape=(16,)) >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) >>> input2 = tf.keras.layers.Input(shape=(32,)) >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) >>> avg = tf.keras.layers.Average()([x1, x2]) >>> out = tf.keras.layers.Dense(4)(avg) >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out) Raises: ValueError: If there is a shape mismatch between the inputs and the shapes cannot be broadcasted to match." 5910,Maximum,tensorflow/tensorflow/python/keras/layers/merge.py,364,class,"Layer that computes the maximum (element-wise) of a list of inputs. It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1), ... np.arange(5, 10).reshape(5, 1)]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> maxed = tf.keras.layers.Maximum()([x1, x2]) >>> maxed.shape TensorShape([5, 8])" 5911,Minimum,tensorflow/tensorflow/python/keras/layers/merge.py,394,class,"Layer that computes the minimum (element-wise) of a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns a single tensor (also of the same shape). >>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1), ... np.arange(5, 10).reshape(5, 1)]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> minned = tf.keras.layers.Minimum()([x1, x2]) >>> minned.shape TensorShape([5, 8])" 5912,Concatenate,tensorflow/tensorflow/python/keras/layers/merge.py,424,class,"Layer that concatenates a list of inputs. It takes as input a list of tensors, all of the same shape except for the concatenation axis, and returns a single tensor that is the concatenation of all inputs. >>> x = np.arange(20).reshape(2, 2, 5) >>> print(x) [[[ 0 1 2 3 4] [ 5 6 7 8 9]] [[10 11 12 13 14] [15 16 17 18 19]]] >>> y = np.arange(20, 30).reshape(2, 1, 5) >>> print(y) [[[20 21 22 23 24]] [[25 26 27 28 29]]] >>> tf.keras.layers.Concatenate(axis=1)([x, y]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> concatted = tf.keras.layers.Concatenate()([x1, x2]) >>> concatted.shape TensorShape([5, 16])" 5913,Dot,tensorflow/tensorflow/python/keras/layers/merge.py,579,class,"Layer that computes a dot product between samples in two tensors. E.g. if applied to a list of two tensors `a` and `b` of shape `(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)` where each entry `i` will be the dot product between `a[i]` and `b[i]`. >>> x = np.arange(10).reshape(1, 5, 2) >>> print(x) [[[0 1] [2 3] [4 5] [6 7] [8 9]]] >>> y = np.arange(10, 20).reshape(1, 2, 5) >>> print(y) [[[10 11 12 13 14] [15 16 17 18 19]]] >>> tf.keras.layers.Dot(axes=(1, 2))([x, y]) >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2)) >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2)) >>> dotted = tf.keras.layers.Dot(axes=1)([x1, x2]) >>> dotted.shape TensorShape([5, 1])" 5914,add,tensorflow/tensorflow/python/keras/layers/merge.py,741,function,"Functional interface to the `tf.keras.layers.Add` layer. Arguments: inputs: A list of input tensors (at least 2) with the same shape. **kwargs: Standard layer keyword arguments. Returns: A tensor as the sum of the inputs. It has the same shape as the inputs. Examples: >>> input_shape = (2, 3, 4) >>> x1 = tf.random.normal(input_shape) >>> x2 = tf.random.normal(input_shape) >>> y = tf.keras.layers.add([x1, x2]) >>> print(y.shape) (2, 3, 4) Used in a functional model: >>> input1 = tf.keras.layers.Input(shape=(16,)) >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) >>> input2 = tf.keras.layers.Input(shape=(32,)) >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) >>> added = tf.keras.layers.add([x1, x2]) >>> out = tf.keras.layers.Dense(4)(added) >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)" 5915,subtract,tensorflow/tensorflow/python/keras/layers/merge.py,775,function,"Functional interface to the `Subtract` layer. Arguments: inputs: A list of input tensors (exactly 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the difference of the inputs.
Examples: ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) subtracted = keras.layers.subtract([x1, x2]) out = keras.layers.Dense(4)(subtracted) model = keras.models.Model(inputs=[input1, input2], outputs=out) ```" 5916,multiply,tensorflow/tensorflow/python/keras/layers/merge.py,804,function,"Functional interface to the `Multiply` layer. Arguments: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the element-wise product of the inputs." 5917,average,tensorflow/tensorflow/python/keras/layers/merge.py,818,function,"Functional interface to the `tf.keras.layers.Average` layer. Example: >>> x1 = np.ones((2, 2)) >>> x2 = np.zeros((2, 2)) >>> y = tf.keras.layers.Average()([x1, x2]) >>> y.numpy().tolist() [[0.5, 0.5], [0.5, 0.5]] Usage in a functional model: >>> input1 = tf.keras.layers.Input(shape=(16,)) >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) >>> input2 = tf.keras.layers.Input(shape=(32,)) >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) >>> avg = tf.keras.layers.Average()([x1, x2]) >>> out = tf.keras.layers.Dense(4)(avg) >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out) Arguments: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the average of the inputs. Raises: ValueError: If there is a shape mismatch between the inputs and the shapes cannot be broadcasted to match." 5918,maximum,tensorflow/tensorflow/python/keras/layers/merge.py,854,function,"Functional interface to compute the maximum (element-wise) of a list of `inputs`. This is equivalent to the `tf.keras.layers.Maximum` layer. For example: ```python input1 = tf.keras.layers.Input(shape=(16,)) x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8) input2 = tf.keras.layers.Input(shape=(32,)) x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8) max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8) out = tf.keras.layers.Dense(4)(max_inp) model = tf.keras.models.Model(inputs=[input1, input2], outputs=out) ``` Arguments: inputs: A list of input tensors (at least 2) of same shape. **kwargs: Standard layer keyword arguments. Returns: A tensor (of same shape as input tensor) with the element-wise maximum of the inputs. Raises: ValueError: If input tensors are of different shape." 5919,minimum,tensorflow/tensorflow/python/keras/layers/merge.py,886,function,"Functional interface to the `Minimum` layer. Arguments: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the element-wise minimum of the inputs." 5920,concatenate,tensorflow/tensorflow/python/keras/layers/merge.py,900,function,"Functional interface to the `Concatenate` layer. >>> x = np.arange(20).reshape(2, 2, 5) >>> print(x) [[[ 0 1 2 3 4] [ 5 6 7 8 9]] [[10 11 12 13 14] [15 16 17 18 19]]] >>> y = np.arange(20, 30).reshape(2, 1, 5) >>> print(y) [[[20 21 22 23 24]] [[25 26 27 28 29]]] >>> tf.keras.layers.concatenate([x, y], ... axis=1) Arguments: inputs: A list of input tensors (at least 2). axis: Concatenation axis. **kwargs: Standard layer keyword arguments. Returns: A tensor, the concatenation of the inputs alongside axis `axis`." 5921,dot,tensorflow/tensorflow/python/keras/layers/merge.py,935,function,"Functional interface to the `Dot` layer.
Arguments: inputs: A list of input tensors (at least 2). axes: Integer or tuple of integers, axis or axes along which to take the dot product. normalize: Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to True, then the output of the dot product is the cosine proximity between the two samples. **kwargs: Standard layer keyword arguments. Returns: A tensor, the dot product of the samples from the inputs." 5922,MergeLayersTest,tensorflow/tensorflow/python/keras/layers/merge_test.py,36,class, 5923,MergeLayersTestNoExecution,tensorflow/tensorflow/python/keras/layers/merge_test.py,266,class, 5924,GaussianNoise,tensorflow/tensorflow/python/keras/layers/noise.py,32,class,"Apply additive zero-centered Gaussian noise. This is useful to mitigate overfitting (you could see it as a form of random data augmentation). Gaussian Noise (GN) is a natural choice as a corruption process for real valued inputs. As it is a regularization layer, it is only active at training time. Arguments: stddev: Float, standard deviation of the noise distribution. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding noise) or in inference mode (doing nothing). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input." 5925,GaussianDropout,tensorflow/tensorflow/python/keras/layers/noise.py,86,class,"Apply multiplicative 1-centered Gaussian noise. As it is a regularization layer, it is only active at training time. Arguments: rate: Float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input." 5926,AlphaDropout,tensorflow/tensorflow/python/keras/layers/noise.py,140,class,"Applies Alpha Dropout to the input. Alpha Dropout is a `Dropout` that keeps mean and variance of inputs to their original values, in order to ensure the self-normalizing property even after this dropout. Alpha Dropout fits well to Scaled Exponential Linear Units by randomly setting activations to the negative saturation value. Arguments: rate: float, drop probability (as with `Dropout`). The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`. seed: A Python integer to use as random seed. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input." 5927,NoiseLayersTest,tensorflow/tensorflow/python/keras/layers/noise_test.py,31,class, 5928,BatchNormalizationBase,tensorflow/tensorflow/python/keras/layers/normalization.py,43,class,"Normalize and scale inputs or activations. Normalize the activations of the previous layer at each batch, i.e.
applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. Batch normalization differs from other layers in several key aspects: 1) Adding BatchNormalization with `training=True` to a model causes the result of one example to depend on the contents of all other examples in a minibatch. Be careful when padding batches or masking examples, as these can change the minibatch statistics and affect other examples. 2) Updates to the weights (moving statistics) are based on the forward pass of a model rather than the result of gradient computations. 3) When performing inference using a model containing batch normalization, it is generally (though not always) desirable to use accumulated statistics rather than mini-batch statistics. This is accomplished by passing `training=False` when calling the model, or using `model.predict`. Arguments: axis: Integer, the axis that should be normalized (typically the features axis). For instance, after a `Conv2D` layer with `data_format=""channels_first""`, set `axis=1` in `BatchNormalization`. momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling will be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. renorm: Whether to use [Batch Renormalization]( https://arxiv.org/abs/1702.03275). This adds extra variables during training. The inference is the same for either value of this parameter. renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `True`, use a faster, fused implementation, or raise a ValueError if the fused implementation cannot be used. If `None`, use the faster implementation if possible. If False, do not use the fused implementation. trainable: Boolean, if `True` the variables will be marked as trainable. virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. When `virtual_batch_size` is not `None`, instead perform ""Ghost Batch Normalization"", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta. If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. - `training=True`: The layer will normalize its inputs using the mean and variance of the current batch of inputs. - `training=False`: The layer will normalize its inputs using the mean and variance of its moving statistics, learned during training. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. {{TRAINABLE_ATTRIBUTE_NOTE}} Normalization equations: Consider the intermediate activations \\(x\\) of a mini-batch of size \\(m\\): We can compute the mean and variance of the batch \\({\mu_B} = \frac{1}{m} \sum_{i=1}^{m} {x_i}\\) \\({\sigma_B^2} = \frac{1}{m} \sum_{i=1}^{m} ({x_i} - {\mu_B})^2\\) and then compute a normalized \\(x\\), including a small factor \\({\epsilon}\\) for numerical stability. \\(\hat{x_i} = \frac{x_i - \mu_B}{\sqrt{\sigma_B^2 + \epsilon}}\\) And finally \\(\hat{x}\\) is linearly transformed by \\({\gamma}\\) and \\({\beta}\\), which are learned parameters: \\({y_i} = {\gamma * \hat{x_i} + \beta}\\) Reference: - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167)." 5929,replace_in_base_docstring,tensorflow/tensorflow/python/keras/layers/normalization.py,925,function, 5930,BatchNormalization,tensorflow/tensorflow/python/keras/layers/normalization.py,934,class, 5931,LayerNormalization,tensorflow/tensorflow/python/keras/layers/normalization.py,949,class,"Layer normalization layer (Ba et al., 2016). Normalize the activations of the previous layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1. Given a tensor `inputs`, moments are calculated and normalization is performed across the axes specified in `axis`. Example: >>> data = tf.constant(np.arange(10).reshape(5, 2) * 10, dtype=tf.float32) >>> print(data) tf.Tensor( [[ 0. 10.] [20. 30.] [40. 50.] [60. 70.] [80. 90.]], shape=(5, 2), dtype=float32) >>> layer = tf.keras.layers.LayerNormalization(axis=1) >>> output = layer(data) >>> print(output) tf.Tensor( [[-1. 1.] [-1. 1.] [-1. 1.] [-1. 1.] [-1. 1.]], shape=(5, 2), dtype=float32) Notice that with Layer Normalization the normalization happens across the axes *within* each example, rather than across different examples in the batch. If `scale` or `center` are enabled, the layer will scale the normalized outputs by broadcasting them with a trainable variable `gamma`, and center the outputs by broadcasting with a trainable variable `beta`.
`gamma` will default to a ones tensor and `beta` will default to a zeros tensor, so that centering and scaling are no-ops before training has begun. So, with scaling and centering enabled the normalization equations are as follows: Let the intermediate activations for a mini-batch to be the `inputs`. For each sample `x_i` in `inputs` with `k` features, we compute the mean and variance of the sample: ```python mean_i = sum(x_i[j] for j in range(k)) / k var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k ``` and then compute a normalized `x_i_normalized`, including a small factor `epsilon` for numerical stability. ```python x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon) ``` And finally `x_i_normalized` is linearly transformed by `gamma` and `beta`, which are learned parameters: ```python output_i = x_i_normalized * gamma + beta ``` `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and this part of the inputs' shape must be fully defined. For example: >>> layer = tf.keras.layers.LayerNormalization(axis=[1, 2, 3]) >>> layer.build([5, 20, 30, 40]) >>> print(layer.beta.shape) (20, 30, 40) >>> print(layer.gamma.shape) (20, 30, 40) Note that other implementations of layer normalization may choose to define `gamma` and `beta` over a separate set of axes from the axes being normalized across. For example, Group Normalization ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1 corresponds to a Layer Normalization that normalizes across height, width, and channel and has `gamma` and `beta` span only the channel dimension. So, this Layer Normalization implementation will not match a Group Normalization layer with group size set to 1. Arguments: axis: Integer or List/Tuple. The axis or axes to normalize across. Typically this is the features axis/axes. The left-out axes are typically the batch axis/axes. This argument defaults to `-1`, the last dimension in the input. epsilon: Small float added to variance to avoid dividing by zero. Defaults to 1e-3. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. Defaults to True. scale: If True, multiply by `gamma`. If False, `gamma` is not used. Defaults to True. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling will be done by the next layer. beta_initializer: Initializer for the beta weight. Defaults to zeros. gamma_initializer: Initializer for the gamma weight. Defaults to ones. beta_regularizer: Optional regularizer for the beta weight. None by default. gamma_regularizer: Optional regularizer for the gamma weight. None by default. beta_constraint: Optional constraint for the beta weight. None by default. gamma_constraint: Optional constraint for the gamma weight. None by default. trainable: Boolean, if `True` the variables will be marked as trainable. Defaults to True. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. Reference: - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450)."
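As a sanity check on the `mean_i` / `var_i` equations quoted above, the following sketch reproduces the `LayerNormalization` output by hand. It reuses the same toy tensor as the docstring example and relies only on the stated fact that a freshly initialized layer has `gamma=1` and `beta=0`; the `atol` tolerance is an illustrative choice.

```python
import numpy as np
import tensorflow as tf

# Same toy data as the LayerNormalization docstring example.
x = np.arange(10, dtype=np.float32).reshape(5, 2) * 10
layer = tf.keras.layers.LayerNormalization(axis=1, epsilon=1e-3)
out = layer(x).numpy()

# Per-sample mean/variance over the normalized axis, as in the
# mean_i / var_i equations above; gamma=1 and beta=0 at initialization,
# so the layer output equals the plain normalization.
mean = x.mean(axis=1, keepdims=True)
var = x.var(axis=1, keepdims=True)
manual = (x - mean) / np.sqrt(var + 1e-3)

print(np.allclose(out, manual, atol=1e-5))  # True
```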
5932,BatchNormalizationTest,tensorflow/tensorflow/python/keras/layers/normalization_test.py,44,class, 5933,BatchNormalizationV1Test,tensorflow/tensorflow/python/keras/layers/normalization_test.py,238,class, 5934,BatchNormalizationV2Test,tensorflow/tensorflow/python/keras/layers/normalization_test.py,260,class, 5935,_run_batchnorm_correctness_test,tensorflow/tensorflow/python/keras/layers/normalization_test.py,348,function, 5936,NormalizationLayersGraphModeOnlyTest,tensorflow/tensorflow/python/keras/layers/normalization_test.py,375,class, 5937,_run_layernorm_correctness_test,tensorflow/tensorflow/python/keras/layers/normalization_test.py,474,function, 5938,LayerNormalizationTest,tensorflow/tensorflow/python/keras/layers/normalization_test.py,496,class, 5939,LayerNormalizationNumericsTest,tensorflow/tensorflow/python/keras/layers/normalization_test.py,619,class,Tests LayerNormalization has correct and numerically stable outputs. 5940,SyncBatchNormalization,tensorflow/tensorflow/python/keras/layers/normalization_v2.py,32,class,"Normalize and scale inputs or activations synchronously across replicas. Applies batch normalization to activations of the previous layer at each batch by synchronizing the global batch statistics across all devices that are training the model. For specific details about batch normalization please refer to the `tf.keras.layers.BatchNormalization` layer docs. If this layer is used with a tf.distribute strategy to train models across devices/workers, there will be an allreduce call to aggregate batch statistics across all replicas at every training step. Without a tf.distribute strategy, this layer behaves as a regular `tf.keras.layers.BatchNormalization` layer. Example usage: ```python strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(16)) model.add(tf.keras.layers.experimental.SyncBatchNormalization()) ``` Arguments: axis: Integer, the axis that should be normalized (typically the features axis). For instance, after a `Conv2D` layer with `data_format=""channels_first""`, set `axis=1` in `BatchNormalization`. momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling will be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: Optional constraint for the beta weight. gamma_constraint: Optional constraint for the gamma weight. renorm: Whether to use [Batch Renormalization]( https://arxiv.org/abs/1702.03275). This adds extra variables during training. The inference is the same for either value of this parameter. renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively.
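To make the `renorm_clipping` argument concrete, a hedged sketch (the clipping bounds below are illustrative values, not recommendations; any subset of the three keys may be omitted):

```python
import tensorflow as tf

# Illustrative clipping bounds; omitted keys default to no clipping.
clipping = {'rmax': 3.0, 'rmin': 1.0 / 3.0, 'dmax': 5.0}
layer = tf.keras.layers.experimental.SyncBatchNormalization(
    renorm=True, renorm_clipping=clipping)
```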
renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. trainable: Boolean, if `True` the variables will be marked as trainable. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. - `training=True`: The layer will normalize its inputs using the mean and variance of the current batch of inputs. - `training=False`: The layer will normalize its inputs using the mean and variance of its moving statistics, learned during training. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input." 5941,BatchNormalization,tensorflow/tensorflow/python/keras/layers/normalization_v2.py,208,class, 5942,Pooling1D,tensorflow/tensorflow/python/keras/layers/pooling.py,34,class,"Pooling layer for arbitrary pooling functions, for 1D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. name: A string, the name of the layer." 5943,MaxPooling1D,tensorflow/tensorflow/python/keras/layers/pooling.py,112,class,"Max pooling operation for 1D temporal data. Downsamples the input representation by taking the maximum value over the window defined by `pool_size`. The window is shifted by `strides`. The resulting output when using the ""valid"" padding option has a shape of: `output_shape = (input_shape - pool_size + 1) / strides` The resulting output shape when using the ""same"" padding option is: `output_shape = input_shape / strides` For example, for strides=1 and padding=""valid"": >>> x = tf.constant([1., 2., 3., 4., 5.]) >>> x = tf.reshape(x, [1, 5, 1]) >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2, ... strides=1, padding='valid') >>> max_pool_1d(x) For example, for strides=2 and padding=""valid"": >>> x = tf.constant([1., 2., 3., 4., 5.]) >>> x = tf.reshape(x, [1, 5, 1]) >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2, ... strides=2, padding='valid') >>> max_pool_1d(x) For example, for strides=1 and padding=""same"": >>> x = tf.constant([1., 2., 3., 4., 5.]) >>> x = tf.reshape(x, [1, 5, 1]) >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2, ... strides=1, padding='same') >>> max_pool_1d(x) Arguments: pool_size: Integer, size of the max pooling window. strides: Integer, or None. Specifies how much the pooling window moves for each pooling step. If None, it will default to `pool_size`. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding.
`""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, steps)`. Output shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, downsampled_steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, downsampled_steps)`." 5944,AveragePooling1D,tensorflow/tensorflow/python/keras/layers/pooling.py,204,class,"Average pooling for temporal data. Arguments: pool_size: Integer, size of the average pooling windows. strides: Integer, or None. Factor by which to downscale. E.g. 2 will halve the input. If None, it will default to `pool_size`. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, steps)`. Output shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, downsampled_steps, features)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, features, downsampled_steps)`." 5945,Pooling2D,tensorflow/tensorflow/python/keras/layers/pooling.py,248,class,"Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images). This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer." 5946,MaxPooling2D,tensorflow/tensorflow/python/keras/layers/pooling.py,334,class,"Max pooling operation for 2D spatial data. Downsamples the input representation by taking the maximum value over the window defined by `pool_size` for each dimension along the features axis. The window is shifted by `strides` in each dimension. 
The resulting output when using the ""valid"" padding option has a shape (number of rows or columns) of: `output_shape = (input_shape - pool_size + 1) / strides` The resulting output shape when using the ""same"" padding option is: `output_shape = input_shape / strides` For example, for strides=(1, 1) and padding=""valid"": >>> x = tf.constant([[1., 2., 3.], ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = tf.reshape(x, [1, 3, 3, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding='valid') >>> max_pool_2d(x) For example, for strides=(2, 2) and padding=""valid"": >>> x = tf.constant([[1., 2., 3., 4.], ... [5., 6., 7., 8.], ... [9., 10., 11., 12.]]) >>> x = tf.reshape(x, [1, 3, 4, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(2, 2), padding='valid') >>> max_pool_2d(x) Usage Example: >>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]], ... [[2.], [2.], [3.], [2.]], ... [[4.], [1.], [1.], [1.]], ... [[2.], [2.], [1.], [4.]]]]) >>> output = tf.constant([[[[1], [0]], ... [[0], [1]]]]) >>> model = tf.keras.models.Sequential() >>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... input_shape=(4,4,1))) >>> model.compile('adam', 'mean_squared_error') >>> model.predict(input_image, steps=1) array([[[[2.], [4.]], [[4.], [4.]]]], dtype=float32) For example, for strides=(1, 1) and padding=""same"": >>> x = tf.constant([[1., 2., 3.], ... [4., 5., 6.], ... [7., 8., 9.]]) >>> x = tf.reshape(x, [1, 3, 3, 1]) >>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), ... strides=(1, 1), padding='same') >>> max_pool_2d(x) Arguments: pool_size: integer or tuple of 2 integers, window size over which to take the maximum. `(2, 2)` will take the max value over a 2x2 pooling window. If only one integer is specified, the same window length will be used for both dimensions. strides: Integer, tuple of 2 integers, or None. Strides values. Specifies how far the pooling window moves for each pooling step. If None, it will default to `pool_size`. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`. Returns: A tensor of rank 4 representing the maximum pooled values. See above for output shape." 5947,AveragePooling2D,tensorflow/tensorflow/python/keras/layers/pooling.py,470,class,"Average pooling operation for spatial data. Arguments: pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal).
`(2, 2)` will halve the input in both spatial dimensions. If only one integer is specified, the same window length will be used for both dimensions. strides: Integer, tuple of 2 integers, or None. Strides values. If None, it will default to `pool_size`. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`." 5948,Pooling3D,tensorflow/tensorflow/python/keras/layers/pooling.py,522,class,"Pooling layer for arbitrary pooling functions, for 3D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer." 5949,MaxPooling3D,tensorflow/tensorflow/python/keras/layers/pooling.py,619,class,"Max pooling operation for 3D data (spatial or spatio-temporal). Arguments: pool_size: Tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). `(2, 2, 2)` will halve the size of the 3D input in each dimension. strides: tuple of 3 integers, or None. Strides values. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`" 5950,AveragePooling3D,tensorflow/tensorflow/python/keras/layers/pooling.py,672,class,"Average pooling operation for 3D data (spatial or spatio-temporal). Arguments: pool_size: tuple of 3 integers, factors by which to downscale (dim1, dim2, dim3). `(2, 2, 2)` will halve the size of the 3D input in each dimension. strides: tuple of 3 integers, or None. Strides values. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`" 5951,GlobalPooling1D,tensorflow/tensorflow/python/keras/layers/pooling.py,724,class,Abstract class for different global pooling 1D layers. 5952,GlobalAveragePooling1D,tensorflow/tensorflow/python/keras/layers/pooling.py,750,class,"Global average pooling operation for temporal data. Examples: >>> input_shape = (2, 3, 4) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalAveragePooling1D()(x) >>> print(y.shape) (2, 4) Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(batch_size, steps)` indicating whether a given step should be masked (excluded from the average). Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: 2D tensor with shape `(batch_size, features)`." 
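A small sketch of the masking behavior described for `GlobalAveragePooling1D` above: steps whose mask entry is `False` are excluded from the average (the tensors here are illustrative):

```python
import tensorflow as tf

x = tf.constant([[[1.], [2.], [3.]],
                 [[4.], [5.], [6.]]])       # shape (batch=2, steps=3, features=1)
mask = tf.constant([[True, True, False],
                    [True, False, False]])  # shape (batch=2, steps=3)

gap = tf.keras.layers.GlobalAveragePooling1D()
# Row 0 averages steps [1., 2.]; row 1 keeps only [4.].
print(gap(x, mask=mask))  # [[1.5], [4.]]
```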
5953,GlobalMaxPooling1D,tensorflow/tensorflow/python/keras/layers/pooling.py,809,class,"Global max pooling operation for 1D temporal data. Downsamples the input representation by taking the maximum value over the time dimension. For example: >>> x = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) >>> x = tf.reshape(x, [3, 3, 1]) >>> x >>> max_pool_1d = tf.keras.layers.GlobalMaxPooling1D() >>> max_pool_1d(x) Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape: `(batch_size, steps, features)` - If `data_format='channels_first'`: 3D tensor with shape: `(batch_size, features, steps)` Output shape: 2D tensor with shape `(batch_size, features)`." 5954,GlobalPooling2D,tensorflow/tensorflow/python/keras/layers/pooling.py,857,class,"Abstract class for different global pooling 2D layers. " 5955,GlobalAveragePooling2D,tensorflow/tensorflow/python/keras/layers/pooling.py,884,class,"Global average pooling operation for spatial data. Examples: >>> input_shape = (2, 4, 5, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalAveragePooling2D()(x) >>> print(y.shape) (2, 3) Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: 2D tensor with shape `(batch_size, channels)`." 5956,GlobalMaxPooling2D,tensorflow/tensorflow/python/keras/layers/pooling.py,925,class,"Global max pooling operation for spatial data. Examples: >>> input_shape = (2, 4, 5, 3) >>> x = tf.random.normal(input_shape) >>> y = tf.keras.layers.GlobalMaxPool2D()(x) >>> print(y.shape) (2, 3) Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 4D tensor with shape `(batch_size, rows, cols, channels)`. - If `data_format='channels_first'`: 4D tensor with shape `(batch_size, channels, rows, cols)`. Output shape: 2D tensor with shape `(batch_size, channels)`." 5957,GlobalPooling3D,tensorflow/tensorflow/python/keras/layers/pooling.py,965,class,Abstract class for different global pooling 3D layers. 5958,GlobalAveragePooling3D,tensorflow/tensorflow/python/keras/layers/pooling.py,991,class,"Global Average pooling operation for 3D data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. 
The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: 2D tensor with shape `(batch_size, channels)`." 5959,GlobalMaxPooling3D,tensorflow/tensorflow/python/keras/layers/pooling.py,1026,class,"Global Max pooling operation for 3D data. Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". Input shape: - If `data_format='channels_last'`: 5D tensor with shape: `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` - If `data_format='channels_first'`: 5D tensor with shape: `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` Output shape: 2D tensor with shape `(batch_size, channels)`." 5960,GlobalPoolingTest,tensorflow/tensorflow/python/keras/layers/pooling_test.py,34,class, 5961,Pooling2DTest,tensorflow/tensorflow/python/keras/layers/pooling_test.py,148,class, 5962,Pooling3DTest,tensorflow/tensorflow/python/keras/layers/pooling_test.py,194,class, 5963,Pooling1DTest,tensorflow/tensorflow/python/keras/layers/pooling_test.py,238,class, 5964,StackedRNNCells,tensorflow/tensorflow/python/keras/layers/recurrent.py,59,class,"Wrapper allowing a stack of RNN cells to behave as a single cell. Used to implement efficient stacked RNNs. Arguments: cells: List of RNN cell instances. Examples: ```python batch_size = 3 sentence_max_length = 5 n_features = 2 new_shape = (batch_size, sentence_max_length, n_features) x = tf.constant(np.reshape(np.arange(30), new_shape), dtype = tf.float32) rnn_cells = [tf.keras.layers.LSTMCell(128) for _ in range(2)] stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells) lstm_layer = tf.keras.layers.RNN(stacked_lstm) result = lstm_layer(x) ```" 5965,RNN,tensorflow/tensorflow/python/keras/layers/recurrent.py,204,class,"Base class for recurrent layers. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. Arguments: cell: A RNN cell instance or a list of RNN cell instances. A RNN cell is a class that has: - A `call(input_at_t, states_at_t)` method, returning `(output_at_t, states_at_t_plus_1)`. The call method of the cell can also take the optional argument `constants`, see section ""Note on passing external constants"" below. - A `state_size` attribute. This can be a single integer (single state) in which case it is the size of the recurrent state. This can also be a list/tuple of integers (one size per state). 
The `state_size` can also be TensorShape or tuple/list of TensorShape, to represent high dimension state. - An `output_size` attribute. This can be a single integer or a TensorShape, which represents the shape of the output. For backwards compatibility, if this attribute is not available for the cell, the value will be inferred from the first element of the `state_size`. - A `get_initial_state(inputs=None, batch_size=None, dtype=None)` method that creates a tensor meant to be fed to `call()` as the initial state, if the user didn't specify any initial state via other means. The returned initial state should have a shape of [batch_size, cell.state_size]. The cell might choose to create a tensor full of zeros, or full of other values based on the cell's implementation. `inputs` is the input tensor to the RNN layer, which should contain the batch size as its shape[0], and also its dtype. Note that the shape[0] might be `None` during the graph construction. Either the `inputs` or the pair of `batch_size` and `dtype` must be provided. `batch_size` is a scalar tensor that represents the batch size of the inputs. `dtype` is a `tf.DType` that represents the dtype of the inputs. For backwards compatibility, if this method is not implemented by the cell, the RNN layer will create a zero filled tensor with the size of [batch_size, cell.state_size]. In the case that `cell` is a list of RNN cell instances, the cells will be stacked on top of each other in the RNN, resulting in an efficient stacked RNN. return_sequences: Boolean (default `False`). Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean (default `False`). Whether to return the last state in addition to the output. go_backwards: Boolean (default `False`). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default `False`). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default `False`). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. zero_output_for_mask: Boolean (default `False`). Whether the output should use zeros for the masked timesteps. Note that this field is only used when `return_sequences` is True and mask is provided. It can be useful if you want to reuse the raw output sequence of the RNN without interference from the masked timesteps, e.g. merging bidirectional RNNs. Call arguments: inputs: Input tensor. mask: Binary tensor of shape `[batch_size, timesteps]` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is for use with cells that use dropout. initial_state: List of initial state tensors to be passed to the first call of the cell.
constants: List of constant tensors to be passed to the cell at each timestep. Input shape: N-D tensor with shape `[batch_size, timesteps, ...]` or `[timesteps, batch_size, ...]` when time_major is True. Output shape: - If `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each with shape `[batch_size, state_size]`, where `state_size` could be a high dimension tensor shape. - If `return_sequences`: N-D tensor with shape `[batch_size, timesteps, output_size]`, where `output_size` could be a high dimension tensor shape, or `[timesteps, batch_size, output_size]` when `time_major` is True. - Else, N-D tensor with shape `[batch_size, output_size]`, where `output_size` could be a high dimension tensor shape. Masking: This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use a [tf.keras.layers.Embedding] layer with the `mask_zero` parameter set to `True`. Note on using statefulness in RNNs: You can set RNN layers to be 'stateful', which means that the states computed for the samples in one batch will be reused as initial states for the samples in the next batch. This assumes a one-to-one mapping between samples in different successive batches. To enable statefulness: - Specify `stateful=True` in the layer constructor. - Specify a fixed batch size for your model: for a sequential model, pass `batch_input_shape=(...)` to the first layer in your model; for a functional model with 1 or more Input layers, pass `batch_shape=(...)` to all the first layers in your model. This is the expected shape of your inputs *including the batch size*. It should be a tuple of integers, e.g. `(32, 10, 100)`. - Specify `shuffle=False` when calling `fit()`. To reset the states of your model, call `.reset_states()` on either a specific layer, or on your entire model. Note on specifying the initial state of RNNs: You can specify the initial state of RNN layers symbolically by calling them with the keyword argument `initial_state`. The value of `initial_state` should be a tensor or list of tensors representing the initial state of the RNN layer. You can specify the initial state of RNN layers numerically by calling `reset_states` with the keyword argument `states`. The value of `states` should be a numpy array or list of numpy arrays representing the initial state of the RNN layer. Note on passing external constants to RNNs: You can pass ""external"" constants to the cell using the `constants` keyword argument of the `RNN.__call__` (as well as `RNN.call`) method. This requires that the `cell.call` method accepts the same keyword argument `constants`. Such constants can be used to condition the cell transformation on additional static inputs (not changing over time), a.k.a. an attention mechanism. Examples: ```python # First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(MinimalRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = K.dot(inputs, self.kernel) output = h + K.dot(prev_output, self.recurrent_kernel) return output, [output] # Let's use this cell in a RNN layer: cell = MinimalRNNCell(32) x = keras.Input((None, 5)) layer = RNN(cell) y = layer(x) # Here's how to use the cell to build a stacked RNN: cells = [MinimalRNNCell(32), MinimalRNNCell(64)] x = keras.Input((None, 5)) layer = RNN(cells) y = layer(x) ```" 5966,AbstractRNNCell,tensorflow/tensorflow/python/keras/layers/recurrent.py,995,class,"Abstract object representing an RNN cell. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. This is the base class for implementing RNN cells with custom behavior. Every `RNNCell` must have the properties below and implement `call` with the signature `(output, next_state) = call(input, state)`. Examples: ```python class MinimalRNNCell(AbstractRNNCell): def __init__(self, units, **kwargs): self.units = units super(MinimalRNNCell, self).__init__(**kwargs) @property def state_size(self): return self.units def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = K.dot(inputs, self.kernel) output = h + K.dot(prev_output, self.recurrent_kernel) return output, output ``` This definition of cell differs from the definition used in the literature. In the literature, 'cell' refers to an object with a single scalar output. This definition refers to a horizontal array of such units. An RNN cell, in the most abstract setting, is anything that has a state and performs some operation that takes a matrix of inputs. This operation results in an output matrix with `self.output_size` columns. If `self.state_size` is an integer, this operation also results in a new state matrix with `self.state_size` columns. If `self.state_size` is a (possibly nested tuple of) TensorShape object(s), then it should return a matching structure of Tensors having shape `[batch_size].concatenate(s)` for each `s` in `self.state_size`." 5967,DropoutRNNCellMixin,tensorflow/tensorflow/python/keras/layers/recurrent.py,1086,class,"Object that holds dropout-related fields for an RNN Cell. This class is not a standalone RNN cell. It is supposed to be used with a RNN cell via multiple inheritance. Any cell that mixes in this class should have the following fields: dropout: a float number within range [0, 1). The fraction of the input tensor to drop. recurrent_dropout: a float number within range [0, 1). The fraction of the recurrent state weights to drop. This object will create and cache dropout masks, and reuse them for the incoming data, so that the same mask is used for every batch input." 5968,SimpleRNNCell,tensorflow/tensorflow/python/keras/layers/recurrent.py,1221,class,"Cell class for SimpleRNN.
See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. This class processes one step within the whole time sequence input, whereas `tf.keras.layers.SimpleRNN` processes the whole sequence. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). use_bias: Boolean, (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. Call arguments: inputs: A 2D tensor, with shape of `[batch, feature]`. states: A 2D tensor with shape of `[batch, units]`, which is the state from the previous time step. For timestep 0, the initial state provided by the user will be fed to the cell. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used. Examples: ```python inputs = np.random.random([32, 10, 8]).astype(np.float32) rnn = tf.keras.layers.RNN(tf.keras.layers.SimpleRNNCell(4)) output = rnn(inputs) # The output has shape `[32, 4]`. rnn = tf.keras.layers.RNN( tf.keras.layers.SimpleRNNCell(4), return_sequences=True, return_state=True) # whole_sequence_output has shape `[32, 10, 4]`. # final_state has shape `[32, 4]`. whole_sequence_output, final_state = rnn(inputs) ```" 5969,SimpleRNN,tensorflow/tensorflow/python/keras/layers/recurrent.py,1423,class,"Fully-connected RNN where the output is to be fed back to input. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass None, no activation is applied (ie. ""linear"" activation: `a(x) = x`). use_bias: Boolean, (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: `False`. return_state: Boolean. Whether to return the last state in addition to the output. Default: `False`. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. Call arguments: inputs: A 3D tensor, with shape `[batch, timesteps, feature]`. mask: Binary tensor of shape `[batch, timesteps]` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used. initial_state: List of initial state tensors to be passed to the first call of the cell. Examples: ```python inputs = np.random.random([32, 10, 8]).astype(np.float32) simple_rnn = tf.keras.layers.SimpleRNN(4) output = simple_rnn(inputs) # The output has shape `[32, 4]`. simple_rnn = tf.keras.layers.SimpleRNN( 4, return_sequences=True, return_state=True) # whole_sequence_output has shape `[32, 10, 4]`. # final_state has shape `[32, 4]`. whole_sequence_output, final_state = simple_rnn(inputs) ```" 5970,GRUCell,tensorflow/tensorflow/python/keras/layers/recurrent.py,1676,class,"Cell class for the GRU layer. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass None, no activation is applied (ie. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = ""before"" (default), True = ""after"" (CuDNN compatible). Call arguments: inputs: A 2D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used." 5971,GRU,tensorflow/tensorflow/python/keras/layers/recurrent.py,1956,class,"Gated Recurrent Unit - Cho et al. 2014. There are two variants. The default one is based on 1406.1078v3 and has the reset gate applied to the hidden state before matrix multiplication. The other one is based on the original 1406.1078v1 and has the order reversed. The second variant is compatible with CuDNNGRU (GPU-only) and allows inference on CPU. Thus it has separate biases for `kernel` and `recurrent_kernel`. Use `reset_after=True` and `recurrent_activation='sigmoid'`. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = ""before"" (default), True = ""after"" (CuDNN compatible). Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used. initial_state: List of initial state tensors to be passed to the first call of the cell." 5972,LSTMCell,tensorflow/tensorflow/python/keras/layers/recurrent.py,2240,class,"Cell class for the LSTM layer. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to `True` will also force `bias_initializer=""zeros""`. This is recommended in [Jozefowicz et al., 2015]( http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector. kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Call arguments: inputs: A 2D tensor. states: List of state tensors corresponding to the previous timestep. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used." 5973,PeepholeLSTMCell,tensorflow/tensorflow/python/keras/layers/recurrent.py,2527,class,"Equivalent to LSTMCell class but adds peephole connections. Peephole connections allow the gates to utilize the previous internal state as well as the previous hidden state (which is what LSTMCell is limited to). This allows PeepholeLSTMCell to better learn precise timings over LSTMCell. From [Gers et al., 2002]( http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf): ""We find that LSTM augmented by 'peephole connections' from its internal cells to its multiplicative gates can learn the fine distinction between sequences of spikes spaced either 50 or 49 time steps apart without the help of any short training exemplars."" The peephole implementation is based on: [Sak et al., 2014](https://research.google.com/pubs/archive/43905.pdf) Example: ```python # Create 2 PeepholeLSTMCells peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]] # Create a layer composed sequentially of the peephole LSTM cells. layer = RNN(peephole_lstm_cells) input = keras.Input((timesteps, input_dim)) output = layer(input) ```" 5974,LSTM,tensorflow/tensorflow/python/keras/layers/recurrent.py,2645,class,"Long Short-Term Memory layer - Hochreiter 1997. Note that this cell is not optimized for performance on GPU. Please use `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU. Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: hard sigmoid (`hard_sigmoid`). If you pass `None`, no activation is applied (ie. ""linear"" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. bias_initializer: Initializer for the bias vector. unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at initialization. Setting it to `True` will also force `bias_initializer=""zeros""`.
This is recommended in [Jozefowicz et al., 2015]( http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). kernel_constraint: Constraint function applied to the `kernel` weights matrix. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up a RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. Call arguments: inputs: A 3D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used. initial_state: List of initial state tensors to be passed to the first call of the cell." 5975,_generate_dropout_mask,tensorflow/tensorflow/python/keras/layers/recurrent.py,2925,function, 5976,_standardize_args,tensorflow/tensorflow/python/keras/layers/recurrent.py,2937,function,"Standardizes `__call__` to a single list of tensor inputs. When running a model loaded from a file, the input tensors `initial_state` and `constants` can be passed to `RNN.__call__()` as part of `inputs` instead of by the dedicated keyword arguments. This method makes sure the arguments are separated and that `initial_state` and `constants` are lists of tensors (or None). Arguments: inputs: Tensor or list/tuple of tensors, which may include constants and initial states.
In that case `num_constants` must be specified. initial_state: Tensor or list of tensors or None, initial states. constants: Tensor or list of tensors or None, constant tensors. num_constants: Expected number of constants (if constants are passed as part of the `inputs` list). Returns: inputs: Single tensor or tuple of tensors. initial_state: List of tensors or None. constants: List of tensors or None." 5977,_is_multiple_state,tensorflow/tensorflow/python/keras/layers/recurrent.py,2998,function,Check whether the state_size contains multiple states. 5978,_generate_zero_filled_state_for_cell,tensorflow/tensorflow/python/keras/layers/recurrent.py,3004,function, 5979,_generate_zero_filled_state,tensorflow/tensorflow/python/keras/layers/recurrent.py,3011,function,"Generate a zero filled tensor with shape [batch_size, state_size]." 5980,_caching_device,tensorflow/tensorflow/python/keras/layers/recurrent.py,3029,function,"Returns the caching device for the RNN variable. This is useful for distributed training, when the variable is not located on the same device as the training worker. By enabling the device cache, this allows the worker to read the variable once and cache it locally, rather than reading it from the remote device at every time step. Note that this assumes the variable the cell needs for each time step has the same value in the forward path, and only gets updated in the backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the cell body relies on any variable that gets updated every time step, then the caching device will cause it to read the stale value. Args: rnn_cell: the rnn cell instance." 5981,_config_for_enable_caching_device,tensorflow/tensorflow/python/keras/layers/recurrent.py,3076,function,"Return the dict config for the RNN cell w.r.t. the enable_caching_device field. Since enable_caching_device is an internal implementation detail for speeding up RNN variable reads when running in the multi remote worker setting, we don't want this config to be serialized constantly in the JSON. We will only serialize this field when a non-default value is used to create the cell. Args: rnn_cell: the RNN cell to serialize. Returns: A dict which contains the JSON config for the enable_caching_device value, or an empty dict if the enable_caching_device value is the same as the default value." 5982,RNNTest,tensorflow/tensorflow/python/keras/layers/recurrent_test.py,62,class, 5983,RNNCellWithConstants,tensorflow/tensorflow/python/keras/layers/recurrent_test.py,1700,class, 5984,Minimal2DRNNCell,tensorflow/tensorflow/python/keras/layers/recurrent_test.py,1738,class,"The minimal 2D RNN cell is a simple combination of two 1-D RNN cells. Both the internal state and the output have 2 dimensions and are orthogonal to each other." 5985,PlusOneRNNCell,tensorflow/tensorflow/python/keras/layers/recurrent_test.py,1776,class,"Add one to the input and state. This cell is used for testing state_size and output_size." 5986,NestedCell,tensorflow/tensorflow/python/keras/layers/recurrent_test.py,1792,class, 5987,GRUCell,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,69,class,"Cell class for the GRU layer. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. This class processes one step within the whole time sequence input, whereas `tf.keras.layers.GRU` processes the whole sequence.
For example: >>> inputs = tf.random.normal([32, 10, 8]) >>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4)) >>> output = rnn(inputs) >>> print(output.shape) (32, 4) >>> rnn = tf.keras.layers.RNN( ... tf.keras.layers.GRUCell(4), ... return_sequences=True, ... return_state=True) >>> whole_sequence_output, final_state = rnn(inputs) >>> print(whole_sequence_output.shape) (32, 10, 4) >>> print(final_state.shape) (32, 4) Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). use_bias: Boolean (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 (default) will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = ""before"", True = ""after"" (default and CuDNN compatible). Call arguments: inputs: A 2D tensor, with shape of `[batch, feature]`. states: A 2D tensor with shape of `[batch, units]`, which is the state from the previous time step. For timestep 0, the initial state provided by the user will be fed to the cell. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used." 5988,GRU,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,188,class,"Gated Recurrent Unit - Cho et al. 2014. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance.
If a GPU is available and all the arguments to the layer meet the requirement of the CuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: 1. `activation` == `tanh` 2. `recurrent_activation` == `sigmoid` 3. `recurrent_dropout` == 0 4. `unroll` is `False` 5. `use_bias` is `True` 6. `reset_after` is `True` 7. Inputs, if masking is used, are strictly right-padded. 8. Eager execution is enabled in the outermost context. There are two variants of the GRU implementation. The default one is based on [v3](https://arxiv.org/abs/1406.1078v3) and has the reset gate applied to the hidden state before matrix multiplication. The other one is based on the [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed. The second variant is compatible with CuDNNGRU (GPU-only) and allows inference on CPU. Thus it has separate biases for `kernel` and `recurrent_kernel`. To use this variant, set `reset_after=True` and `recurrent_activation='sigmoid'`. For example: >>> inputs = tf.random.normal([32, 10, 8]) >>> gru = tf.keras.layers.GRU(4) >>> output = gru(inputs) >>> print(output.shape) (32, 4) >>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True) >>> whole_sequence_output, final_state = gru(inputs) >>> print(whole_sequence_output.shape) (32, 10, 4) >>> print(final_state.shape) (32, 4) Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). use_bias: Boolean (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2.
return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: `False`. return_state: Boolean. Whether to return the last state in addition to the output. Default: `False`. go_backwards: Boolean (default `False`). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. unroll: Boolean (default False). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `[timesteps, batch, feature]`, whereas in the False case, it will be `[batch, timesteps, feature]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. reset_after: GRU convention (whether to apply reset gate after or before matrix multiplication). False = ""before"", True = ""after"" (default and CuDNN compatible). Call arguments: inputs: A 3D tensor, with shape `[batch, timesteps, feature]`. mask: Binary tensor of shape `[samples, timesteps]` indicating whether a given timestep should be masked (optional, defaults to `None`). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used (optional, defaults to `None`). initial_state: List of initial state tensors to be passed to the first call of the cell (optional, defaults to `None` which causes creation of zero-filled initial state tensors)." 5989,standard_gru,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,508,function,"GRU with standard kernel implementation. This implementation can be run on all types of hardware. This implementation lifts out all the layer weights and makes them function parameters. It has the same number of tensor input params as the CuDNN counterpart. The RNN step logic has been simplified, e.g. dropout and masking are removed since the CuDNN implementation does not support them. Arguments: inputs: Input tensor of GRU layer. init_h: Initial state tensor for the cell output. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. The bias contains the combined input_bias and recurrent_bias. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. zero_output_for_mask: Boolean, whether to output zero for masked timestep. Returns: last_output: output tensor for the last timestep, which has shape [batch, units].
outputs: output tensor for all timesteps, which has shape [batch, time, units]. state_0: the cell output, which has the same shape as init_h. runtime: constant string tensor which indicates real runtime hardware. This value is for testing purposes and should not be used by users." 5990,gpu_gru,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,590,function,GRU with CuDNN implementation which is only available on GPU. 5991,gru_with_backend_selection,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,676,function,"Call the GRU with optimized backend kernel selection. Under the hood, this function will create two TF functions: one with the most generic kernel that can run on all device conditions, and a second one with the CuDNN-specific kernel, which can only run on GPU. The first function will be called with normal_lstm_params, while the second function is not called, but only registered in the graph. Grappler will do the proper graph rewrite and swap in the optimized TF function based on the device placement. Args: inputs: Input tensor of GRU layer. init_h: Initial state tensor for the cell output. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for masking out the steps within the sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. zero_output_for_mask: Boolean, whether to output zero for masked timestep. Returns: List of output tensors, same as standard_gru." 5992,LSTMCell,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,791,class,"Cell class for the LSTM layer. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. This class processes one step within the whole time sequence input, whereas `tf.keras.layers.LSTM` processes the whole sequence. For example: >>> inputs = tf.random.normal([32, 10, 8]) >>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4)) >>> output = rnn(inputs) >>> print(output.shape) (32, 4) >>> rnn = tf.keras.layers.RNN( ... tf.keras.layers.LSTMCell(4), ... return_sequences=True, ... return_state=True) >>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs) >>> print(whole_seq_output.shape) (32, 10, 4) >>> print(final_memory_state.shape) (32, 4) >>> print(final_carry_state.shape) (32, 4) Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). use_bias: Boolean (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`.
bias_initializer: Initializer for the bias vector. Default: `zeros`. unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer=""zeros""`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 (default) will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2. Call arguments: inputs: A 2D tensor, with shape of `[batch, feature]`. states: List of 2 tensors corresponding to the cell's units. Both of them have shape `[batch, units]`; the first tensor is the memory state from the previous time step, and the second tensor is the carry state from the previous time step. For timestep 0, the initial state provided by the user will be fed to the cell. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. Only relevant when `dropout` or `recurrent_dropout` is used." 5993,LSTM,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,913,class,"Long Short-Term Memory layer - Hochreiter 1997. See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn) for details about the usage of RNN API. Based on available runtime hardware and constraints, this layer will choose different implementations (cuDNN-based or pure-TensorFlow) to maximize the performance. If a GPU is available and all the arguments to the layer meet the requirement of the CuDNN kernel (see below for details), the layer will use a fast cuDNN implementation. The requirements to use the cuDNN implementation are: 1. `activation` == `tanh` 2. `recurrent_activation` == `sigmoid` 3. `recurrent_dropout` == 0 4. `unroll` is `False` 5. `use_bias` is `True` 6. Inputs, if masking is used, are strictly right-padded. 7. Eager execution is enabled in the outermost context. For example: >>> inputs = tf.random.normal([32, 10, 8]) >>> lstm = tf.keras.layers.LSTM(4) >>> output = lstm(inputs) >>> print(output.shape) (32, 4) >>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True) >>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs) >>> print(whole_seq_output.shape) (32, 10, 4) >>> print(final_memory_state.shape) (32, 4) >>> print(final_carry_state.shape) (32, 4) Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). recurrent_activation: Activation function to use for the recurrent step. Default: sigmoid (`sigmoid`). If you pass `None`, no activation is applied (i.e. ""linear"" activation: `a(x) = x`). use_bias: Boolean (default `True`), whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix, used for the linear transformation of the inputs. Default: `glorot_uniform`. recurrent_initializer: Initializer for the `recurrent_kernel` weights matrix, used for the linear transformation of the recurrent state. Default: `orthogonal`. bias_initializer: Initializer for the bias vector. Default: `zeros`. unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of the forget gate at initialization. Setting it to true will also force `bias_initializer=""zeros""`. This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf). kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. Default: `None`. recurrent_regularizer: Regularizer function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_regularizer: Regularizer function applied to the bias vector. Default: `None`. activity_regularizer: Regularizer function applied to the output of the layer (its ""activation""). Default: `None`. kernel_constraint: Constraint function applied to the `kernel` weights matrix. Default: `None`. recurrent_constraint: Constraint function applied to the `recurrent_kernel` weights matrix. Default: `None`. bias_constraint: Constraint function applied to the bias vector. Default: `None`. dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the inputs. Default: 0. recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for the linear transformation of the recurrent state. Default: 0. implementation: Implementation mode, either 1 or 2. Mode 1 will structure its operations as a larger number of smaller dot products and additions, whereas mode 2 will batch them into fewer, larger operations. These modes will have different performance profiles on different hardware and for different applications. Default: 2. return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: `False`. return_state: Boolean. Whether to return the last state in addition to the output. Default: `False`. go_backwards: Boolean (default `False`). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default `False`). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. time_major: The shape format of the `inputs` and `outputs` tensors. If True, the inputs and outputs will be in shape `[timesteps, batch, feature]`, whereas in the False case, it will be `[batch, timesteps, feature]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. unroll: Boolean (default `False`). If True, the network will be unrolled, else a symbolic loop will be used. Unrolling can speed up an RNN, although it tends to be more memory-intensive. Unrolling is only suitable for short sequences.
Call arguments: inputs: A 3D tensor with shape `[batch, timesteps, feature]`. mask: Binary tensor of shape `[batch, timesteps]` indicating whether a given timestep should be masked (optional, defaults to `None`). training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is only relevant if `dropout` or `recurrent_dropout` is used (optional, defaults to `None`). initial_state: List of initial state tensors to be passed to the first call of the cell (optional, defaults to `None` which causes creation of zero-filled initial state tensors)." 5994,_canonical_to_params,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1208,function,"Utility function to convert variables to CuDNN-compatible parameters. Note that Keras weights for kernels are different from the CuDNN format. E.g.: ``` Keras CuDNN [[0, 1, 2], <---> [[0, 2, 4], [3, 4, 5]] [1, 3, 5]] ``` If the input weights need to be in a unified format, then set `transpose_weights=True` to convert the weights. Args: weights: list of weights for the individual kernels and recurrent kernels. biases: list of biases for individual gates. shape: the shape for the converted variables that will be fed to CuDNN. transpose_weights: boolean, whether to transpose the weights. Returns: The converted weights that can be fed to CuDNN ops as params." 5995,standard_lstm,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1239,function,"LSTM with standard kernel implementation. This implementation can be run on all types of hardware. This implementation lifts out all the layer weights and makes them function parameters. It has the same number of tensor input params as the CuDNN counterpart. The RNN step logic has been simplified, e.g. dropout and masking are removed since the CuDNN implementation does not support them. Note that the first half of the bias tensor should be ignored by this impl. The CuDNN impl needs an extra set of input gate bias. In order to make both functions take the same shape of parameters, that extra set of bias is also fed here. Args: inputs: input tensor of LSTM layer. init_h: initial state tensor for the cell output. init_c: initial state tensor for the cell hidden state. kernel: weights for cell kernel. recurrent_kernel: weights for cell recurrent kernel. bias: weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for masking out the steps within the sequence. time_major: boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. zero_output_for_mask: Boolean, whether to output zero for masked timestep. Returns: last_output: output tensor for the last timestep, which has shape [batch, units]. outputs: output tensor for all timesteps, which has shape [batch, time, units]. state_0: the cell output, which has the same shape as init_h. state_1: the cell hidden state, which has the same shape as init_c. runtime: constant string tensor which indicates real runtime hardware. This value is for testing purposes and should not be used by users."
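The GRU/LSTM v2 docstrings above enumerate the conditions under which the layer dispatches to the fast cuDNN kernel. As a hedged illustration (not part of the indexed source), the sketch below builds an LSTM over right-padded, masked input; right padding is what keeps the masked path cuDNN-eligible per the right-padding requirement above, and the layer defaults already satisfy the other conditions. Shapes and vocabulary size are illustrative assumptions.

```python
import tensorflow as tf

# Right-padded ("post") sequences keep the cuDNN fast path eligible when
# masking is used; the LSTM defaults (tanh activation, sigmoid
# recurrent_activation, recurrent_dropout=0, unroll=False, use_bias=True)
# satisfy the remaining requirements listed in the docstring.
padded = tf.keras.preprocessing.sequence.pad_sequences(
    [[1, 2, 3], [4, 5]], padding="post")  # shape (2, 3), zeros at the end
embedded = tf.keras.layers.Embedding(
    input_dim=10, output_dim=8, mask_zero=True)(padded)
output = tf.keras.layers.LSTM(4)(embedded)  # shape (2, 4)
print(output.shape)
```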
5996,gpu_lstm,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1321,function,"LSTM with either a CuDNN or ROCm implementation, which is only available on GPU. Note that currently only right-padded data is supported; otherwise the result will be polluted by the unmasked data, which should be filtered. Args: inputs: Input tensor of LSTM layer. init_h: Initial state tensor for the cell output. init_c: Initial state tensor for the cell hidden state. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for masking out the steps within the sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. Returns: last_output: Output tensor for the last timestep, which has shape [batch, units]. outputs: Output tensor for all timesteps, which has shape [batch, time, units]. state_0: The cell output, which has the same shape as init_h. state_1: The cell hidden state, which has the same shape as init_c. runtime: Constant string tensor which indicates real runtime hardware. This value is for testing purposes and should not be used by users." 5997,lstm_with_backend_selection,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1443,function,"Call the LSTM with optimized backend kernel selection. Under the hood, this function will create two TF functions: one with the most generic kernel that can run on all device conditions, and a second one with the CuDNN-specific kernel, which can only run on GPU. The first function will be called with normal_lstm_params, while the second function is not called, but only registered in the graph. Grappler will do the proper graph rewrite and swap in the optimized TF function based on the device placement. Args: inputs: Input tensor of LSTM layer. init_h: Initial state tensor for the cell output. init_c: Initial state tensor for the cell hidden state. kernel: Weights for cell kernel. recurrent_kernel: Weights for cell recurrent kernel. bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias is used in this case. mask: Boolean tensor for masking out the steps within the sequence. time_major: Boolean, whether the inputs are in the format of [time, batch, feature] or [batch, time, feature]. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. sequence_lengths: The lengths of all sequences coming from a variable length input, such as ragged tensors. If the input has a fixed timestep size, this should be None. zero_output_for_mask: Boolean, whether to output zero for masked timestep. Returns: List of output tensors, same as standard_lstm." 5998,is_sequence_right_padded,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1565,function,"Check the mask tensor and see if it is right padded. For the CuDNN kernel, it uses the sequence length param to skip the trailing timesteps. If the data is left padded, or not strictly right padded (has masked values in the middle of the sequence), then the CuDNN kernel won't work properly in those cases. Left padded data: [[False, False, True, True, True]].
Right padded data: [[True, True, True, False, False]]. Mixture of masked/unmasked data: [[True, False, True, False, False]]. Note that for the mixed data example above, the actual data the RNN should see are the two True entries (indices 0 and 2); the False at index 1 should be ignored and not pollute the internal states. Args: mask: the Boolean tensor with shape [batch, timestep] Returns: boolean scalar tensor, whether the mask is strictly right padded." 5999,has_fully_masked_sequence,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1594,function, 6000,is_cudnn_supported_inputs,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1607,function, 6001,calculate_sequence_by_mask,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1616,function,"Calculate the sequence length tensor (1-D) based on the masking tensor. The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For any timestep that should be masked, the corresponding field will be False. Consider the following example: a = [[True, True, False, False], [True, True, True, False]] It is a (2, 4) tensor, and the corresponding sequence length result should be a 1D tensor with value [2, 3]. Note that the masking tensor must be right padded, which can be checked by, e.g., `is_sequence_right_padded()`. Args: mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if time_major=True. time_major: Boolean, which indicates whether the mask is time major or batch major. Returns: sequence_length: 1D int32 tensor." 6002,_generate_defun_backend,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1641,function, 6003,_get_context_device_type,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1653,function,"Parse the current context and return the device type, e.g. CPU/GPU." 6004,_runtime,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1661,function, 6005,_read_variable_value,tensorflow/tensorflow/python/keras/layers/recurrent_v2.py,1667,function,Read the value of a variable if it is a variable. 6006,RNNV2Test,tensorflow/tensorflow/python/keras/layers/recurrent_v2_test.py,40,class, 6007,_RNNCellWrapperV2,tensorflow/tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py,34,class,"Base class for cell wrappers, for V2 compatibility. This class, along with `rnn_cell_impl._RNNCellWrapperV1`, allows defining wrappers that are compatible with V1 and V2, and defines helper methods for this purpose." 6008,DropoutWrapper,tensorflow/tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py,98,class,Operator adding dropout to inputs and outputs of the given cell. 6009,ResidualWrapper,tensorflow/tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py,113,class,RNNCell wrapper that ensures cell inputs are added to the outputs. 6010,DeviceWrapper,tensorflow/tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py,124,class,Operator that ensures an RNNCell runs on a particular device. 6011,RNNCellWrapperTest,tensorflow/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py,39,class, 6012,SeparableConv1DTest,tensorflow/tensorflow/python/keras/layers/separable_convolutional_test.py,31,class, 6013,SeparableConv2DTest,tensorflow/tensorflow/python/keras/layers/separable_convolutional_test.py,99,class, 6014,populate_deserializable_objects,tensorflow/tensorflow/python/keras/layers/serialization.py,83,function,"Populates dict ALL_OBJECTS with every built-in layer.
" 6015,serialize,tensorflow/tensorflow/python/keras/layers/serialization.py,154,function, 6016,deserialize,tensorflow/tensorflow/python/keras/layers/serialization.py,159,function,"Instantiates a layer from a config dictionary. Arguments: config: dict of the form {'class_name': str, 'config': dict} custom_objects: dict mapping class names (or function names) of custom (non-Keras) objects to class/functions Returns: Layer instance (may be Model, Sequential, Network, Layer...)" 6017,SerializableInt,tensorflow/tensorflow/python/keras/layers/serialization_test.py,33,class, 6018,LayerSerializationTest,tensorflow/tensorflow/python/keras/layers/serialization_test.py,47,class, 6019,SimpleRNNLayerTest,tensorflow/tensorflow/python/keras/layers/simplernn_test.py,37,class, 6020,SubclassedLayersTest,tensorflow/tensorflow/python/keras/layers/subclassed_layers_test.py,33,class, 6021,_single_op_at_end,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,43,function, 6022,_single_identity_op_at_end,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,50,function, 6023,_multiple_ops_at_end,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,57,function, 6024,_single_op_in_middle,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,65,function, 6025,_multiple_ops_in_middle,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,73,function, 6026,_shape_op_inference,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,82,function, 6027,_shape_op_known_batch_size,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,91,function, 6028,_shape_op_slice_and_range,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,106,function, 6029,_shape_op_slice_and_range_known_dim,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,117,function, 6030,_single_standalone_branch,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,134,function, 6031,_single_op_with_attrs,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,141,function, 6032,_multiple_uses,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,148,function, 6033,_op_with_tensor_list,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,157,function, 6034,_add_n,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,164,function, 6035,_reuse_op,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,170,function, 6036,_float64_op,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,181,function, 6037,MyAdd,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,190,class, 6038,_layer_with_tensor_arg,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,196,function, 6039,LayerWithLayer,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,203,class, 6040,_inner_layer,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,215,function, 6041,_reuse_ancillary_layer,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,221,function, 6042,AutoLambdaTest,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,242,class, 6043,InputInEagerTest,tensorflow/tensorflow/python/keras/layers/tensorflow_op_layer_test.py,681,class,"Tests ops on keras inputs in Eager runtime. Input returns graph/symbolic tensors in the Eager runtime (this happens, for example, with tensors returned from Keras layers). 
These should be routed to the graph-style branch of these ops (b/134715641)" 6044,Wrapper,tensorflow/tensorflow/python/keras/layers/wrappers.py,40,class,"Abstract wrapper base class. Wrappers take another layer and augment it in various ways. Do not use this class as a layer; it is only an abstract base class. Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. Arguments: layer: The layer to be wrapped." 6045,TimeDistributed,tensorflow/tensorflow/python/keras/layers/wrappers.py,85,class,"This wrapper allows applying a layer to every temporal slice of an input. The input should be at least 3D, and the dimension of index one will be considered to be the temporal dimension. Consider a batch of 32 video samples, where each sample is a 128x128 RGB image with `channels_last` data format, across 10 timesteps. The batch input shape is `(32, 10, 128, 128, 3)`. You can then use `TimeDistributed` to apply a `Conv2D` layer to each of the 10 timesteps, independently: >>> inputs = tf.keras.Input(shape=(10, 128, 128, 3)) >>> conv_2d_layer = tf.keras.layers.Conv2D(64, (3, 3)) >>> outputs = tf.keras.layers.TimeDistributed(conv_2d_layer)(inputs) >>> outputs.shape TensorShape([None, 10, 126, 126, 64]) Arguments: layer: a `tf.keras.layers.Layer` instance. Call arguments: inputs: Input tensor. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the wrapped layer (only if the layer supports this argument). mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. This argument is passed to the wrapped layer (only if the layer supports this argument). Raises: ValueError: If not initialized with a `tf.keras.layers.Layer` instance." 6046,Bidirectional,tensorflow/tensorflow/python/keras/layers/wrappers.py,325,class,"Bidirectional wrapper for RNNs. Arguments: layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or `keras.layers.GRU`. It could also be a `keras.layers.Layer` instance that meets the following criteria: 1. Be a sequence-processing layer (accepts 3D+ inputs). 2. Have a `go_backwards`, `return_sequences` and `return_state` attribute (with the same semantics as for the `RNN` class). 3. Have an `input_spec` attribute. 4. Implement serialization via `get_config()` and `from_config()`. Note that the recommended way to create new RNN layers is to write a custom RNN cell and use it with `keras.layers.RNN`, instead of subclassing `keras.layers.Layer` directly. merge_mode: Mode by which outputs of the forward and backward RNNs will be combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the outputs will not be combined, they will be returned as a list. Default value is 'concat'. backward_layer: Optional `keras.layers.RNN`, or `keras.layers.Layer` instance to be used to handle backwards input processing. If `backward_layer` is not provided, the layer instance passed as the `layer` argument will be used to generate the backward layer automatically. Note that the provided `backward_layer` layer should have properties matching those of the `layer` argument, in particular it should have the same values for `stateful`, `return_state`, `return_sequences`, etc. In addition, `backward_layer` and `layer` should have different `go_backwards` argument values. A `ValueError` will be raised if these requirements are not met. Call arguments: The call arguments for this layer are the same as those of the wrapped RNN layer.
Beware that when passing the `initial_state` argument during the call of this layer, the first half of the elements in the `initial_state` list will be passed to the forward RNN call and the last half will be passed to the backward RNN call. Raises: ValueError: 1. If `layer` or `backward_layer` is not a `Layer` instance. 2. In case of invalid `merge_mode` argument. 3. If `backward_layer` has mismatched properties compared to `layer`. Examples: ```python model = Sequential() model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10))) model.add(Bidirectional(LSTM(10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # With custom backward layer model = Sequential() forward_layer = LSTM(10, return_sequences=True) backward_layer = LSTM(10, activation='relu', return_sequences=True, go_backwards=True) model.add(Bidirectional(forward_layer, backward_layer=backward_layer, input_shape=(5, 10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') ```" 6047,_RNNCellWithConstants,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,50,class, 6048,_ResidualLSTMCell,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,88,class, 6049,_AddOneCell,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,95,class,Increments inputs and state by one on each call. 6050,TimeDistributedTest,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,113,class, 6051,BidirectionalTest,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,471,class, 6052,ExampleWrapper,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,1252,class,Simple Wrapper subclass. 6053,WrapperTest,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,1259,class, 6054,_to_list,tensorflow/tensorflow/python/keras/layers/wrappers_test.py,1273,function, 6055,_hasattr,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,64,function, 6056,assert_like_rnncell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,73,function,"Raises a TypeError if cell is not like an RNNCell. NOTE: Do not rely on the error message (in particular in tests) which can be subject to change to increase readability. Use ASSERT_LIKE_RNNCELL_ERROR_REGEXP. Args: cell_name: A string to give a meaningful error referencing the name of the function argument. cell: The object which should behave like an RNNCell. Raises: TypeError: A human-friendly exception." 6057,_concat,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,107,function,"Concat that enables int, Tensor, or TensorShape values. This function takes a size specification, which can be an integer, a TensorShape, or a Tensor, and converts it into a concatenated Tensor (if static = False) or a list of integers (if static = True). Args: prefix: The prefix; usually the batch size (and/or time step size). (TensorShape, int, or Tensor.) suffix: TensorShape, int, or Tensor. static: If `True`, return a python list with possibly unknown dimensions. Otherwise return a `Tensor`. Returns: shape: the concatenation of prefix and suffix. Raises: ValueError: if `suffix` is not a scalar or vector (or TensorShape). ValueError: if prefix or suffix was `None` and asked for dynamic Tensors out." 6058,_zero_state_tensors,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,169,function,"Create tensors of zeros based on state_size, batch_size, and dtype."
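The `Bidirectional` entry above requires a `backward_layer` whose config matches the forward layer except for `go_backwards`. A minimal sketch of that contract, assuming the public `tf.keras` aliases of the indexed classes (layer sizes and input shape are illustrative):

```python
import tensorflow as tf

# The backward layer must mirror the forward layer's config (stateful,
# return_sequences, return_state, ...) but flip go_backwards, as the
# Bidirectional docstring above requires.
forward_layer = tf.keras.layers.LSTM(10, return_sequences=True)
backward_layer = tf.keras.layers.LSTM(10, return_sequences=True,
                                      go_backwards=True)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(5, 10)),
    tf.keras.layers.Bidirectional(forward_layer, backward_layer=backward_layer),
    tf.keras.layers.Dense(5, activation="softmax"),
])
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
```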
6059,RNNCell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,185,class,"Abstract object representing an RNN cell. Every `RNNCell` must have the properties below and implement `call` with the signature `(output, next_state) = call(input, state)`. The optional third input argument, `scope`, is allowed for backwards compatibility purposes, but should be left off for new subclasses. This definition of cell differs from the definition used in the literature. In the literature, 'cell' refers to an object with a single scalar output. This definition refers to a horizontal array of such units. An RNN cell, in the most abstract setting, is anything that has a state and performs some operation that takes a matrix of inputs. This operation results in an output matrix with `self.output_size` columns. If `self.state_size` is an integer, this operation also results in a new state matrix with `self.state_size` columns. If `self.state_size` is a (possibly nested tuple of) TensorShape object(s), then it should return a matching structure of Tensors having shape `[batch_size].concatenate(s)` for each `s` in `self.state_size`." 6060,LayerRNNCell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,349,class,"Subclass of RNNCells that act like proper `tf.Layer` objects. For backwards compatibility purposes, most `RNNCell` instances allow their `call` methods to instantiate variables via `tf.compat.v1.get_variable`. The underlying variable scope thus keeps track of any variables, and returns cached versions. This is atypical of `tf.layer` objects, which separate this part of layer building into a `build` method that is only called once. Here we provide a subclass for `RNNCell` objects that act exactly as `Layer` objects do. They must provide a `build` method and their `call` methods do not access Variables via `tf.compat.v1.get_variable`." 6061,BasicRNNCell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,392,class,"The most basic RNN cell. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU. Args: num_units: int, The number of units in the RNN cell. activation: Nonlinearity to use. Default: `tanh`. It could also be a string that is within Keras activation function names. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. **kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config()." 6062,GRUCell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,486,class,"Gated Recurrent Unit cell. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or `tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU. Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. dtype: Default dtype of the layer (default of `None` means use the type of the first input). Required when `build` is called before `call`. **kwargs: Dict, keyword named properties for common layer attributes, like `trainable` etc when constructing the cell from configs of get_config(). References: Learning Phrase Representations using RNN Encoder Decoder for Statistical Machine Translation: [Cho et al., 2014] (https://aclanthology.coli.uni-saarland.de/papers/D14-1179/d14-1179) ([pdf](http://emnlp2014.org/papers/pdf/EMNLP2014179.pdf))" 6063,LSTMStateTuple,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,623,class,"Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state. Stores two elements, `(c, h)`, in that order, where `c` is the hidden state and `h` is the output. Only used when `state_is_tuple=True`." 6064,BasicLSTMCell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,643,class,"DEPRECATED: Please use `tf.compat.v1.nn.rnn_cell.LSTMCell` instead. Basic LSTM recurrent network cell. The implementation is based on http://arxiv.org/abs/1409.2329. We add forget_bias (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. It does not allow cell clipping, a projection layer, and does not use peep-hole connections: it is the basic baseline. For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell` that follows. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU." 6065,LSTMCell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,811,class,"Long short-term memory unit (LSTM) recurrent network cell. The default non-peephole implementation is based on (Gers et al., 1999). The peephole implementation is based on (Sak et al., 2014). The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. Note that this cell is not optimized for performance. Please use `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for better performance on CPU. References: Long short-term memory recurrent neural network architectures for large scale acoustic modeling: [Sak et al., 2014] (https://www.isca-speech.org/archive/interspeech_2014/i14_0338.html) ([pdf] (https://www.isca-speech.org/archive/archive_papers/interspeech_2014/i14_0338.pdf)) Learning to forget: [Gers et al., 1999] (http://digital-library.theiet.org/content/conferences/10.1049/cp_19991218) ([pdf](https://arxiv.org/pdf/1409.2329.pdf)) Long Short-Term Memory: [Hochreiter et al., 1997] (https://www.mitpressjournals.org/doi/abs/10.1162/neco.1997.9.8.1735) ([pdf](http://ml.jku.at/publications/older/3504.pdf))" 6066,_RNNCellWrapperV1,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,1094,class,"Base class for cell wrappers, for V1 compatibility. This class, along with `_RNNCellWrapperV2`, allows defining cell wrappers that are compatible with V1 and V2, and defines helper methods for this purpose."
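The legacy (V1) cells indexed above and the wrapper classes indexed just below are composable. As a hedged sketch, assuming the public `tf.compat.v1.nn.rnn_cell` aliases of these indexed implementations (unit counts and shapes are illustrative):

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # the legacy cells are graph-mode APIs

# Stack two LSTM cells, then wrap the stack with dropout on its outputs.
cells = [tf.compat.v1.nn.rnn_cell.LSTMCell(num_units=n) for n in [128, 64]]
stacked = tf.compat.v1.nn.rnn_cell.MultiRNNCell(cells)
wrapped = tf.compat.v1.nn.rnn_cell.DropoutWrapper(stacked, output_keep_prob=0.9)

# [batch, time, feature] input; dynamic_rnn unrolls the wrapped cell over time.
inputs = tf.compat.v1.placeholder(tf.float32, [None, 20, 32])
outputs, state = tf.compat.v1.nn.dynamic_rnn(wrapped, inputs, dtype=tf.float32)
```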
6067,DropoutWrapper,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,1176,class,Operator adding dropout to inputs and outputs of the given cell. 6068,ResidualWrapper,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,1187,class,RNNCell wrapper that ensures cell inputs are added to the outputs. 6069,DeviceWrapper,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,1198,class, 6070,MultiRNNCell,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,1208,class,"RNN cell composed sequentially of multiple simple cells. Example: ```python num_units = [128, 64] cells = [BasicLSTMCell(num_units=n) for n in num_units] stacked_rnn_cell = MultiRNNCell(cells) ```" 6071,_check_rnn_cell_input_dtypes,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,1332,function,"Check whether the input tensors have supported dtypes. Default RNN cells only support floats and complex as their dtypes since the activation functions (tanh and sigmoid) only allow those types. This function will throw a proper error message if the input is not of a supported type. Args: inputs: tensor or nested structure of tensors that are fed to the RNN cell as input or state. Raises: ValueError: if any of the input tensors do not have a float or complex dtype." 6072,_check_supported_dtypes,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py,1351,function, 6073,DropoutWrapperBase,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py,38,class,Operator adding dropout to inputs and outputs of the given cell. 6074,ResidualWrapperBase,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py,323,class,RNNCell wrapper that ensures cell inputs are added to the outputs. 6075,DeviceWrapperBase,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py,411,class,Operator that ensures an RNNCell runs on a particular device. 6076,_serialize_function_to_config,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py,451,function,Serialize the function for get_config(). 6077,_parse_config_to_function,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py,468,function,Reconstruct the function from the config. 6078,_default_dropout_state_filter_visitor,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py,497,function, 6079,_enumerated_map_structure_up_to,tensorflow/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py,507,function, 6080,dense,tensorflow/tensorflow/python/keras/layers/ops/core.py,30,function,"Densely connected NN layer op. Arguments: inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation. kernel: `tf.Variable`. Matrix kernel. bias: (Optional) `tf.Variable`. Bias to add to outputs. activation: (Optional) 1-argument callable. Activation function to apply to outputs. dtype: (Optional) `tf.DType`. Dtype to cast `inputs` to. Returns: `tf.Tensor`. Output of dense connection." 6081,CategoryCrossing,tensorflow/tensorflow/python/keras/layers/preprocessing/category_crossing.py,39,class,"Category crossing layer. This layer concatenates multiple categorical inputs into a single categorical output (similar to Cartesian product). The output dtype is string.
Usage: >>> inp_1 = ['a', 'b', 'c'] >>> inp_2 = ['d', 'e', 'f'] >>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing() >>> layer([inp_1, inp_2]) >>> inp_1 = ['a', 'b', 'c'] >>> inp_2 = ['d', 'e', 'f'] >>> layer = tf.keras.layers.experimental.preprocessing.CategoryCrossing( ... separator='-') >>> layer([inp_1, inp_2]) Arguments: depth: depth of input crossing. By default None, all inputs are crossed into one output. It can also be an int or tuple/list of ints. Passing an integer will create combinations of crossed outputs with depth up to that integer, i.e., [1, 2, ..., `depth`), and passing a tuple of integers will create crossed outputs with depth for the specified values in the tuple, i.e., `depth`=(N1, N2) will create all possible crossed outputs with depth equal to N1 or N2. Passing `None` means a single crossed output with all inputs. For example, with inputs `a`, `b` and `c`, `depth=2` means the output will be [a; b; c; cross(a, b); cross(b, c); cross(c, a)]. separator: A string added between each input being joined. Defaults to '_X_'. name: Name to give to the layer. **kwargs: Keyword arguments to construct a layer. Input shape: a list of string or int tensors or sparse tensors of shape `[batch_size, d1, ..., dm]` Output shape: a single string or int tensor or sparse tensor of shape `[batch_size, d1, ..., dm]` Returns: If any input is `RaggedTensor`, the output is `RaggedTensor`. Else, if any input is `SparseTensor`, the output is `SparseTensor`. Otherwise, the output is `Tensor`. Example: (`depth`=None) If the layer receives three inputs: `a=[[1], [4]]`, `b=[[2], [5]]`, `c=[[3], [6]]` the output will be a string tensor: `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]` Example: (`depth` is an integer) With the same input above, and if `depth`=2, the output will be a list of 6 string tensors: `[[b'1'], [b'4']]` `[[b'2'], [b'5']]` `[[b'3'], [b'6']]` `[[b'1_X_2'], [b'4_X_5']]`, `[[b'2_X_3'], [b'5_X_6']]`, `[[b'3_X_1'], [b'6_X_4']]` Example: (`depth` is a tuple/list of integers) With the same input above, and if `depth`=(2, 3) the output will be a list of 4 string tensors: `[[b'1_X_2'], [b'4_X_5']]`, `[[b'2_X_3'], [b'5_X_6']]`, `[[b'3_X_1'], [b'6_X_4']]`, `[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`" 6082,batch_wrapper,tensorflow/tensorflow/python/keras/layers/preprocessing/category_crossing_distribution_test.py,36,function, 6083,CategoryCrossingDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_crossing_distribution_test.py,53,class, 6084,CategoryCrossingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_crossing_test.py,41,class, 6085,CategoryEncoding,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding.py,56,class,"Category encoding layer. This layer provides options for condensing data into a categorical encoding. It accepts integer values as inputs and outputs a dense representation (one sample = 1-index tensor of float values representing data about the sample's tokens) of those inputs. Examples: >>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding( ... max_tokens=4, output_mode=""count"") >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]]) Examples with weighted inputs: >>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding( ... max_tokens=4, output_mode=""count"") >>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]]) >>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights) Attributes: max_tokens: The maximum size of the vocabulary for this layer.
If None, there is no cap on the size of the vocabulary. output_mode: Specification for the output of the layer. Defaults to ""binary"". Values can be ""binary"", ""count"" or ""tf-idf"", configuring the layer as follows: ""binary"": Outputs a single int array per batch, of either vocab_size or max_tokens size, containing 1s in all elements where the token mapped to that index exists at least once in the batch item. ""count"": As ""binary"", but the int array contains a count of the number of times the token at that index appeared in the batch item. ""tf-idf"": As ""binary"", but the TF-IDF algorithm is applied to find the value in each token slot. sparse: Boolean. If true, returns a `SparseTensor` instead of a dense `Tensor`. Defaults to `False`. Call arguments: inputs: A 2D tensor `(samples, timesteps)`. count_weights: A 2D tensor in the same shape as `inputs` indicating the weight for each sample value when summing up in `count` mode. Not used in `binary` or `tf-idf` mode." 6086,_CategoryEncodingAccumulator,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding.py,332,class, 6087,_CategoryEncodingCombiner,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding.py,337,class,"Combiner for the CategoryEncoding preprocessing layer. This class encapsulates the logic for computing the number of elements in the input dataset and the document frequency for each element. Attributes: compute_max_element: (Optional) If set, this combiner will return the maximum element in this set as part of its `extract()` call. compute_idf: (Optional) If set, the inverse document frequency will be computed for each value." 6088,batch_wrapper,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_distribution_test.py,36,function, 6089,CategoryEncodingDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_distribution_test.py,53,class, 6090,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py,43,function, 6091,CategoryEncodingInputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py,51,class, 6092,CategoryEncodingAdaptTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py,272,class, 6093,CategoryEncodingOutputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py,434,class, 6094,CategoryEncodingModelBuildingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py,568,class, 6095,CategoryEncodingCombinerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py,621,class, 6096,CategoryEncoding,tensorflow/tensorflow/python/keras/layers/preprocessing/category_encoding_v1.py,27,class,"CategoryEncoding layer. This layer provides options for condensing input data into denser representations. It accepts either integer values or strings as inputs, allows users to map those inputs into a contiguous integer space, and outputs either those integer values (one sample = 1D tensor of integer token indices) or a dense representation (one sample = 1D tensor of float values representing data about the sample's tokens). If desired, the user can call this layer's adapt() method on a dataset. When this layer is adapted, it will analyze the dataset, determine the frequency of individual integer or string values, and create a 'vocabulary' from them. 
This vocabulary can have unlimited size or be capped, depending on the configuration options for this layer; if there are more unique values in the input than the maximum vocabulary size, the most frequent terms will be used to create the vocabulary. Attributes: max_elements: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. output_mode: Optional specification for the output of the layer. Values can be ""int"", ""binary"", ""count"" or ""tf-idf"", configuring the layer as follows: ""int"": Outputs integer indices, one integer index per split string token. ""binary"": Outputs a single int array per batch, of either vocab_size or max_elements size, containing 1s in all elements where the token mapped to that index exists at least once in the batch item. ""count"": As ""binary"", but the int array contains a count of the number of times the token at that index appeared in the batch item. ""tf-idf"": As ""binary"", but the TF-IDF algorithm is applied to find the value in each token slot. output_sequence_length: Only valid in INT mode. If set, the output will have its time dimension padded or truncated to exactly `output_sequence_length` values, resulting in a tensor of shape [batch_size, output_sequence_length] regardless of the input shape. pad_to_max_elements: Only valid in ""binary"", ""count"", and ""tf-idf"" modes. If True, the output will have its feature axis padded to `max_elements` even if the number of unique values in the vocabulary is less than max_elements, resulting in a tensor of shape [batch_size, max_elements] regardless of vocabulary size. Defaults to False." 6097,Discretization,tensorflow/tensorflow/python/keras/layers/preprocessing/discretization.py,32,class,"Buckets data into discrete ranges. This layer will place each element of its input data into one of several contiguous ranges and output an integer index indicating which range each element was placed in. Input shape: Any `tf.Tensor` or `tf.RaggedTensor` of dimension 2 or higher. Output shape: Same as input shape. Attributes: bins: Optional boundary specification. Bins include the left boundary and exclude the right boundary, so `bins=[0., 1., 2.]` generates bins `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`. Examples: Bucketize float values based on provided buckets. >>> input = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]]) >>> layer = tf.keras.layers.experimental.preprocessing.Discretization( ... bins=[0., 1., 2.]) >>> layer(input) " 6098,DiscretizationDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/discretization_distribution_test.py,36,class, 6099,DiscretizationTest,tensorflow/tensorflow/python/keras/layers/preprocessing/discretization_test.py,35,class, 6100,Hashing,tensorflow/tensorflow/python/keras/layers/preprocessing/hashing.py,44,class,"Implements categorical feature hashing, also known as ""hashing trick"". This layer transforms single or multiple categorical inputs to hashed output. It converts a sequence of int or string to a sequence of int. The stable hash function uses tensorflow::ops::Fingerprint to produce universal output that is consistent across platforms. This layer uses [FarmHash64](https://github.com/google/farmhash) by default, which provides a consistent hashed output across different platforms and is stable across invocations, regardless of device and context, by mixing the input bits thoroughly. 
If you want to obfuscate the hashed output, you can also pass a random `salt` argument in the constructor. In that case, the layer will use the [SipHash64](https://github.com/google/highwayhash) hash function, with the `salt` value serving as additional input to the hash function. Example (FarmHash64): >>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3) >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']] >>> layer(inp) Example (FarmHash64) with list of inputs: >>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3) >>> inp_1 = [['A'], ['B'], ['C'], ['D'], ['E']] >>> inp_2 = np.asarray([[5], [4], [3], [2], [1]]) >>> layer([inp_1, inp_2]) Example (SipHash64): >>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3, ... salt=[133, 137]) >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']] >>> layer(inp) Example (SipHash64 with a single integer, same as `salt=[133, 133]`): >>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3, ... salt=133) >>> inp = [['A'], ['B'], ['C'], ['D'], ['E']] >>> layer(inp) Reference: [SipHash with salt](https://www.131002.net/siphash/siphash.pdf) Arguments: num_bins: Number of hash bins. salt: A single unsigned integer or None. If passed, the hash function used will be SipHash64, with these values used as an additional input (known as a ""salt"" in cryptography). These should be non-zero. Defaults to `None` (in that case, the FarmHash64 hash function is used). It also supports tuple/list of 2 unsigned integer numbers, see reference paper for details. name: Name to give to the layer. **kwargs: Keyword arguments to construct a layer. Input shape: A single or list of string, int32 or int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape `[batch_size, ...,]` Output shape: An int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape `[batch_size, ...]`. If any input is `RaggedTensor` then output is `RaggedTensor`, otherwise if any input is `SparseTensor` then output is `SparseTensor`, otherwise the output is `Tensor`." 6101,HashingDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/hashing_distribution_test.py,39,class, 6102,HashingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/hashing_test.py,38,class, 6103,check_fill_mode_and_interpolation,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,60,function, 6104,Resizing,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,71,class,"Image resizing layer. Resize the batched image input to target height and width. The input should be a 4-D tensor in the format of NHWC. Arguments: height: Integer, the height of the output shape. width: Integer, the width of the output shape. interpolation: String, the interpolation method. Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. name: A string, the name of the layer." 6105,CenterCrop,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,123,class,"Crop the central portion of the images to target height and width. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, target_height, target_width, channels)`. If the input height/width is even and the target height/width is odd (or inversely), the input image is left-padded by 1 pixel. Arguments: height: Integer, the height of the output shape. 
width: Integer, the width of the output shape. name: A string, the name of the layer." 6106,RandomCrop,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,191,class,"Randomly crop the images to target height and width. This layer will crop all the images in the same batch to the same cropping location. By default, random cropping is only applied during training. At inference time, the images will first be rescaled to preserve the shorter side, and then center cropped. If you need to apply random cropping at inference time, set `training` to True when calling the layer. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, target_height, target_width, channels)`. Arguments: height: Integer, the height of the output shape. width: Integer, the width of the output shape. seed: Integer. Used to create a random seed. name: A string, the name of the layer." 6107,Rescaling,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,299,class,"Multiplies inputs by `scale` and adds `offset`. For instance: 1. To rescale an input in the `[0, 255]` range to be in the `[0, 1]` range, you would pass `scale=1./255`. 2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range, you would pass `scale=1./127.5, offset=-1`. The rescaling is applied both during training and inference. Input shape: Arbitrary. Output shape: Same as input. Arguments: scale: Float, the scale to apply to the inputs. offset: Float, the offset to apply to the inputs. name: A string, the name of the layer." 6108,RandomFlip,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,354,class,"Randomly flip each image horizontally and vertically. This layer will flip the images based on the `mode` attribute. During inference time, the output will be identical to the input. Call the layer with `training=True` to flip the input. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Attributes: mode: String indicating which flip mode to use. Can be ""horizontal"", ""vertical"", or ""horizontal_and_vertical"". Defaults to ""horizontal_and_vertical"". ""horizontal"" is a left-right flip and ""vertical"" is a top-bottom flip. seed: Integer. Used to create a random seed. name: A string, the name of the layer." 6109,RandomTranslation,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,435,class,"Randomly translate each image during training. Arguments: height_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for shifting vertically. A negative value means shifting image up, while a positive value means shifting image down. When represented as a single positive float, this value is used for both the upper and lower bound. For instance, `height_factor=(-0.2, 0.3)` results in an output shifted by a random amount in the range [-20%, +30%]. `height_factor=0.2` results in an output shifted vertically by a random amount in the range [-20%, +20%]. width_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for shifting horizontally. A negative value means shifting image left, while a positive value means shifting image right. 
When represented as a single positive float, this value is used for both the upper and lower bound. For instance, `width_factor=(-0.2, 0.3)` results in an output shifted horizontally by a random amount in the range [-20%, +30%]. `width_factor=0.2` results in an output shifted left or right by 20%. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`). - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by reflecting about the edge of the last pixel. - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by filling all values beyond the edge with the same constant value k = 0. - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by wrapping around to the opposite edge. - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the nearest pixel. interpolation: Interpolation mode. Supported values: ""nearest"", ""bilinear"". seed: Integer. Used to create a random seed. name: A string, the name of the layer. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Raises: ValueError: if either bound is not between [0, 1], or upper bound is less than lower bound." 6110,get_translation_matrix,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,583,function,"Returns projective transform(s) for the given translation(s). Args: translations: A matrix of 2-element lists representing [dx, dy] to translate for each image (for a batch of images). name: The name of the op. Returns: A tensor of shape (num_images, 8) projective transforms which can be given to `transform`." 6111,transform,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,616,function,"Applies the given transform(s) to the image(s). Args: images: A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC), (num_rows, num_columns, num_channels) (HWC), or (num_rows, num_columns) (HW). The rank must be statically known (the shape is not `TensorShape(None)`). transforms: Projective transform matrix/matrices. A vector of length 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point `(x, y)` to a transformed *input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to the transform mapping input points to output points. Note that gradients are not backpropagated into transformation parameters. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`). interpolation: Interpolation mode. Supported values: ""nearest"", ""bilinear"". output_shape: Output dimension after the transform, [height, width]. If None, output is the same size as input image. name: The name of the op. ## Fill mode. Behavior for each valid value is as follows: reflect (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel. constant (k k k k | a b c d | k k k k) The input is extended by filling all values beyond the edge with the same constant value k = 0. wrap (a b c d | a b c d | a b c d) The input is extended by wrapping around to the opposite edge. nearest (a a a a | a b c d | d d d d) The input is extended by the nearest pixel. 
Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Returns: Image(s) with the same type and shape as `images`, with the given transform(s) applied. Transformed coordinates outside of the input image will be filled with zeros. Raises: TypeError: If `image` is an invalid type. ValueError: If output shape is not 1-D int32 Tensor." 6112,get_rotation_matrix,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,699,function,"Returns projective transform(s) for the given angle(s). Args: angles: A scalar angle to rotate all images by, or (for batches of images) a vector with an angle to rotate each image in the batch. The rank must be statically known (the shape is not `TensorShape(None)`). image_height: Height of the image(s) to be transformed. image_width: Width of the image(s) to be transformed. name: The name of the op. Returns: A tensor of shape (num_images, 8). Projective transforms which can be given to operation `image_projective_transform_v2`. If one row of transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point `(x, y)` to a transformed *input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`." 6113,RandomRotation,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,740,class,"Randomly rotate each image. By default, random rotations are only applied during training. At inference time, the layer does nothing. If you need to apply random rotations at inference time, set `training` to True when calling the layer. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Attributes: factor: a float represented as fraction of 2pi, or a tuple of size 2 representing lower and upper bound for rotating clockwise and counter-clockwise. A positive value means rotating counter-clockwise, while a negative value means clockwise. When represented as a single float, this value is used for both the upper and lower bound. For instance, `factor=(-0.2, 0.3)` results in an output rotation by a random amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in an output rotating by a random amount in the range `[-20% * 2pi, 20% * 2pi]`. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`). - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by reflecting about the edge of the last pixel. - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by filling all values beyond the edge with the same constant value k = 0. - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by wrapping around to the opposite edge. - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the nearest pixel. interpolation: Interpolation mode. Supported values: ""nearest"", ""bilinear"". seed: Integer. Used to create a random seed. name: A string, the name of the layer. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. 
Raises: ValueError: if either bound is not between [0, 1], or upper bound is less than lower bound." 6114,RandomZoom,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,858,class,"Randomly zoom each image during training. Arguments: height_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for zooming vertically. When represented as a single float, this value is used for both the upper and lower bound. A positive value means zooming out, while a negative value means zooming in. For instance, `height_factor=(0.2, 0.3)` results in an output zoomed out by a random amount in the range [+20%, +30%]. `height_factor=(-0.3, -0.2)` results in an output zoomed in by a random amount in the range [+20%, +30%]. width_factor: a float represented as fraction of value, or a tuple of size 2 representing lower and upper bound for zooming horizontally. When represented as a single float, this value is used for both the upper and lower bound. For instance, `width_factor=(0.2, 0.3)` results in an output zooming out between 20% and 30%. `width_factor=(-0.3, -0.2)` results in an output zooming in between 20% and 30%. Defaults to `None`, i.e., zooming in the vertical and horizontal directions while preserving the aspect ratio. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'reflect', 'wrap', 'nearest'}`). - *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by reflecting about the edge of the last pixel. - *constant*: `(k k k k | a b c d | k k k k)` The input is extended by filling all values beyond the edge with the same constant value k = 0. - *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by wrapping around to the opposite edge. - *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by the nearest pixel. interpolation: Interpolation mode. Supported values: ""nearest"", ""bilinear"". seed: Integer. Used to create a random seed. name: A string, the name of the layer. Example: >>> input_img = np.random.random((32, 224, 224, 3)) >>> layer = tf.keras.layers.experimental.preprocessing.RandomZoom(.5, .2) >>> out_img = layer(input_img) >>> out_img.shape TensorShape([32, 224, 224, 3]) Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Raises: ValueError: if lower bound is not between [0, 1], or upper bound is negative." 6115,get_zoom_matrix,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,1009,function,"Returns projective transform(s) for the given zoom(s). Args: zooms: A matrix of 2-element lists representing [zx, zy] to zoom for each image (for a batch of images). image_height: Height of the image(s) to be transformed. image_width: Width of the image(s) to be transformed. name: The name of the op. Returns: A tensor of shape (num_images, 8). Projective transforms which can be given to operation `image_projective_transform_v2`. If one row of transforms is [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point `(x, y)` to a transformed *input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`." 6116,RandomContrast,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,1051,class,"Adjust the contrast of an image or images by a random factor. 
Contrast is adjusted independently for each channel of each image during training. For each channel, this layer computes the mean of the image pixels in the channel and then adjusts each component `x` of each pixel to `(x - mean) * contrast_factor + mean`. Input shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Output shape: 4D tensor with shape: `(samples, height, width, channels)`, data_format='channels_last'. Attributes: factor: a positive float represented as fraction of value, or a tuple of size 2 representing lower and upper bound. When represented as a single float, lower = upper. The contrast factor will be randomly picked between [1.0 - lower, 1.0 + upper]. seed: Integer. Used to create a random seed. name: A string, the name of the layer. Raises: ValueError: if lower bound is not between [0, 1], or upper bound is negative." 6117,RandomHeight,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,1123,class,"Randomly vary the height of a batch of images during training. Adjusts the height of a batch of images by a random factor. The input should be a 4-D tensor in the ""channels_last"" image data format. By default, this layer is inactive during inference. Arguments: factor: A positive float (fraction of original height), or a tuple of size 2 representing lower and upper bound for resizing vertically. When represented as a single float, this value is used for both the upper and lower bound. For instance, `factor=(0.2, 0.3)` results in an output with height changed by a random amount in the range `[20%, 30%]`. `factor=(-0.2, 0.3)` results in an output with height changed by a random amount in the range `[-20%, +30%]`. `factor=0.2` results in an output with height changed by a random amount in the range `[-20%, +20%]`. interpolation: String, the interpolation method. Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. seed: Integer. Used to create a random seed. name: A string, the name of the layer. Input shape: 4D tensor with shape: `(samples, height, width, channels)` (data_format='channels_last'). Output shape: 4D tensor with shape: `(samples, random_height, width, channels)`." 6118,RandomWidth,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,1221,class,"Randomly vary the width of a batch of images during training. Adjusts the width of a batch of images by a random factor. The input should be a 4-D tensor in the ""channels_last"" image data format. By default, this layer is inactive during inference. Arguments: factor: A positive float (fraction of original width), or a tuple of size 2 representing lower and upper bound for resizing horizontally. When represented as a single float, this value is used for both the upper and lower bound. For instance, `factor=(0.2, 0.3)` results in an output with width changed by a random amount in the range `[20%, 30%]`. `factor=(-0.2, 0.3)` results in an output with width changed by a random amount in the range `[-20%, +30%]`. `factor=0.2` results in an output with width changed by a random amount in the range `[-20%, +20%]`. interpolation: String, the interpolation method. Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. seed: Integer. Used to create a random seed. name: A string, the name of the layer. 
Input shape: 4D tensor with shape: `(samples, height, width, channels)` (data_format='channels_last'). Output shape: 4D tensor with shape: `(samples, height, random_width, channels)`." 6119,_RandomGenerator,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,1322,class,"A subclass that allows creation inside distribution strategies. This is a temporary solution to allow creating tf.random.Generator inside distribution strategies. It will be removed when proper API is in place. All replicas will have the same RNG state and generate the same random numbers." 6120,make_generator,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,1352,function, 6121,get_interpolation,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py,1359,function, 6122,ImagePreprocessingDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_distribution_test.py,38,class, 6123,ResizingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,41,class, 6124,get_numpy_center_crop,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,131,function, 6125,CenterCropTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,142,class, 6126,RandomCropTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,203,class, 6127,RescalingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,308,class, 6128,RandomFlipTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,342,class, 6129,RandomContrastTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,426,class, 6130,RandomTranslationTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,511,class, 6131,RandomTransformTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,695,class, 6132,RandomRotationTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,990,class, 6133,RandomZoomTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,1046,class, 6134,RandomHeightTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,1152,class, 6135,RandomWidthTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,1243,class, 6136,LearningPhaseTest,tensorflow/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py,1333,class, 6137,IndexLookup,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup.py,44,class,"Maps values from a vocabulary to integer indices. This layer translates a set of arbitrary hashables into an integer output via a table-based lookup, with optional out-of-vocabulary handling. This is the basis layer for both IntegerLookup and StringLookup; it holds the common logic but is not intended to be exported as part of the Keras API. If desired, the user can call this layer's `adapt()` method on a data set, which will analyze the data set, determine the frequency of individual string values, and create a vocabulary from them. This vocabulary can have unlimited size or be capped, depending on the configuration options for this layer; if there are more unique values in the input than the maximum vocabulary size, the most frequent terms will be used to create the vocabulary. 
Attributes: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary includes the OOV and mask tokens, so the effective number of tokens is (max_tokens - num_oov_indices - (1 if mask_token else 0)) num_oov_indices: The number of out-of-vocabulary tokens to use. If this value is more than 1, OOV inputs are hashed to determine their OOV value; if this value is 0, passing an OOV input will result in a '-1' being returned for that value in the output tensor. (Note that, because the value is -1 and not 0, this will allow you to effectively drop OOV values from categorical encodings.) mask_token: A token that represents masked values, and which is mapped to index 0. If set to None, no mask term will be added and the OOV tokens, if any, will be indexed from (0...num_oov_indices) instead of (1...num_oov_indices+1). oov_token: The token representing an out-of-vocabulary value. This token is only used when performing an inverse lookup. vocabulary: An optional list of vocabulary terms. If the list contains the same token multiple times, an error will be thrown. invert: If true, this layer will map indices to vocabulary items instead of mapping vocabulary items to indices." 6138,_IndexLookupAccumulator,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup.py,379,class, 6139,_IndexLookupCombiner,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup.py,384,class,"Combiner for the IndexLookup preprocessing layer. This class encapsulates the logic for computing a vocabulary based on the frequency of each token. Attributes: vocab_size: (Optional) If set, only the top `vocab_size` tokens (based on frequency across the dataset) are retained in the vocabulary. 
If None, or set to a value greater than the total number of distinct tokens in the dataset, all tokens are retained." 6140,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_distribution_test.py,37,function, 6141,IndexLookupDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_distribution_test.py,48,class, 6142,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,48,function, 6143,_get_end_to_end_test_cases,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,55,function, 6144,IndexLookupLayerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,217,class, 6145,CategoricalEncodingInputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,267,class, 6146,CategoricalEncodingMultiOOVTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,382,class, 6147,CategoricalEncodingAdaptTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,478,class, 6148,IndexLookupOutputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,599,class, 6149,IndexLookupVocabularyTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,672,class, 6150,IndexLookupInverseVocabularyTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,875,class, 6151,IndexLookupSaveableTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,987,class, 6152,IndexLookupErrorTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,1029,class, 6153,IndexLookupSavingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,1056,class, 6154,IndexLookupStringCombinerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,1101,class, 6155,IndexLookupIntCombinerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_test.py,1232,class, 6156,IndexLookup,tensorflow/tensorflow/python/keras/layers/preprocessing/index_lookup_v1.py,26,class,"IndexLookup layer. This layer translates a set of arbitrary strings or integers into an integer output via a table-based lookup, with optional out-of-vocabulary handling. If desired, the user can call this layer's adapt() method on a data set. When this layer is adapted, it will analyze the dataset, determine the frequency of individual string or integer values, and create a vocabulary from them. This vocabulary can have unlimited size or be capped, depending on the configuration options for this layer; if there are more unique values in the input than the maximum vocabulary size, the most frequent terms will be used to create the vocabulary. Attributes: max_vocab_size: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that the vocabulary does include OOV buckets, so the effective number of unique values in the vocabulary is (max_vocab_size - num_oov_buckets) when this value is set. num_oov_buckets: The number of out-of-vocabulary tokens to use; defaults to 1. If this value is more than 1, OOV inputs are hashed to determine their OOV value; if this value is 0, passing an OOV input will result in a runtime error. reserve_zero: Whether to reserve the index '0', which has a special meaning in the Keras masking system. 
If True, the output of this layer will be in the range [1...max_vocab_size+1); if False, the output will be in the range [0...max_vocab_size). Defaults to True. mask_inputs: If True, input values of 0 (for integers) and """" (for strings) will be treated as masked values and assigned an output value of 0. If this option is set, reserve_zero must also be set. Defaults to False." 6157,IntegerLookup,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup.py,28,class,"Maps integers from a vocabulary to integer indices. This layer translates a set of arbitrary integers into an integer output via a table-based lookup, with optional out-of-vocabulary handling. If desired, the user can call this layer's `adapt()` method on a data set, which will analyze the data set, determine the frequency of individual integer values, and create a vocabulary from them. This vocabulary can have unlimited size or be capped, depending on the configuration options for this layer; if there are more unique values in the input than the maximum vocabulary size, the most frequent terms will be used to create the vocabulary. Attributes: max_values: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary includes the OOV and mask values, so the effective number of values is (max_values - num_oov_values - (1 if mask_value else 0)) num_oov_indices: The number of out-of-vocabulary values to use; defaults to 1. If this value is more than 1, OOV inputs are modulated to determine their OOV value; if this value is 0, passing an OOV input will result in a '-1' being returned for that value in the output tensor. (Note that, because the value is -1 and not 0, this will allow you to effectively drop OOV values from categorical encodings.) mask_value: A value that represents masked inputs, and which is mapped to index 0. Defaults to 0. If set to None, no mask term will be added and the OOV values, if any, will be indexed from (0...num_oov_values) instead of (1...num_oov_values+1). oov_value: The value representing an out-of-vocabulary value. Defaults to -1. vocabulary: An optional list of values, or a path to a text file containing a vocabulary to load into this layer. The file should contain one value per line. If the list or file contains the same token multiple times, an error will be thrown. invert: If true, this layer will map indices to vocabulary items instead of mapping vocabulary items to indices. Examples: Creating a lookup layer with a known vocabulary This example creates a lookup layer with a pre-existing vocabulary. >>> vocab = [12, 36, 1138, 42] >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]]) >>> layer = IntegerLookup(vocabulary=vocab) >>> layer(data) Creating a lookup layer with an adapted vocabulary This example creates a lookup layer and generates the vocabulary by analyzing the dataset. >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]]) >>> layer = IntegerLookup() >>> layer.adapt(data) >>> layer.get_vocabulary() [0, -1, 42, 1138, 1000, 36, 12] Note how the mask value 0 and the OOV value -1 have been added to the vocabulary. The remaining values are sorted by frequency (42, which has 2 occurrences, is first) then by inverse sort order. >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]]) >>> layer = IntegerLookup() >>> layer.adapt(data) >>> layer(data) Lookups with multiple OOV tokens. This example demonstrates how to use a lookup layer with multiple OOV tokens. 
When a layer is created with more than one OOV token, any OOV values are hashed into the number of OOV buckets, distributing OOV values in a deterministic fashion across the set. >>> vocab = [12, 36, 1138, 42] >>> data = tf.constant([[12, 1138, 42], [37, 1000, 36]]) >>> layer = IntegerLookup(vocabulary=vocab, num_oov_indices=2) >>> layer(data) Note that the output for OOV value 37 is 2, while the output for OOV value 1000 is 1. The in-vocab terms have their output index increased by 1 from earlier examples (12 maps to 3, etc) in order to make space for the extra OOV value. Inverse lookup This example demonstrates how to map indices to values using this layer. (You can also use adapt() with invert=True, but for simplicity we'll pass the vocab in this example.) >>> vocab = [12, 36, 1138, 42] >>> data = tf.constant([[1, 3, 4], [4, 5, 2]]) >>> layer = IntegerLookup(vocabulary=vocab, invert=True) >>> layer(data) Note that the integer 5, which is out of the vocabulary space, returns an OOV token. Forward and inverse lookup pairs This example demonstrates how to use the vocabulary of a standard lookup layer to create an inverse lookup layer. >>> vocab = [12, 36, 1138, 42] >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]]) >>> layer = IntegerLookup(vocabulary=vocab) >>> i_layer = IntegerLookup(vocabulary=layer.get_vocabulary(), invert=True) >>> int_data = layer(data) >>> i_layer(int_data) In this example, the input value 1000 resulted in an output of -1, since 1000 was not in the vocabulary - it got represented as an OOV, and all OOV values are returned as -1 in the inverse layer. Also, note that for the inverse to work, you must have already set the forward layer vocabulary either directly or via adapt() before calling get_vocabulary()." 6158,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,48,function, 6159,_get_end_to_end_test_cases,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,55,function, 6160,IntegerLookupLayerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,93,class, 6161,CategoricalEncodingInputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,136,class, 6162,CategoricalEncodingMultiOOVTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,177,class, 6163,CategoricalEncodingAdaptTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,223,class, 6164,IntegerLookupOutputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,300,class, 6165,IntegerLookupVocabularyTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,382,class, 6166,IntegerLookupSaveableTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,442,class, 6167,IntegerLookupErrorTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,475,class, 6168,IntegerLookupSavingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_test.py,492,class, 6169,IntegerLookup,tensorflow/tensorflow/python/keras/layers/preprocessing/integer_lookup_v1.py,27,class,Maps integers from a vocabulary to integer indices. 6170,Normalization,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization.py,42,class,"Feature-wise normalization of the data. This layer will coerce its inputs into a distribution centered around 0 with standard deviation 1. 
It accomplishes this by precomputing the mean and variance of the data, and calling (input-mean)/sqrt(var) at runtime. What happens in `adapt`: Compute mean and variance of the data and store them as the layer's weights. `adapt` should be called before `fit`, `evaluate`, or `predict`. Attributes: axis: Integer or tuple of integers, the axis or axes that should be ""kept"". These axes are not summed over when calculating the normalization statistics. By default the last axis, the `features` axis, is kept and any `space` or `time` axes are summed. Each element in the axes that are kept is normalized independently. If `axis` is set to `None`, the layer will perform scalar normalization (dividing the input by a single scalar value). The `batch` axis, 0, is always summed over (`axis=0` is not allowed). Examples: Calculate the mean and variance by analyzing the dataset in `adapt`. >>> adapt_data = np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32) >>> input_data = np.array([[1.], [2.], [3.]], np.float32) >>> layer = Normalization() >>> layer.adapt(adapt_data) >>> layer(input_data) " 6171,_NormalizingCombiner,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization.py,180,class,"Combiner for the Normalization preprocessing layer. This class encapsulates the computations for finding the mean and variance of a set of data in a stable and numerically correct way. Its associated accumulator is a namedtuple('count', 'mean', 'variance'). Attributes: axis: The axis to compute mean and var over." 6172,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py,35,function, 6173,_get_layer_computation_test_cases,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py,42,function, 6174,NormalizationTest,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py,112,class, 6175,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_test.py,37,function, 6176,_get_layer_computation_test_cases,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_test.py,44,function, 6177,NormalizationTest,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_test.py,126,class, 6178,_get_layer_computation_test_cases,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_tpu_test.py,34,function, 6179,NormalizationTest,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_tpu_test.py,101,class, 6180,Normalization,tensorflow/tensorflow/python/keras/layers/preprocessing/normalization_v1.py,28,class, 6181,PreprocessingStage,tensorflow/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py,30,class,"A sequential preprocessing stage. This preprocessing stage wraps a list of preprocessing layers into a Sequential-like object that enables you to `adapt()` the whole list via a single `adapt()` call on the preprocessing stage. Arguments: layers: List of layers. Can include layers that aren't preprocessing layers. name: String. Optional name for the preprocessing stage object." 6182,PreprocessingStageTest,tensorflow/tensorflow/python/keras/layers/preprocessing/preprocessing_stage_test.py,37,class, 6183,PreprocessingLayerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/preprocessing_test_utils.py,27,class,Base test class for preprocessing layer API validation. 
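The Normalization entries above (normalization.py and its v1/TPU/test variants) share the two-step contract the docstring describes: `adapt()` stores per-feature mean and variance, and the call applies `(input - mean) / sqrt(var)`. A minimal NumPy sketch of that computation, with illustrative data (not the layer's actual implementation, which streams statistics through the `_NormalizingCombiner`):

```python
import numpy as np

# Sketch of what Normalization's adapt()/call() pair computes.
# adapt() stores per-feature mean and variance; the call then applies
# (input - mean) / sqrt(var). Data below is illustrative.
adapt_data = np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32)
mean = adapt_data.mean(axis=0)  # per-feature mean (kept axis = features)
var = adapt_data.var(axis=0)    # per-feature variance

input_data = np.array([[1.], [2.], [3.]], dtype=np.float32)
print((input_data - mean) / np.sqrt(var))  # approx. [[-1.41], [-0.71], [0.]]
```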
6184,get_reduce_op,tensorflow/tensorflow/python/keras/layers/preprocessing/reduction.py,28,function,Translate a reduction string name to a reduction op. 6185,Reduction,tensorflow/tensorflow/python/keras/layers/preprocessing/reduction.py,45,class,"Performs an optionally-weighted reduction. This layer performs a reduction across one axis of its input data. This data may optionally be weighted by passing in a float tensor of the same shape. Arguments: reduction: The type of reduction to perform. Can be one of the following: ""max"", ""mean"", ""min"", ""prod"", or ""sum"". This layer uses the TensorFlow reduce op which corresponds to that reduction (so, for ""mean"", we use ""reduce_mean""). axis: The axis to reduce along. Defaults to '-2', which is usually the axis that contains embeddings (but is not within the embedding itself). Input shape: A tensor of 2 or more dimensions of any numeric dtype. Output: A tensor of 1 less dimension than the input tensor, of the same dtype. Call arguments: inputs: The data to reduce. weights: An optional tensor or constant of the same shape as inputs that will weight the input data before it is reduced." 6186,ReductionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/reduction_test.py,33,class, 6187,StringLookup,tensorflow/tensorflow/python/keras/layers/preprocessing/string_lookup.py,28,class,"Maps strings from a vocabulary to integer indices. This layer translates a set of arbitrary strings into an integer output via a table-based lookup, with optional out-of-vocabulary handling. If desired, the user can call this layer's `adapt()` method on a data set, which will analyze the data set, determine the frequency of individual string values, and create a vocabulary from them. This vocabulary can have unlimited size or be capped, depending on the configuration options for this layer; if there are more unique values in the input than the maximum vocabulary size, the most frequent terms will be used to create the vocabulary. Attributes: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary includes the OOV and mask tokens, so the effective number of tokens is (max_tokens - num_oov_indices - (1 if mask_token else 0)) num_oov_indices: The number of out-of-vocabulary tokens to use; defaults to 1. If this value is more than 1, OOV inputs are hashed to determine their OOV value; if this value is 0, passing an OOV input will result in a '-1' being returned for that value in the output tensor. (Note that, because the value is -1 and not 0, this will allow you to effectively drop OOV values from categorical encodings.) mask_token: A token that represents masked values, and which is mapped to index 0. Defaults to the empty string """". If set to None, no mask term will be added and the OOV tokens, if any, will be indexed from (0...num_oov_indices) instead of (1...num_oov_indices+1). oov_token: The token representing an out-of-vocabulary value. Defaults to ""[UNK]"". vocabulary: An optional list of vocabulary terms, or a path to a text file containing a vocabulary to load into this layer. The file should contain one token per line. If the list or file contains the same token multiple times, an error will be thrown. encoding: The Python string encoding to use. Defaults to `'utf-8'`. invert: If true, this layer will map indices to vocabulary items instead of mapping vocabulary items to indices. 
Examples: Creating a lookup layer with a known vocabulary This example creates a lookup layer with a pre-existing vocabulary. >>> vocab = [""a"", ""b"", ""c"", ""d""] >>> data = tf.constant([[""a"", ""c"", ""d""], [""d"", ""z"", ""b""]]) >>> layer = StringLookup(vocabulary=vocab) >>> layer(data) Creating a lookup layer with an adapted vocabulary This example creates a lookup layer and generates the vocabulary by analyzing the dataset. >>> data = tf.constant([[""a"", ""c"", ""d""], [""d"", ""z"", ""b""]]) >>> layer = StringLookup() >>> layer.adapt(data) >>> layer.get_vocabulary() ['', '[UNK]', 'd', 'z', 'c', 'b', 'a'] Note how the mask token '' and the OOV token [UNK] have been added to the vocabulary. The remaining tokens are sorted by frequency ('d', which has 2 occurrences, is first) then by inverse sort order. >>> data = tf.constant([[""a"", ""c"", ""d""], [""d"", ""z"", ""b""]]) >>> layer = StringLookup() >>> layer.adapt(data) >>> layer(data) Lookups with multiple OOV tokens. This example demonstrates how to use a lookup layer with multiple OOV tokens. When a layer is created with more than one OOV token, any OOV values are hashed into the number of OOV buckets, distributing OOV values in a deterministic fashion across the set. >>> vocab = [""a"", ""b"", ""c"", ""d""] >>> data = tf.constant([[""a"", ""c"", ""d""], [""m"", ""z"", ""b""]]) >>> layer = StringLookup(vocabulary=vocab, num_oov_indices=2) >>> layer(data) Note that the output for OOV value 'm' is 1, while the output for OOV value 'z' is 2. The in-vocab terms have their output index increased by 1 from earlier examples (a maps to 3, etc) in order to make space for the extra OOV value. Inverse lookup This example demonstrates how to map indices to strings using this layer. (You can also use adapt() with invert=True, but for simplicity we'll pass the vocab in this example.) >>> vocab = [""a"", ""b"", ""c"", ""d""] >>> data = tf.constant([[1, 3, 4], [4, 5, 2]]) >>> layer = StringLookup(vocabulary=vocab, invert=True) >>> layer(data) Note that the integer 5, which is out of the vocabulary space, returns an OOV token. Forward and inverse lookup pairs This example demonstrates how to use the vocabulary of a standard lookup layer to create an inverse lookup layer. >>> vocab = [""a"", ""b"", ""c"", ""d""] >>> data = tf.constant([[""a"", ""c"", ""d""], [""d"", ""z"", ""b""]]) >>> layer = StringLookup(vocabulary=vocab) >>> i_layer = StringLookup(vocabulary=layer.get_vocabulary(), invert=True) >>> int_data = layer(data) >>> i_layer(int_data) In this example, the input value 'z' resulted in an output of '[UNK]', since 'z' was not in the vocabulary - it got represented as an OOV, and all OOV values are returned as '[UNK]' in the inverse layer. Also, note that for the inverse to work, you must have already set the forward layer vocabulary either directly or via adapt() before calling get_vocabulary()." 
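The forward/inverse pairing in the StringLookup docstring above can be sketched end to end as follows. The `tf.keras.layers.experimental.preprocessing.StringLookup` export path is an assumption (the layer moved between modules across TF releases); the `vocabulary` and `invert` arguments are the ones documented above:

```python
import tensorflow as tf

# Hedged sketch of the forward-and-inverse lookup pair described above.
# Export path is assumed; in some TF versions the layer lives elsewhere.
vocab = ["a", "b", "c", "d"]
data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])

layer = tf.keras.layers.experimental.preprocessing.StringLookup(
    vocabulary=vocab)
i_layer = tf.keras.layers.experimental.preprocessing.StringLookup(
    vocabulary=layer.get_vocabulary(), invert=True)

int_data = layer(data)    # "z" is OOV, so it maps to the OOV index
print(i_layer(int_data))  # OOV comes back as "[UNK]"
```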
6188,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py,44,function, 6189,_get_end_to_end_test_cases,tensorflow/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py,51,function, 6190,StringLookupLayerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py,88,class, 6191,StringLookupVocabularyTest,tensorflow/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py,131,class, 6192,StringLookupSaveableTest,tensorflow/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py,238,class, 6193,StringLookup,tensorflow/tensorflow/python/keras/layers/preprocessing/string_lookup_v1.py,27,class,Maps strings from a vocabulary to integer indices. 6194,TableHandler,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils.py,36,class,Wrapper object that holds a lookup table and provides accessors. 6195,get_vocabulary_from_file,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils.py,151,function,Read a vocabulary in from a file. 6196,validate_vocabulary_is_unique,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils.py,171,function,Validate that a vocabulary contains no repeated tokens. 6197,assert_same_type,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils.py,184,function,Assert that 'values' is of type 'expected_type'. 6198,convert_to_ndarray,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils.py,191,function,Convert 'x' to a numpy array. 6199,get_table,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils_test.py,34,function, 6200,CategoricalEncodingInputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils_test.py,45,class, 6201,CategoricalEncodingMultiOOVTest,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils_test.py,122,class, 6202,IndexLookupOutputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/table_utils_test.py,212,class, 6203,TextVectorization,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization.py,74,class,"Text vectorization layer. This layer has basic options for managing text in a Keras model. It transforms a batch of strings (one sample = one string) into either a list of token indices (one sample = 1D tensor of integer token indices) or a dense representation (one sample = 1D tensor of float values representing data about the sample's tokens). If desired, the user can call this layer's adapt() method on a dataset. When this layer is adapted, it will analyze the dataset, determine the frequency of individual string values, and create a 'vocabulary' from them. This vocabulary can have unlimited size or be capped, depending on the configuration options for this layer; if there are more unique values in the input than the maximum vocabulary size, the most frequent terms will be used to create the vocabulary. The processing of each sample contains the following steps: 1. standardize each sample (usually lowercasing + punctuation stripping) 2. split each sample into substrings (usually words) 3. recombine substrings into tokens (usually ngrams) 4. index tokens (associate a unique int value with each token) 5. transform each sample using this index, either into a vector of ints or a dense float vector. Some notes on passing Callables to customize splitting and normalization for this layer: 1. 
Any callable can be passed to this Layer, but if you want to serialize this object you should only pass functions that are registered Keras serializables (see `tf.keras.utils.register_keras_serializable` for more details). 2. When using a custom callable for `standardize`, the data received by the callable will be exactly as passed to this layer. The callable should return a tensor of the same shape as the input. 3. When using a custom callable for `split`, the data received by the callable will have the 1st dimension squeezed out - instead of `[[""string to split""], [""another string to split""]]`, the Callable will see `[""string to split"", ""another string to split""]`. The callable should return a Tensor with the first dimension containing the split tokens - in this example, we should see something like `[[""string"", ""to"", ""split""], [""another"", ""string"", ""to"", ""split""]]`. This makes the callable site natively compatible with `tf.strings.split()`. Attributes: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary contains 1 OOV token, so the effective number of tokens is `(max_tokens - 1 - (1 if output == ""int"" else 0))`. standardize: Optional specification for standardization to apply to the input text. Values can be None (no standardization), 'lower_and_strip_punctuation' (lowercase and remove punctuation) or a Callable. Default is 'lower_and_strip_punctuation'. split: Optional specification for splitting the input text. Values can be None (no splitting), 'whitespace' (split on ASCII whitespace), or a Callable. The default is 'whitespace'. ngrams: Optional specification for ngrams to create from the possibly-split input text. Values can be None, an integer or tuple of integers; passing an integer will create ngrams up to that integer, and passing a tuple of integers will create ngrams for the specified values in the tuple. Passing None means that no ngrams will be created. output_mode: Optional specification for the output of the layer. Values can be ""int"", ""binary"", ""count"" or ""tf-idf"", configuring the layer as follows: ""int"": Outputs integer indices, one integer index per split string token. When output == ""int"", 0 is reserved for masked locations; this reduces the vocab size to max_tokens-2 instead of max_tokens-1. ""binary"": Outputs a single int array per batch, of either vocab_size or max_tokens size, containing 1s in all elements where the token mapped to that index exists at least once in the batch item. ""count"": As ""binary"", but the int array contains a count of the number of times the token at that index appeared in the batch item. ""tf-idf"": As ""binary"", but the TF-IDF algorithm is applied to find the value in each token slot. output_sequence_length: Only valid in INT mode. If set, the output will have its time dimension padded or truncated to exactly `output_sequence_length` values, resulting in a tensor of shape [batch_size, output_sequence_length] regardless of how many tokens resulted from the splitting step. Defaults to None. pad_to_max_tokens: Only valid in ""binary"", ""count"", and ""tf-idf"" modes. If True, the output will have its feature axis padded to `max_tokens` even if the number of unique tokens in the vocabulary is less than max_tokens, resulting in a tensor of shape [batch_size, max_tokens] regardless of vocabulary size. Defaults to True. 
Example: This example instantiates a TextVectorization layer that lowercases text, splits on whitespace, strips punctuation, and outputs integer vocab indices. >>> text_dataset = tf.data.Dataset.from_tensor_slices([""foo"", ""bar"", ""baz""]) >>> max_features = 5000 # Maximum vocab size. >>> max_len = 4 # Sequence length to pad the outputs to. >>> embedding_dims = 2 >>> >>> # Create the layer. >>> vectorize_layer = TextVectorization( ... max_tokens=max_features, ... output_mode='int', ... output_sequence_length=max_len) >>> >>> # Now that the vocab layer has been created, call `adapt` on the text-only >>> # dataset to create the vocabulary. You don't have to batch, but for large >>> # datasets this means we're not keeping spare copies of the dataset. >>> vectorize_layer.adapt(text_dataset.batch(64)) >>> >>> # Create the model that uses the vectorize text layer >>> model = tf.keras.models.Sequential() >>> >>> # Start by creating an explicit input layer. It needs to have a shape of >>> # (1,) (because we need to guarantee that there is exactly one string >>> # input per batch), and the dtype needs to be 'string'. >>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string)) >>> >>> # The first layer in our model is the vectorization layer. After this >>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab >>> # indices. >>> model.add(vectorize_layer) >>> >>> # Now, the model can map strings to integers, and you can add an embedding >>> # layer to map these integers to learned embeddings. >>> input_data = [[""foo qux bar""], [""qux baz""]] >>> model.predict(input_data) array([[2, 1, 4, 0], [1, 3, 0, 0]])" 6204,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_distribution_test.py,37,function, 6205,TextVectorizationDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_distribution_test.py,48,class, 6206,get_layer_class,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,50,function, 6207,_get_end_to_end_test_cases,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,57,function, 6208,TextVectorizationLayerTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,264,class, 6209,TextVectorizationPreprocessingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,413,class, 6210,TextVectorizationDistributionTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,695,class, 6211,TextVectorizationOutputTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,722,class, 6212,TextVectorizationModelBuildingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,1208,class, 6213,TextVectorizationSaveableTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,1291,class, 6214,TextVectorizationErrorTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,1316,class, 6215,custom_standardize_fn,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,1427,function, 6216,custom_split_fn,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,1432,function, 6217,TextVectorizationSavingTest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,1437,class, 
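To make the custom `split` contract from note 3 above concrete, here is a minimal sketch. The comma-splitting rule and the helper name `comma_split` are illustrative assumptions, as is the `experimental.preprocessing` import path for this snapshot of the codebase; `tf.strings.split` already returns tokens along the first output dimension, which is exactly what the contract requires.

```python
import numpy as np
import tensorflow as tf

def comma_split(input_data):
  # Hypothetical custom `split` callable. Per note 3 above, it receives a 1D
  # string tensor such as ["a,b", "c,d,e"] (first dimension squeezed out) and
  # must return the split tokens along the first output dimension.
  return tf.strings.split(input_data, sep=",")

vectorize_layer = tf.keras.layers.experimental.preprocessing.TextVectorization(
    standardize=None,   # keep tokens exactly as provided
    split=comma_split,
    output_mode="int")
vectorize_layer.adapt(np.array([["a,b"], ["c,d,e"]]))
print(vectorize_layer(tf.constant([["e,a"]])))  # integer indices, e.g. [[2 5]]
```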
6218,TextVectorizationE2ETest,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py,1570,class, 6219,TextVectorization,tensorflow/tensorflow/python/keras/layers/preprocessing/text_vectorization_v1.py,30,class,"Text vectorization layer. This layer has basic options for managing text in a Keras model. It transforms a batch of strings (one sample = one string) into either a list of token indices (one sample = 1D tensor of integer token indices) or a dense representation (one sample = 1D tensor of float values representing data about the sample's tokens). The processing of each sample contains the following steps: 1) standardize each sample (usually lowercasing + punctuation stripping) 2) split each sample into substrings (usually words) 3) recombine substrings into tokens (usually ngrams) 4) index tokens (associate a unique int value with each token) 5) transform each sample using this index, either into a vector of ints or a dense float vector. Attributes: max_tokens: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. standardize: Optional specification for standardization to apply to the input text. Values can be None (no standardization), LOWER_AND_STRIP_PUNCTUATION (lowercase and remove punctuation) or a Callable. split: Optional specification for splitting the input text. Values can be None (no splitting), SPLIT_ON_WHITESPACE (split on ASCII whitespace), or a Callable. ngrams: Optional specification for ngrams to create from the possibly-split input text. Values can be None, an integer or tuple of integers; passing an integer will create ngrams up to that integer, and passing a tuple of integers will create ngrams for the specified values in the tuple. Passing None means that no ngrams will be created. output_mode: Optional specification for the output of the layer. Values can be INT, BINARY, COUNT or TFIDF, which control the outputs as follows: INT: Outputs integer indices, one integer index per split string token. BINARY: Outputs a single int array per batch, of either vocab_size or max_tokens size, containing 1s in all elements where the token mapped to that index exists at least once in the batch item. COUNT: As BINARY, but the int array contains a count of the number of times the token at that index appeared in the batch item. TFIDF: As BINARY, but the TF-IDF algorithm is applied to find the value in each token slot. output_sequence_length: Optional length for the output tensor. If set, the output will be padded or truncated to this value in INT mode. pad_to_max_tokens: If True, BINARY, COUNT, and TFIDF modes will have their outputs padded to max_tokens, even if the number of unique tokens in the vocabulary is less than max_tokens." 6220,int_gen,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/category_crossing_benchmark.py,43,function, 6221,BenchmarkLayer,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/category_crossing_benchmark.py,48,class,Benchmark the layer forward pass. 6222,BenchmarkLayer,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/category_encoding_benchmark.py,39,class,Benchmark the layer forward pass. 6223,word_gen,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/hashing_benchmark.py,45,function, 6224,BenchmarkLayer,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/hashing_benchmark.py,50,class,Benchmark the layer forward pass. 
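As a rough illustration of the output modes listed in the entry above, the sketch below contrasts "int" and "count" on a toy corpus. This is a sketch only: exact index assignments depend on adapted token frequencies, and the layer path is the assumed `experimental.preprocessing` location.

```python
import numpy as np
import tensorflow as tf

data = np.array([["the fox jumped"], ["the the dog"]])

# "int" mode: one integer index per split token.
int_layer = tf.keras.layers.experimental.preprocessing.TextVectorization(
    output_mode="int")
int_layer.adapt(data)
print(int_layer(tf.constant([["the dog jumped"]])))  # e.g. [[2 4 3]]

# "count" mode: one row per batch item, one column per vocabulary slot,
# holding how many times that token occurred in the item.
count_layer = tf.keras.layers.experimental.preprocessing.TextVectorization(
    output_mode="count")
count_layer.adapt(data)
print(count_layer(tf.constant([["the the dog"]])))  # e.g. [[0. 0. 2. 0. 1. 0.]]
```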
6225,rotate,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py,48,function,Rotate image. 6226,zoom,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py,62,function,Zoom image. 6227,image_augmentation,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py,78,function,Image augmentation. 6228,BenchmarkLayer,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/image_preproc_benchmark.py,88,class,Benchmark the layer forward pass. 6229,word_gen,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py,45,function, 6230,get_top_k,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py,50,function,Python implementation of vocabulary building using a defaultdict. 6231,BenchmarkAdapt,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/index_lookup_adapt_benchmark.py,66,class,Benchmark adapt. 6232,reduce_fn,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/normalization_adapt_benchmark.py,41,function,tf.data.Dataset-friendly implementation of mean and variance. 6233,BenchmarkAdapt,tensorflow/tensorflow/python/keras/layers/preprocessing/benchmarks/normalization_adapt_benchmark.py,63,class,Benchmark adapt. 6234,keras_style_scope,tensorflow/tensorflow/python/keras/legacy_tf_layers/base.py,47,function,"Use Keras-style variable management. All tf.layers and tf RNN cells created in this scope use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. The purpose of this scope is to allow users of existing layers to slowly transition to a Keras layers API without breaking existing functionality. One example of this is when using TensorFlow's RNN classes with Keras Models or Networks. Because Keras models do not properly set variable scopes, users of RNNs may either accidentally share scopes between two different models, or get errors about variables that already exist. Example: ```python class RNNModel(tf.keras.Model): def __init__(self, name): super(RNNModel, self).__init__(name=name) self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell( [tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)]) def call(self, input, state): return self.rnn(input, state) model_1 = RNNModel(""model_1"") model_2 = RNNModel(""model_2"") # OK output_1, next_state_1 = model_1(input, state) # Raises an error about trying to create an already existing variable. output_2, next_state_2 = model_2(input, state) ``` The solution is to wrap the model construction and execution in a keras-style scope: ```python with keras_style_scope(): model_1 = RNNModel(""model_1"") model_2 = RNNModel(""model_2"") # model_1 and model_2 are guaranteed to create their own variables. output_1, next_state_1 = model_1(input, state) output_2, next_state_2 = model_2(input, state) assert len(model_1.weights) > 0 assert len(model_2.weights) > 0 assert(model_1.weights != model_2.weights) ``` Yields: A keras layer style scope." 6235,set_keras_style,tensorflow/tensorflow/python/keras/legacy_tf_layers/base.py,115,function,"Use Keras-style variable management. All tf.layers and tf RNN cells created after keras style has been enabled use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. 
The purpose of this function is to allow users of existing layers to slowly transition to the Keras layers API without breaking existing functionality. For more details, see the documentation for `keras_style_scope`. Note that once keras style has been set, it is set globally for the entire program and cannot be unset. Example: ```python set_keras_style() model_1 = RNNModel(name=""model_1"") model_2 = RNNModel(name=""model_2"") # model_1 and model_2 are guaranteed to create their own variables. output_1, next_state_1 = model_1(input, state) output_2, next_state_2 = model_2(input, state) assert len(model_1.weights) > 0 assert len(model_2.weights) > 0 assert(model_1.weights != model_2.weights) ```" 6236,_is_in_keras_style_scope,tensorflow/tensorflow/python/keras/legacy_tf_layers/base.py,152,function, 6237,Layer,tensorflow/tensorflow/python/keras/legacy_tf_layers/base.py,158,class,"Base layer class. It is considered legacy, and we recommend the use of `tf.keras.layers.Layer` instead. Arguments: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). Read-only properties: name: The name of the layer (string). dtype: Default dtype of the layer's weights (default of `None` means use the type of the first input). trainable_variables: List of trainable variables. non_trainable_variables: List of non-trainable variables. variables: List of all variables of this layer, trainable and non-trainable. updates: List of update ops of this layer. losses: List of losses added by this layer. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). Mutable properties: trainable: Whether the layer should be trained (boolean). input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer." 6238,_add_elements_to_collection,tensorflow/tensorflow/python/keras/legacy_tf_layers/base.py,582,function, 6239,BaseLayerTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/base_test.py,47,class, 6240,IdentityLayer,tensorflow/tensorflow/python/keras/legacy_tf_layers/base_test.py,647,class,A layer that returns the identity of its input. 6241,DTypeTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/base_test.py,655,class, 6242,Conv1D,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,30,class,"1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. 
`""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: An integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer." 6243,conv1d,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,125,function,"Functional interface for 1D convolution layer (e.g. temporal convolution). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of a single integer, specifying the length of the 1D convolution window. strides: An integer or tuple/list of a single integer, specifying the stride length of the convolution. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: An integer or tuple/list of a single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any `strides` value != 1. 
activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6244,Conv2D,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,228,class,"2D convolution layer (e.g. spatial convolution over images). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. 
kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer." 6245,conv2d,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,330,function,"Functional interface for the 2D convolution layer. This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). 
The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6246,Conv3D,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,440,class,"3D convolution layer (e.g. spatial convolution over volumes). This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. dilation_rate: An integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. 
bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer." 6247,conv3d,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,543,function,"Functional interface for the 3D convolution layer. This layer creates a convolution kernel that is convolved (actually cross-correlated) with the layer input to produce a tensor of outputs. If `use_bias` is True (and a `bias_initializer` is provided), a bias vector is created and added to the outputs. Finally, if `activation` is not `None`, it is applied to the outputs as well. Arguments: inputs: Tensor input. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. dilation_rate: An integer or tuple/list of 3 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. 
Raises: ValueError: if eager execution is enabled." 6248,SeparableConv1D,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,654,class,"Depthwise separable 1D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer." 6249,SeparableConv2D,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,764,class,"Depthwise separable 2D convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. 
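The depthwise-then-pointwise decomposition described for `SeparableConv1D` above is easiest to see through the parameter count. The sketch below uses the Keras-side layer on the assumption that it mirrors the legacy one (the legacy functional interface raises `ValueError` under eager execution, per its docstring).

```python
import tensorflow as tf

# depth_multiplier defaults to 1, padding to "valid", strides to 1.
layer = tf.keras.layers.SeparableConv1D(filters=8, kernel_size=3)
y = layer(tf.zeros([2, 16, 4]))  # (batch=2, length=16, channels=4)
print(y.shape)                   # (2, 14, 8): length 16 - 3 + 1 under "valid"
# Depthwise kernel 3*4*1 = 12, pointwise kernel 4*1*8 = 32, bias 8 -> 52,
# versus 3*4*8 + 8 = 104 for a plain Conv1D with the same shapes.
print(layer.count_params())      # 52
```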
If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer." 6250,separable_conv1d,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,882,function,"Functional interface for the depthwise separable 1D convolution layer. 
This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A single integer specifying the spatial dimensions of the filters. strides: A single integer specifying the strides of the convolution. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. dilation_rate: A single integer, specifying the dilation rate to use for dilated convolution. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6251,separable_conv2d,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,1005,function,"Functional interface for the depthwise separable 2D convolution layer. 
This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. It then optionally applies an activation function to produce the final output. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. Specifying any `stride` value != 1 is incompatible with specifying any `dilation_rate` value != 1. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. dilation_rate: An integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any `dilation_rate` value != 1 is incompatible with specifying any stride value != 1. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `num_filters_in * depth_multiplier`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. depthwise_initializer: An initializer for the depthwise convolution kernel. pointwise_initializer: An initializer for the pointwise convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. pointwise_constraint: Optional projection function to be applied to the pointwise kernel after being updated by an `Optimizer`. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. 
Raises: ValueError: if eager execution is enabled." 6252,Conv2DTranspose,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,1130,class,"Transposed 2D convolution layer (sometimes called 2D Deconvolution). The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer." 6253,conv2d_transpose,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,1221,function,"Functional interface for transposed 2D convolution layer. The need for transposed convolutions generally arises from the desire to use a transformation going in the opposite direction of a normal convolution, i.e., from something that has the shape of the output of some convolution to something that has the shape of its input while maintaining a connectivity pattern that is compatible with said convolution. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 2 positive integers specifying the spatial dimensions of the filters. 
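Shapes are the quickest way to see the "opposite direction" relationship described for `Conv2DTranspose` above. A sketch with the Keras equivalents, assumed interchangeable here since the legacy functional interfaces need graph mode:

```python
import tensorflow as tf

x = tf.zeros([1, 8, 8, 3])
down = tf.keras.layers.Conv2D(16, 3, strides=2, padding="same")(x)
print(down.shape)  # (1, 4, 4, 16): a normal convolution halves height/width
up = tf.keras.layers.Conv2DTranspose(3, 3, strides=2, padding="same")(down)
print(up.shape)    # (1, 8, 8, 3): the transpose maps back to the input shape
```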
Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 2 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. activation: Activation function. Set it to `None` to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If `None`, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6254,Conv3DTranspose,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,1319,class,"Transposed 3D convolution layer (sometimes called 3D Deconvolution). Arguments: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. padding: One of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. activation: Activation function. Set it to `None` to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. 
kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If `None`, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer." 6255,conv3d_transpose,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional.py,1407,function,"Functional interface for transposed 3D convolution layer. Arguments: inputs: Input tensor. filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: A tuple or list of 3 positive integers specifying the spatial dimensions of the filters. Can be a single integer to specify the same value for all spatial dimensions. strides: A tuple or list of 3 positive integers specifying the strides of the convolution. Can be a single integer to specify the same value for all spatial dimensions. padding: one of `""valid""` or `""same""` (case-insensitive). `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. activation: Activation function. Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If None, the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: A string, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor. 
Raises: ValueError: if eager execution is enabled." 6256,ConvTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py,37,class, 6257,SeparableConv1DTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py,353,class, 6258,SeparableConv2DTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py,530,class, 6259,Conv2DTransposeTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py,790,class, 6260,Conv3DTransposeTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py,987,class, 6261,Dense,tensorflow/tensorflow/python/keras/legacy_tf_layers/core.py,33,class,"Densely-connected layer class. This layer implements the operation: `outputs = activation(inputs * kernel + bias)` where `activation` is the activation function passed as the `activation` argument (if not `None`), `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only if `use_bias` is `True`). Arguments: units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the default initializer used by `tf.compat.v1.get_variable`. bias_initializer: Initializer function for the bias. kernel_regularizer: Regularizer function for the weight matrix. bias_regularizer: Regularizer function for the bias. activity_regularizer: Regularizer function for the output. kernel_constraint: An optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: An optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: String, the name of the layer. Layers with the same name will share weights, but to avoid mistakes we require reuse=True in such cases. _reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Properties: units: Python integer, dimensionality of the output space. activation: Activation function (callable). use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer instance (or name) for the kernel matrix. bias_initializer: Initializer instance (or name) for the bias. kernel_regularizer: Regularizer instance for the kernel matrix (callable). bias_regularizer: Regularizer instance for the bias (callable). activity_regularizer: Regularizer instance for the output (callable). kernel_constraint: Constraint function for the kernel matrix. bias_constraint: Constraint function for the bias. kernel: Weight matrix (TensorFlow variable or tensor). bias: Bias vector, if applicable (TensorFlow variable or tensor)." 6262,dense,tensorflow/tensorflow/python/keras/legacy_tf_layers/core.py,116,function,"Functional interface for the densely-connected layer. 
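A quick numerical check of the operation documented for `Dense` above, `outputs = activation(inputs * kernel + bias)`, sketched with the Keras-side layer on the assumption that the legacy class computes the same thing:

```python
import tensorflow as tf

layer = tf.keras.layers.Dense(units=2, activation=None, use_bias=True)
x = tf.constant([[1.0, 2.0, 3.0]])
y = layer(x)  # first call builds kernel with shape (3, 2) and bias with shape (2,)
manual = tf.matmul(x, layer.kernel) + layer.bias
tf.debugging.assert_near(y, manual)  # the two paths agree up to float rounding
```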
This layer implements the operation: `outputs = activation(inputs * kernel + bias)` where `activation` is the activation function passed as the `activation` argument (if not `None`), `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only if `use_bias` is `True`). Arguments: inputs: Tensor input. units: Integer or Long, dimensionality of the output space. activation: Activation function (callable). Set it to None to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: Initializer function for the weight matrix. If `None` (default), weights are initialized using the default initializer used by `tf.compat.v1.get_variable`. bias_initializer: Initializer function for the bias. kernel_regularizer: Regularizer function for the weight matrix. bias_regularizer: Regularizer function for the bias. activity_regularizer: Regularizer function for the output. kernel_constraint: An optional projection function to be applied to the kernel after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: An optional projection function to be applied to the bias after being updated by an `Optimizer`. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). name: String, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: Output tensor the same shape as `inputs` except the last dimension is of size `units`. Raises: ValueError: if eager execution is enabled." 6263,Dropout,tensorflow/tensorflow/python/keras/legacy_tf_layers/core.py,191,class,"Applies Dropout to the input. Dropout consists of randomly setting a fraction `rate` of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by `1 / (1 - rate)`, so that their sum is unchanged at training time and inference time. Arguments: rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out 10% of input units. noise_shape: 1D tensor of type `int32` representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape `(batch_size, timesteps, features)`, and you want the dropout mask to be the same for all timesteps, you can use `noise_shape=[batch_size, 1, features]`. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. name: The name of the layer (string)." 6264,dropout,tensorflow/tensorflow/python/keras/legacy_tf_layers/core.py,233,function,"Applies Dropout to the input. Dropout consists of randomly setting a fraction `rate` of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by `1 / (1 - rate)`, so that their sum is unchanged at training time and inference time. Arguments: inputs: Tensor input. rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out 10% of input units. noise_shape: 1D tensor of type `int32` representing the shape of the binary dropout mask that will be multiplied with the input.
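A corresponding sketch for the `dense` functional interface (record 6262); shapes and values are illustrative:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=(None, 32))
# Only the last dimension changes: it becomes `units`.
y = tf.layers.dense(x, units=10, activation=tf.nn.relu)
print(y.shape)  # (?, 10)
```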
For instance, if your inputs have shape `(batch_size, timesteps, features)`, and you want the dropout mask to be the same for all timesteps, you can use `noise_shape=[batch_size, 1, features]`. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. training: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder). Whether to return the output in training mode (apply dropout) or in inference mode (return the input untouched). name: The name of the layer (string). Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6265,Flatten,tensorflow/tensorflow/python/keras/legacy_tf_layers/core.py,275,class,"Flattens an input tensor while preserving the batch axis (axis 0). Arguments: data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, ..., channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, ...)`. Examples: ``` x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32') y = Flatten()(x) # now `y` has shape `(None, 16)` x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32') y = Flatten()(x) # now `y` has shape `(None, None)` ```" 6266,flatten,tensorflow/tensorflow/python/keras/legacy_tf_layers/core.py,304,function,"Flattens an input tensor while preserving the batch axis (axis 0). Arguments: inputs: Tensor input. name: The name of the layer (string). data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. Returns: Reshaped tensor. Examples: ``` x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32') y = flatten(x) # now `y` has shape `(None, 16)` x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32') y = flatten(x) # now `y` has shape `(None, None)` ```" 6267,DenseTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/core_test.py,45,class, 6268,_get_variable_dict_from_varstore,tensorflow/tensorflow/python/keras/legacy_tf_layers/core_test.py,379,function, 6269,DropoutTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/core_test.py,386,class, 6270,FlattenTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/core_test.py,469,class, 6271,BatchNormalization,tensorflow/tensorflow/python/keras/legacy_tf_layers/normalization.py,31,class,"Batch Normalization layer from (Ioffe et al., 2015). Keras APIs handle BatchNormalization updates to the moving_mean and moving_variance as part of their `fit()` and `evaluate()` loops. However, if a custom training loop is used with an instance of `Model`, these updates need to be explicitly included. Here's a simple example of how it can be done: ```python # model is an instance of Model that contains BatchNormalization layer. update_ops = model.get_updates_for(None) + model.get_updates_for(features) train_op = optimizer.minimize(loss) train_op = tf.group([train_op, update_ops]) ``` Arguments: axis: An `int` or list of `int`, the axis or axes that should be normalized, typically the features axis/axes. For instance, after a `Conv2D` layer with `data_format=""channels_first""`, set `axis=1`. If a list of axes is provided, each axis in `axis` will be normalized simultaneously. Default is `-1` which uses the last axis. 
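The `dropout` and `flatten` helpers (records 6264 and 6266) compose as in the following sketch; the `training` placeholder switches dropout on and off, as described above, and the shapes and feed values are illustrative:

```python
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=(None, 4, 4))
training = tf.placeholder_with_default(False, shape=())

flat = tf.layers.flatten(x)  # (?, 16)
out = tf.layers.dropout(flat, rate=0.5, training=training)

with tf.Session() as sess:
    ones = np.ones((2, 4, 4), dtype='float32')
    # Inference mode: the input passes through untouched.
    print(sess.run(out, {x: ones}))
    # Training mode: kept units are scaled by 1 / (1 - rate) = 2.0.
    print(sess.run(out, {x: ones, training: True}))
```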
Note: when using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and `moving_variance` variables are the same rank as the input Tensor, with dimension size 1 in all reduced (non-axis) dimensions. momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling can be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: An optional projection function to be applied to the `beta` weight after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. gamma_constraint: An optional projection function to be applied to the `gamma` weight after being updated by an `Optimizer`. renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra variables during training. The inference is the same for either value of this parameter. renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `None` or `True`, use a faster, fused implementation if possible. If `False`, use the system recommended implementation. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. When `virtual_batch_size` is not `None`, instead perform ""Ghost Batch Normalization"", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution. adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta.
If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. name: A string, the name of the layer. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf)) Batch Renormalization - Towards Reducing Minibatch Dependence in Batch-Normalized Models: [Ioffe, 2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models) ([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf))" 6272,batch_normalization,tensorflow/tensorflow/python/keras/legacy_tf_layers/normalization.py,181,function,"Functional interface for the batch normalization layer from (Ioffe et al., 2015). Note: when training, the moving_mean and moving_variance need to be updated. By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they need to be executed alongside the `train_op`. Also, be sure to add any batch_normalization ops before getting the update_ops collection. Otherwise, update_ops will be empty, and training/inference will not work properly. For example: ```python x_norm = tf.compat.v1.layers.batch_normalization(x, training=training) # ... update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = optimizer.minimize(loss) train_op = tf.group([train_op, update_ops]) ``` Arguments: inputs: Tensor input. axis: An `int`, the axis that should be normalized (typically the features axis). For instance, after a `Convolution2D` layer with `data_format=""channels_first""`, set `axis=1` in `BatchNormalization`. momentum: Momentum for the moving average. epsilon: Small float added to variance to avoid dividing by zero. center: If True, add offset of `beta` to normalized tensor. If False, `beta` is ignored. scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling can be done by the next layer. beta_initializer: Initializer for the beta weight. gamma_initializer: Initializer for the gamma weight. moving_mean_initializer: Initializer for the moving mean. moving_variance_initializer: Initializer for the moving variance. beta_regularizer: Optional regularizer for the beta weight. gamma_regularizer: Optional regularizer for the gamma weight. beta_constraint: An optional projection function to be applied to the `beta` weight after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. gamma_constraint: An optional projection function to be applied to the `gamma` weight after being updated by an `Optimizer`. training: Either a Python boolean, or a TensorFlow boolean scalar tensor (e.g. a placeholder). Whether to return the output in training mode (normalized with statistics of the current batch) or in inference mode (normalized with moving statistics). **NOTE**: make sure to set this parameter correctly, or else your training/inference will not work properly. trainable: Boolean, if `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra variables during training. The inference is the same for either value of this parameter. renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to scalar `Tensors` used to clip the renorm correction. The correction `(r, d)` is used as `corrected_value = normalized_value * r + d`, with `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin, dmax are set to inf, 0, inf, respectively. renorm_momentum: Momentum used to update the moving means and standard deviations with renorm. Unlike `momentum`, this affects training and should be neither too small (which would add noise) nor too large (which would give stale estimates). Note that `momentum` is still applied to get the means and variances for inference. fused: if `None` or `True`, use a faster, fused implementation if possible. If `False`, use the system recommended implementation. virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`, which means batch normalization is performed across the whole batch. When `virtual_batch_size` is not `None`, instead perform ""Ghost Batch Normalization"", which creates virtual sub-batches which are each normalized separately (with shared gamma, beta, and moving statistics). Must divide the actual batch size during execution. adjustment: A function taking the `Tensor` containing the (dynamic) shape of the input tensor and returning a pair (scale, bias) to apply to the normalized values (before gamma and beta), only during training. For example, if axis==-1, `adjustment = lambda shape: ( tf.random.uniform(shape[-1:], 0.93, 1.07), tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized value by up to 7% up or down, then shift the result by up to 0.1 (with independent scaling and bias for each feature but shared across all examples), and finally apply gamma and/or beta. If `None`, no adjustment is applied. Cannot be specified if virtual_batch_size is specified. Returns: Output tensor. Raises: ValueError: if eager execution is enabled. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf)) Batch Renormalization - Towards Reducing Minibatch Dependence in Batch-Normalized Models: [Ioffe, 2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models) ([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf))" 6273,BNTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/normalization_test.py,43,class, 6274,AveragePooling1D,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,29,class,"Average Pooling layer for 1D inputs. Arguments: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. 
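The `UPDATE_OPS` caveat in record 6272 is easy to get wrong, so here is a slightly fuller sketch of the pattern; the model and loss are illustrative, and `tf.control_dependencies` is used instead of the `tf.group` form shown in the docstring (both are common):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=(None, 32))
labels = tf.placeholder(tf.float32, shape=(None, 10))  # one-hot labels
training = tf.placeholder(tf.bool, shape=())

h = tf.nn.relu(tf.layers.batch_normalization(tf.layers.dense(x, 64), training=training))
logits = tf.layers.dense(h, 10)
loss = tf.losses.softmax_cross_entropy(labels, logits)

# The moving mean/variance updates live in UPDATE_OPS and must run with the
# train op, exactly as the docstring warns.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
```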
`channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer." 6275,average_pooling1d,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,64,function,"Average Pooling layer for 1D inputs. Arguments: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled." 6276,MaxPooling1D,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,99,class,"Max Pooling layer for 1D inputs. Arguments: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer." 6277,max_pooling1d,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,134,function,"Max Pooling layer for 1D inputs. Arguments: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. Raises: ValueError: if eager execution is enabled." 6278,AveragePooling2D,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,169,class,"Average pooling layer for 2D inputs (e.g. images). Arguments: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. 
`channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer." 6279,average_pooling2d,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,204,function,"Average pooling layer for 2D inputs (e.g. images). Arguments: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6280,MaxPooling2D,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,242,class,"Max pooling layer for 2D inputs (e.g. images). Arguments: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer." 6281,max_pooling2d,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,277,function,"Max pooling layer for 2D inputs (e.g. images). Arguments: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6282,AveragePooling3D,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,315,class,"Average pooling layer for 3D inputs (e.g. volumes). 
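A short sketch of the 2D pooling helpers just documented (records 6279-6281); the input shape is illustrative:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

images = tf.placeholder(tf.float32, shape=(None, 28, 28, 3))  # channels_last
pooled = tf.layers.max_pooling2d(images, pool_size=2, strides=2, padding='valid')
print(pooled.shape)  # (?, 14, 14, 3): spatial dims halved, channels untouched
```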
Arguments: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer." 6283,average_pooling3d,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,352,function,"Average pooling layer for 3D inputs (e.g. volumes). Arguments: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6284,MaxPooling3D,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,392,class,"Max pooling layer for 3D inputs (e.g. volumes). Arguments: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer." 6285,max_pooling3d,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling.py,429,function,"Max pooling layer for 3D inputs (e.g. volumes). Arguments: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. 
padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled." 6286,PoolingTest,tensorflow/tensorflow/python/keras/legacy_tf_layers/pooling_test.py,28,class, 6287,AutoCastVariable,tensorflow/tensorflow/python/keras/mixed_precision/experimental/autocast_variable.py,30,class,"Variable that will cast itself to a different dtype in applicable contexts. This class wraps a floating-point `tf.Variable`. It emulates the variable interface and delegates to the wrapped variable, but it additionally will cast the wrapped variable under a `Graph._enable_auto_casting_variables(dtype)` context manager. For example: >>> v = tf.Variable(1.0, dtype=tf.float32) >>> v = AutoCastVariable(v) >>> tf.identity(v).dtype tf.float32 >>> with ops.get_default_graph()._enable_auto_casting_variables(tf.float16): ... tf.identity(v).dtype tf.float16 >>> with ops.get_default_graph()._enable_auto_casting_variables(tf.float16): ... v.dtype # v.dtype also changes under the context manager tf.float16 The purpose of this class is to allow Keras layers to create variables in float32, and automatically cast them to float16 or bfloat16 when the layer is called." 6288,create_autocast_variable,tensorflow/tensorflow/python/keras/mixed_precision/experimental/autocast_variable.py,431,function,"Creates an AutoCastVariable that wraps another variable. This typically just returns `AutoCastVariable(variable)`. But, if the variable is a DistributedVariable or one of its subclasses, we instead dynamically create a class that subclasses from both AutoCastVariable and variable.__class__. This is so the returned variable will still pass `isinstance(variable, variable.__class__)`, which is required for DistributedVariables and its subclasses to work properly. Args: variable: A floating-point resource variable to wrap. Returns: An AutoCastVariable that wraps the variable." 6289,_maybe_wrap,tensorflow/tensorflow/python/keras/mixed_precision/experimental/autocast_variable.py,474,function,"Creates an AutoCastVariable that wraps another variable if applicable. This function is used to wrap the return value of AutoCastVariable.assign. Unfortunately MirroredVariable.assign will (incorrectly) return a Mirrored value instead of a MirroredVariable. So we cannot properly wrap it in an AutoCastVariable. We return the original variable in that case. Args: variable: A tf.Variable or op. wrap: A boolean to define whether to wrap the variable in an AutoCastVariable or not. Returns: An AutoCastVariable if wrap is True and variable is a resource variable." 6290,get_var,tensorflow/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py,51,function, 6291,AutoCastVariableTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py,56,class, 6292,_dedup_strings,tensorflow/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check.py,36,function,"Groups together consecutive identical strings. 
For example, given: ['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3'] This function returns: ['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)'] Args: device_strs: A list of strings, each representing a device. Returns: A copy of the input, but identical consecutive strings are merged into a single string." 6293,_log_device_compatibility_check,tensorflow/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check.py,61,function,"Logs a compatibility check if the devices support the policy. Currently only logs for the policy mixed_float16. Args: policy_name: The name of the dtype policy. gpu_details_list: A list of dicts, one dict per GPU. Each dict is the device details for a GPU, as returned by `tf.config.experimental.get_device_details()`." 6294,log_device_compatibility_check,tensorflow/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check.py,135,function,"Logs a compatibility check if the devices support the policy. Currently only logs for the policy mixed_float16. A log is shown only the first time this function is called. Args: policy_name: The name of the dtype policy." 6295,device_details,tensorflow/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check_test.py,29,function, 6296,DeviceCompatibilityCheckTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check_test.py,39,class, 6297,get_layer_policy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/get_layer_policy.py,29,function,"Returns the dtype policy of a layer. Args: layer: A `tf.keras.layers.Layer`. Returns: The `tf.keras.mixed_precision.experimental.Policy` of the layer." 6298,GetLayerPolicyTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/get_layer_policy_test.py,28,class, 6299,MultiplyLayerWithoutAutoCast,tensorflow/tensorflow/python/keras/mixed_precision/experimental/keras_test.py,65,class,"Same as MultiplyLayer, but does not use AutoCastVariables." 6300,MultiplyLayerWithFunction,tensorflow/tensorflow/python/keras/mixed_precision/experimental/keras_test.py,86,class,"Same as MultiplyLayer, but _multiply is decorated with a tf.function." 6301,create_mirrored_strategy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/keras_test.py,99,function,"Create a MirroredStrategy, using a GPU if it is available." 6302,create_central_storage_strategy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/keras_test.py,107,function,"Create a CentralStorageStrategy, using a GPU if it is available." 6303,KerasLayerTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/keras_test.py,124,class,Test mixed precision with Keras layers. 6304,KerasModelTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/keras_test.py,427,class,Test mixed precision with Keras models. 6305,create_mirrored_strategy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/layer_correctness_test.py,49,function, 6306,LayerCorrectnessTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/layer_correctness_test.py,55,class, 6307,serialize,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale.py,30,function, 6308,deserialize,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale.py,34,function, 6309,get,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale.py,48,function,Get a loss scale object. 
6310,_get_strategy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_benchmark.py,38,function, 6311,LossScaleBenchmark,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_benchmark.py,46,class,Benchmark for loss scaling. 6312,_UnwrapPreventer,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py,39,class,"Wrapper that DistributionStrategy will not unwrap. Typically, DistributionStrategy will unwrap values when going from a cross-replica context to a replica context via `call_for_each_replica`. This class is a wrapper that DistributionStrategy will not unwrap, so it can be used to prevent it from unwrapping a value. TODO(reedwm): Find/implement a better way of preventing values from being unwrapped by DistributionStrategy" 6313,_DelegatingTrackableMixin,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py,57,class,"A mixin that delegates all Trackable methods to another trackable object. This class must be used with multiple inheritance. A class that subclasses Trackable can also subclass this class, which causes all Trackable methods to be delegated to the trackable object passed in the constructor. A subclass can use this mixin to appear as if it were the trackable passed to the constructor, from a Checkpoint's perspective. LossScaleOptimizer uses this mixin, so that the checkpoint format for a LossScaleOptimizer is identical to the checkpoint format for a normal optimizer. This allows a model to be saved with a normal Optimizer and restored with a LossScaleOptimizer, or vice versa. The only difference in checkpoint format is that the loss scale is also saved with a LossScaleOptimizer." 6314,LossScaleOptimizer,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py,176,class,"An optimizer that applies loss scaling. Loss scaling is a process that multiplies the loss by a multiplier called the loss scale, and divides each gradient by the same multiplier. The pseudocode for this process is: ``` loss = ... loss *= loss_scale grads = gradients(loss, vars) grads /= loss_scale ``` Mathematically, loss scaling has no effect, but can help avoid numerical underflow in intermediate gradients when float16 tensors are used. By multiplying the loss, each intermediate gradient will have the same multiplier applied. The loss scale can either be a fixed constant, chosen by the user, or be dynamically determined. Dynamically determining the loss scale is convenient as a loss scale does not have to be explicitly chosen. However, it reduces performance. This optimizer wraps another optimizer and applies loss scaling to it via a `LossScale`. Loss scaling is applied whenever gradients are computed, either through `minimize()` or `get_gradients()`. The loss scale is updated via `LossScale.update()` whenever gradients are applied, either through `minimize()` or `apply_gradients()`. For example: >>> opt = tf.keras.optimizers.SGD(0.25) >>> opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, ... ""dynamic"") >>> var = tf.Variable(1.) >>> loss_fn = lambda: var ** 2 >>> # 'minimize' applies loss scaling to the loss and updates the loss scale. >>> opt.minimize(loss_fn, var_list=var) >>> var.numpy() 0.5 If a `tf.GradientTape` is used to compute gradients instead of `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, the loss and gradients must be scaled manually.
This can be done by calling `LossScaleOptimizer.get_scaled_loss` before passing the loss to `tf.GradientTape`, and `LossScaleOptimizer.get_unscaled_gradients` after computing the gradients with `tf.GradientTape`. For example: >>> with tf.GradientTape() as tape: ... loss = loss_fn() ... scaled_loss = opt.get_scaled_loss(loss) >>> scaled_grad = tape.gradient(scaled_loss, var) >>> (grad,) = opt.get_unscaled_gradients([scaled_grad]) >>> opt.apply_gradients([(grad, var)]) # Loss scale is updated here >>> var.numpy() 0.25" 6315,FakeOptimizerForRestoration,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py,545,class,"A fake optimizer used to support restoring TensorFlow 2.2 checkpoints. The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class exists to support restoring TF 2.2 checkpoints in newer versions of TensorFlow. In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the following in LossScaleOptimizer.__init__ ``` self._track_trackable(self._optimizer, 'base_optimizer') ``` This means a dependency from the LossScaleOptimizer to the wrapped optimizer would be stored in the checkpoint. However now, the checkpoint format with a LossScaleOptimizer is the same as the format without a LossScaleOptimizer, except the loss scale is also stored. This means there is no dependency from the LossScaleOptimizer to the wrapped optimizer. Instead, the LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's perspective, by overriding all Trackable methods and delegating them to the wrapped optimizer. To allow restoring TF 2.2 checkpoints, LossScaleOptimizer adds a dependency on this class instead of the inner optimizer. When restored, this class will instead restore the slot variables of the inner optimizer. Since this class has no variables, it does not affect the checkpoint when saved." 6316,_multiply_gradient,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py,590,function,Multiply a (possibly sparse) gradient by the given scale factor. 6317,strategy_supports_loss_scaling,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py,602,function,Returns True if the current Strategy supports loss scaling. 6318,create_mirrored_strategy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py,55,function, 6319,LossScaleOptimizerTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py,73,class, 6320,MixedPrecisionTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/mixed_precision_graph_rewrite_test.py,42,class, 6321,Policy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,41,class,"A dtype policy for a Keras layer. A dtype policy determines dtype-related aspects of a layer, such as its computation and variable dtypes. Each layer has a policy. Policies can be passed to the `dtype` argument of layer constructors, or a global policy can be set with `tf.keras.mixed_precision.experimental.set_policy`. A layer will default to the global policy if no policy is passed to its constructor. For many models, each layer's policy will have the same compute dtype and variable dtype, which will typically be float32. In this case, we refer to the singular dtype as the layer's dtype, which can be queried by the property `tf.keras.layers.Layer.dtype`.
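Putting the two doctest fragments from record 6314 together, a complete custom training step with manual loss scaling might look like the following sketch (eager mode, illustrative values):

```python
import tensorflow as tf

opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
    tf.keras.optimizers.SGD(0.25), 'dynamic')
var = tf.Variable(1.)

def train_step():
    with tf.GradientTape() as tape:
        loss = var ** 2
        # Scale the loss before differentiating so float16 intermediate
        # gradients do not underflow.
        scaled_loss = opt.get_scaled_loss(loss)
    scaled_grads = tape.gradient(scaled_loss, [var])
    grads = opt.get_unscaled_gradients(scaled_grads)
    opt.apply_gradients(zip(grads, [var]))  # also updates the loss scale

train_step()
print(var.numpy())  # 0.5: d(var**2)/d(var) at 1.0 is 2.0, times the 0.25 learning rate
```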
When mixed precision training is used, most layers will instead have a float16 or bfloat16 compute dtype and a float32 variable dtype, and so the layer does not have a single dtype. When the variable dtype does not match the compute dtype, variables will be automatically casted to the compute dtype to avoid type errors. In this case, `tf.keras.layers.Layer.dtype` refers to the variable dtype, not the compute dtype. See [the mixed precision guide]( https://www.tensorflow.org/guide/keras/mixed_precision) for more information on how to use mixed precision. Certain policies also have a `tf.mixed_precision.experimental.LossScale` instance, which is used by `tf.keras.Model`s to perform loss scaling. Loss scaling is a technique used with mixed precision to avoid numerical underflow in float16 gradients. Loss scaling is only done by Models in `Model.fit`, `Model.train_on_batch`, and similar methods. Layers which are not Models ignore the loss scale. Policies are constructed by passing a string to the constructor, e.g. `tf.keras.mixed_precision.experimental.Policy('float32')`. The string determines the compute and variable dtypes. It can be one of the following: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. No loss scaling is done by default. * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or bfloat16, while the variable dtype is float32. These policies are used for mixed precision training. With 'mixed_float16', a dynamic loss scale is used by default. 'mixed_bfloat16' does no loss scaling by default, as loss scaling is unnecessary with bfloat16. ### How to use mixed precision in a Keras model To use mixed precision in a Keras model, the `'mixed_float16'` or `'mixed_bfloat16'` policy can be used. `tf.keras.mixed_precision.experimental.set_policy` can be used to set the default policy for layers if no policy is passed to them. For example: >>> tf.keras.mixed_precision.experimental.set_policy('mixed_float16') >>> model = tf.keras.models.Sequential([ ... tf.keras.layers.Input((100,)), ... # Dense layers use global policy of 'mixed_float16', which does ... # computations in float16 while keeping variables in float32. ... tf.keras.layers.Dense(10), ... tf.keras.layers.Dense(10), ... # Softmax should be done in float32 for numeric stability. We pass ... # dtype='float32' to use float32 instead of the global policy. ... tf.keras.layers.Activation('softmax', dtype='float32') ... ]) Alternatively, the policy can be passed to individual layers instead of setting the global policy with `set_policy`: >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') >>> model = tf.keras.models.Sequential([ ... tf.keras.layers.Input((100,)), ... tf.keras.layers.Dense(10, dtype=policy), ... tf.keras.layers.Dense(10, dtype=policy), ... # Softmax should be done in float32 for numeric stability. ... tf.keras.layers.Activation('softmax', dtype='float32') ... ]) Note the `'mixed_float16'` policy will apply loss scaling by default in `Model.fit`, `Model.train_on_batch`, and other training methods. If no such method is used (e.g., a custom training loop is used) and `'mixed_float16'` is used, the loss scale must be manually applied. See `tf.keras.mixed_precision.experimental.LossScaleOptimizer` for details. For `'mixed_bfloat16'`, no loss scaling is done and loss scaling never needs to be manually applied.
See [the mixed precision guide]( https://www.tensorflow.org/guide/keras/mixed_precision) for more information on using mixed precision. ### How to use float64 in a Keras model Using float64 is similar to mixed precision. Either the global policy can be set to float64, or `dtype='float64'` can be passed to individual layers. For example, to set the global policy: >>> tf.keras.mixed_precision.experimental.set_policy('float64') >>> model = tf.keras.models.Sequential([ ... tf.keras.layers.Input((100,)), ... # All layers use global policy of 'float64', which does computations ... # and creates variables in float64. ... tf.keras.layers.Dense(10), ... tf.keras.layers.Dense(10), ... tf.keras.layers.Activation('softmax') ... ]) >>> # Optionally set policy back to float32 if any other models use float32 >>> tf.keras.mixed_precision.experimental.set_policy('float32') ### How a layer uses its policy's compute dtype A layer will cast its inputs to its compute dtype in TensorFlow 2. For example: >>> x = tf.ones((4, 4, 4, 4), dtype='float64') >>> # `layer`'s policy defaults to float32. >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) >>> # `layer` casts its inputs to its compute dtype, which is float32, and >>> # does computations in float32. >>> y = layer(x) >>> y.dtype tf.float32 Note that the base `tf.keras.layers.Layer` class inserts the casts. If subclassing your own layer, you do not have to insert any casts. Currently, only tensors in the first argument to the layer's `call` method are casted. For example: >>> class MyLayer(tf.keras.layers.Layer): ... # Bug! `b` will not be casted. ... def call(self, a, b): ... return a + 1., b + 1. >>> a = tf.constant(1., dtype=""float32"") >>> b = tf.constant(1., dtype=""float32"") >>> layer = MyLayer(dtype=""float64"") >>> x, y = layer(a, b) >>> x.dtype tf.float64 >>> y.dtype tf.float32 If writing your own layer, it is recommended to accept tensors only in the first argument. This way, all tensors are casted to the layer's compute dtype. `MyLayer` should therefore be written as: >>> class MyLayer(tf.keras.layers.Layer): ... # Now, all tensor inputs will be casted. ... def call(self, inputs): ... a, b = inputs ... return a + 1., b + 1. >>> a = tf.constant(1., dtype=""float32"") >>> b = tf.constant(1., dtype=""float32"") >>> layer = MyLayer(dtype=""float64"") >>> x, y = layer((a, b)) >>> x.dtype tf.float64 >>> y.dtype tf.float64 Other arguments are not automatically casted for technical reasons, but this may change in a future minor release. The casting only occurs in TensorFlow 2, but can be enabled if `tf.compat.v1.disable_v2_behavior()` has been called with `tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`. A layer subclass can prevent its inputs from being autocasted by passing `autocast=False` to the layer constructor. For example: >>> class NonAutoCastingLayer(tf.keras.layers.Layer): ... def __init__(self, **kwargs): ... kwargs['autocast'] = False ... super(NonAutoCastingLayer, self).__init__(**kwargs) ... def call(self, inp): ... return inp >>> x = tf.ones((4, 4, 4, 4), dtype='float32') >>> layer = NonAutoCastingLayer(dtype='float64') >>> y = layer(x) # Will not cast inputs to its compute dtype of float64 >>> y.dtype tf.float32 ### How a layer uses its policy's variable dtype The default dtype of variables created by `tf.keras.layers.Layer.add_weight` is the layer's policy's variable dtype. If a layer's compute and variable dtypes differ, `add_weight` will wrap floating-point variables with a special wrapper called an `AutoCastVariable`.
This wrapper is identical to the original variable except it casts itself to the layer's compute dtype when used within `Layer.call`. Outside `Layer.call`, the variable is not casted. A layer author can prevent a variable from being wrapped with an `AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`: >>> class MyLayer(tf.keras.layers.Layer): ... def build(self, input_shape): ... self.x = self.add_weight('x') ... self.y = self.add_weight('y', experimental_autocast=False) >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') >>> layer = MyLayer(dtype=policy) >>> layer.build((2, 2)) >>> layer.x >>> layer.y Passing `experimental_autocast=False` is useful for layers which may internally do some math in the variable dtype instead of the compute dtype. For example, you may wish to compute variable statistics, such as mean and variance, in the variable dtype. ### How to write a layer that supports mixed precision and float64. For the most part, layers will automatically support mixed precision and float64 without any additional work, due to the fact that the base layer automatically casts inputs, creates variables of the correct type, and in the case of mixed precision, wraps variables with `AutoCastVariables`. For example, this simple dense layer does not require any additional work to support mixed precision or float64. Keras automatically casts the inputs and variable to the appropriate dtype. >>> class MyDense(tf.keras.layers.Layer): ... def build(self, input_shape): ... self.kernel = self.add_weight('kernel', (input_shape[-1], 10)) ... def call(self, inputs): ... return tf.matmul(inputs, self.kernel) >>> policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16') >>> layer = MyDense(dtype=policy) >>> x = np.random.rand(10, 10) >>> y = layer(x) >>> y.dtype tf.float16 The primary case where you need extra work to support mixed precision or float64 is when you create a new tensor, such as with `tf.ones` or `tf.constant`. In such cases, you must create the tensor of the correct dtype. For example, suppose you modify the `MyDense` layer to add a random number to the output using `tf.random.normal`. You must pass the input dtype to `tf.random.normal` to ensure the dtypes match. >>> class MyDense(tf.keras.layers.Layer): ... def build(self, input_shape): ... self.kernel = self.add_weight('kernel', (input_shape[-1], 10)) ... def call(self, inputs): ... rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype) ... return tf.matmul(inputs, self.kernel) + rand >>> >>> layer = MyDense(dtype=policy) >>> y = layer(x) >>> y.dtype tf.float16 If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a `TypeError` would have occurred. This is because the dtype defaults to `""float32""`, so the layer would only work if the inputs were float32.
See `tf.keras.mixed_precision.experimental.Policy` for more information on policies. Returns: The global Policy." 6323,policy_defaults_to_floatx,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,517,function,Returns True if `global_policy()` will use the current value of floatx. 6324,_check_if_mixed_precision_graph_rewrite_is_enabled,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,522,function, 6325,set_policy,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,539,function,"Sets the global Policy. The global policy is the default policy used for layers, if no policy is passed to the layer constructor. If no global policy is set, layers will instead default to a Policy constructed from `tf.keras.backend.floatx()`. See `keras.mixed_precision.experimental.Policy` for more information. Args: policy: A Policy, or a string that will be converted to a Policy." 6326,policy_scope,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,568,function,"A context manager that sets the global Policy under it. Args: policy: A Policy, or a string that will be converted to a Policy. Yields: Nothing." 6327,_is_convertible_to_dtype,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,585,function, 6328,_policy_equivalent_to_dtype,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,593,function,"Returns True if the Policy is equivalent to a single dtype. A policy is equivalent to a single dtype if the policy's compute and variable dtypes are the same and the policy does not cause the layer/model to have additional behavior, such as loss scaling. The ""_infer"" policy is considered equivalent to a single dtype. Args: policy: A Policy. Returns: True, if the policy is equivalent to a single dtype." 6329,serialize,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,615,function, 6330,deserialize,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy.py,624,function, 6331,PolicyTest,tensorflow/tensorflow/python/keras/mixed_precision/experimental/policy_test.py,40,class,Tests Policies. 6332,create_identity_with_grad_check_fn,tensorflow/tensorflow/python/keras/mixed_precision/experimental/test_util.py,32,function,"Returns a function that asserts its gradient has a certain value. This serves as a hook to assert intermediate gradients have a certain value. This returns an identity function. The identity's gradient function is also the identity function, except it asserts that the gradient equals `expected_gradient` and has dtype `expected_dtype`. Args: expected_gradient: The gradient function asserts that the gradient is this value. expected_dtype: The gradient function asserts the gradient has this dtype. Returns: An identity function whose gradient function asserts the gradient has a certain value." 6333,create_identity_with_nan_gradients_fn,tensorflow/tensorflow/python/keras/mixed_precision/experimental/test_util.py,76,function,"Returns a function that optionally has NaN gradients. This serves as a hook to introduce NaN gradients to a model. This returns an identity function. The identity's gradient function will check if the boolean tensor `have_nan_gradients` is True. If so, the gradient will be NaN. Otherwise, the gradient will also be the identity. Args: have_nan_gradients: A scalar boolean tensor. If True, gradients will be NaN. Otherwise, the gradient function is the identity function.
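A sketch of the global-policy helpers (records 6321-6326), using `get_layer_policy` from record 6297 to read a layer's policy back; this assumes all three are exported under `tf.keras.mixed_precision.experimental`, as the doctests above suggest:

```python
import tensorflow as tf

mp = tf.keras.mixed_precision.experimental
print(mp.global_policy().name)  # 'float32' by default, derived from floatx

mp.set_policy('mixed_float16')
layer = tf.keras.layers.Dense(10)  # picks up the global policy
print(mp.get_layer_policy(layer).name)  # 'mixed_float16'

mp.set_policy('float32')  # restore the default so other code is unaffected
```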
Returns: An identity function whose gradient function will return NaNs, if `have_nan_gradients` is True." 6334,AssertTypeLayer,tensorflow/tensorflow/python/keras/mixed_precision/experimental/test_util.py,110,class,A layer which asserts its inputs are a certain type. 6335,MultiplyLayer,tensorflow/tensorflow/python/keras/mixed_precision/experimental/test_util.py,128,class,A layer which multiplies its input by a scalar variable. 6336,IdentityRegularizer,tensorflow/tensorflow/python/keras/mixed_precision/experimental/test_util.py,180,class, 6337,Adadelta,tensorflow/tensorflow/python/keras/optimizer_v2/adadelta.py,32,class,"Optimizer that implements the Adadelta algorithm. Adadelta optimization is a stochastic gradient descent method that is based on adaptive learning rate per dimension to address two drawbacks: - The continual decay of learning rates throughout training - The need for a manually selected global learning rate Adadelta is a more robust extension of Adagrad that adapts learning rates based on a moving window of gradient updates, instead of accumulating all past gradients. This way, Adadelta continues learning even when many updates have been done. Compared to Adagrad, in the original version of Adadelta you don't have to set an initial learning rate. In this version, initial learning rate can be set, as in most other Keras optimizers. According to section 4.3 (""Effective Learning rates""), near the end of training step sizes converge to 1 which is effectively a high learning rate which would cause divergence. This occurs only near the end of the training as gradients and step sizes are small, and the epsilon constant in the numerator and denominator dominate past gradients and parameter updates which converge the learning rate to 1. According to section 4.4 (""Speech Data""), where a large neural network with 4 hidden layers was trained on a corpus of US English data, ADADELTA was used with 100 network replicas. The epsilon used is 1e-6 with rho=0.95 which converged faster than ADAGRAD, by the following construction: def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, decay=0., **kwargs): Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. To match the exact form in the original paper use 1.0. rho: A `Tensor` or a floating point value. The decay rate. epsilon: A `Tensor` or a floating point value. A constant epsilon used to better condition the grad update. name: Optional name prefix for the operations created when applying gradients. Defaults to `""Adadelta""`. **kwargs: Keyword arguments. Allowed to be one of `""clipnorm""` or `""clipvalue""`. `""clipnorm""` (float) clips gradients by norm; `""clipvalue""` (float) clips gradients by value. Reference: - [Zeiler, 2012](http://arxiv.org/abs/1212.5701)" 6338,AdadeltaOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/adadelta_test.py,42,class, 6339,Adagrad,tensorflow/tensorflow/python/keras/optimizer_v2/adagrad.py,34,class,"Optimizer that implements the Adagrad algorithm. Adagrad is an optimizer with parameter-specific learning rates, which are adapted relative to how frequently a parameter gets updated during training. The more updates a parameter receives, the smaller the updates. Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. initial_accumulator_value: A floating point value.
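A minimal sketch of the `Adadelta` optimizer described above; per the docstring, `learning_rate=1.0` matches the exact form of the original paper:

```python
import tensorflow as tf

opt = tf.keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95, epsilon=1e-6)
var = tf.Variable(10.0)
loss = lambda: (var ** 2) / 2.0  # d(loss)/d(var) == var
for _ in range(10):
    opt.minimize(loss, var_list=[var])
print(var.numpy())  # moves toward 0; early steps are small while the accumulators warm up
```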
Starting value for the accumulators, must be non-negative. epsilon: A small floating point value to avoid zero denominator. name: Optional name prefix for the operations created when applying gradients. Defaults to `""Adagrad""`. **kwargs: Keyword arguments. Allowed to be one of `""clipnorm""` or `""clipvalue""`. `""clipnorm""` (float) clips gradients by norm; `""clipvalue""` (float) clips gradients by value. Reference: - [Duchi et al., 2011]( http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)." 6340,adagrad_update_numpy,tensorflow/tensorflow/python/keras/optimizer_v2/adagrad_test.py,45,function, 6341,sparse_adagrad_update_numpy,tensorflow/tensorflow/python/keras/optimizer_v2/adagrad_test.py,51,function, 6342,AdagradOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/adagrad_test.py,72,class, 6343,Adam,tensorflow/tensorflow/python/keras/optimizer_v2/adam.py,34,class,"Optimizer that implements the Adam algorithm. Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of first-order and second-order moments. According to [Kingma et al., 2014](http://arxiv.org/abs/1412.6980), the method is ""*computationally efficient, has little memory requirement, invariant to diagonal rescaling of gradients, and is well suited for problems that are large in terms of data/parameters*"". Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to 0.001. beta_1: A float value or a constant float tensor, or a callable that takes no arguments and returns the actual value to use. The exponential decay rate for the 1st moment estimates. Defaults to 0.9. beta_2: A float value or a constant float tensor, or a callable that takes no arguments and returns the actual value to use. The exponential decay rate for the 2nd moment estimates. Defaults to 0.999. epsilon: A small constant for numerical stability. This epsilon is ""epsilon hat"" in the Kingma and Ba paper (in the formula just before Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to 1e-7. amsgrad: Boolean. Whether to apply the AMSGrad variant of this algorithm from the paper ""On the Convergence of Adam and beyond"". Defaults to `False`. name: Optional name for the operations created when applying gradients. Defaults to `""Adam""`. **kwargs: Keyword arguments. Allowed to be one of `""clipnorm""` or `""clipvalue""`. `""clipnorm""` (float) clips gradients by norm; `""clipvalue""` (float) clips gradients by value. Usage: >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1) >>> var1 = tf.Variable(10.0) >>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1 >>> step_count = opt.minimize(loss, [var1]).numpy() >>> # The first step is `-learning_rate*sign(grad)` >>> var1.numpy() 9.9 Reference: - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) - [Reddi et al., 2018]( https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`. Notes: The default value of 1e-7 for epsilon might not be a good default in general. For example, when training an Inception network on ImageNet a current good choice is 1.0 or 0.1. Note that since Adam uses the formulation just before Section 2.1 of the Kingma and Ba paper rather than the formulation in Algorithm 1, the ""epsilon"" referred to here is ""epsilon hat"" in the paper.
The sparse implementation of this algorithm (used when the gradient is an IndexedSlices object, typically because of `tf.gather` or an embedding lookup in the forward pass) does apply momentum to variable slices even if they were not used in the forward pass (meaning they have a gradient equal to zero). Momentum decay (beta1) is also applied to the entire momentum accumulator. This means that the sparse behavior is equivalent to the dense behavior (in contrast to some momentum implementations which ignore momentum unless a variable slice was actually used)." 6344,NonFusedAdam,tensorflow/tensorflow/python/keras/optimizer_v2/adam.py,255,class,"Optimizer that implements the Adam algorithm without fused kernels. Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of first-order and second-order moments. According to the paper [Adam: A Method for Stochastic Optimization. Kingma et al., 2014](http://arxiv.org/abs/1412.6980), the method is ""*computationally efficient, has little memory requirement, invariant to diagonal rescaling of gradients, and is well suited for problems that are large in terms of data/parameters*"". For AMSGrad see [On The Convergence Of Adam And Beyond. Reddi et al., 2018](https://openreview.net/pdf?id=ryQu7f-RZ). **If amsgrad = False**: initialize $m_0$ as 1st moment vector initialize $v_0$ as 2nd moment vector The update rule for $\theta$ with gradient $g$ uses an optimization described at the end of section 2 of the paper: $$lr_t = \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ $$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ $$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$ $$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ **If amsgrad = True**: initialize $m_0$ as 1st moment vector initialize $v_0$ as 2nd moment vector initialize $\hat{v}_0$ as 2nd moment vector The update rule for $\theta$ with gradient $g$ uses an optimization described at the end of section 2 of the paper: $$lr_t = \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ $$m_t = \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ $$v_t = \beta_2 * v_{t-1} + (1 - \beta_2) * g^2$$ $$\hat{v}_t = \max(\hat{v}_{t-1}, v_t)$$ $$\theta_t = \theta_{t-1} - lr_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ The default value of 1e-7 for epsilon might not be a good default in general. For example, when training an Inception network on ImageNet a current good choice is 1.0 or 0.1. Note that since Adam uses the formulation just before Section 2.1 of the Kingma and Ba paper rather than the formulation in Algorithm 1, the ""epsilon"" referred to here is ""epsilon hat"" in the paper. The sparse implementation of this algorithm (used when the gradient is an IndexedSlices object, typically because of `tf.gather` or an embedding lookup in the forward pass) does apply momentum to variable slices even if they were not used in the forward pass (meaning they have a gradient equal to zero). Momentum decay (beta1) is also applied to the entire momentum accumulator. This means that the sparse behavior is equivalent to the dense behavior (in contrast to some momentum implementations which ignore momentum unless a variable slice was actually used).
Usage: >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1) >>> var1 = tf.Variable(10.0) >>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1 >>> step_count = opt.minimize(loss, [var1]).numpy() >>> # The first step is `-learning_rate*sign(grad)` >>> var1.numpy() 9.9" 6345,adam_update_numpy,tensorflow/tensorflow/python/keras/optimizer_v2/adam_test.py,38,function, 6346,adam_update_numpy_amsgrad,tensorflow/tensorflow/python/keras/optimizer_v2/adam_test.py,56,function, 6347,adam_sparse_update_numpy_amsgrad,tensorflow/tensorflow/python/keras/optimizer_v2/adam_test.py,76,function, 6348,get_beta_accumulators,tensorflow/tensorflow/python/keras/optimizer_v2/adam_test.py,102,function, 6349,AdamOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/adam_test.py,111,class, 6350,NonFusedAdamOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/adam_test.py,563,class, 6351,Adamax,tensorflow/tensorflow/python/keras/optimizer_v2/adamax.py,33,class,"Optimizer that implements the Adamax algorithm. It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. Adamax is sometimes superior to Adam, especially in models with embeddings. Initialization: ```python m = 0 # Initialize initial 1st moment vector v = 0 # Initialize the exponentially weighted infinity norm t = 0 # Initialize timestep ``` The update rule for parameter `w` with gradient `g` is described at the end of section 7.1 of the paper: ```python t += 1 m = beta1 * m + (1 - beta1) * g v = max(beta2 * v, abs(g)) current_lr = learning_rate / (1 - beta1 ** t) w = w - current_lr * m / (v + epsilon) ``` Similarly to `Adam`, the epsilon is added for numerical stability (especially to get rid of division by zero when `v_t == 0`). In contrast to `Adam`, the sparse implementation of this algorithm (used when the gradient is an IndexedSlices object, typically because of `tf.gather` or an embedding lookup in the forward pass) only updates variable slices and corresponding `m_t`, `v_t` terms when that part of the variable was used in the forward pass. This means that the sparse behavior is in contrast to the dense behavior (similar to some momentum implementations which ignore momentum unless a variable slice was actually used). Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the exponentially weighted infinity norm. epsilon: A small constant for numerical stability. name: Optional name for the operations created when applying gradients. Defaults to `""Adamax""`. **kwargs: Keyword arguments. Allowed to be one of `""clipnorm""` or `""clipvalue""`. `""clipnorm""` (float) clips gradients by norm; `""clipvalue""` (float) clips gradients by value.
Reference: - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)" 6352,adamax_update_numpy,tensorflow/tensorflow/python/keras/optimizer_v2/adamax_test.py,36,function, 6353,adamax_sparse_update_numpy,tensorflow/tensorflow/python/keras/optimizer_v2/adamax_test.py,51,function, 6354,get_beta_accumulators,tensorflow/tensorflow/python/keras/optimizer_v2/adamax_test.py,72,function, 6355,AdamaxOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/adamax_test.py,79,class, 6356,Ftrl,tensorflow/tensorflow/python/keras/optimizer_v2/ftrl.py,30,class,"Optimizer that implements the FTRL algorithm. See Algorithm 1 of this [paper]( https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf). This version has support for both online L2 (the L2 penalty given in the paper above) and shrinkage-type L2 (which is the addition of an L2 penalty to the loss function). Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`. The learning rate. learning_rate_power: A float value, must be less than or equal to zero. Controls how the learning rate decreases during training. Use zero for a fixed learning rate. initial_accumulator_value: The starting value for accumulators. Only zero or positive values are allowed. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. name: Optional name prefix for the operations created when applying gradients. Defaults to `""Ftrl""`. l2_shrinkage_regularization_strength: A float value, must be greater than or equal to zero. This differs from L2 above in that the L2 above is a stabilization penalty, whereas this L2 shrinkage is a magnitude penalty. When input is sparse, shrinkage will only happen on the active weights. **kwargs: Keyword arguments. Allowed to be one of `""clipnorm""` or `""clipvalue""`. `""clipnorm""` (float) clips gradients by norm; `""clipvalue""` (float) clips gradients by value. Reference: - [paper]( https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)" 6357,FtrlOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/ftrl_test.py,35,class, 6358,SGD,tensorflow/tensorflow/python/keras/optimizer_v2/gradient_descent.py,30,class,"Gradient descent (with momentum) optimizer. Update rule for parameter `w` with gradient `g` when `momentum` is 0: ```python w = w - learning_rate * g ``` Update rule when `momentum` is larger than 0: ```python velocity = momentum * velocity - learning_rate * g w = w + velocity ``` When `nesterov=True`, this rule becomes: ```python velocity = momentum * velocity - learning_rate * g w = w + momentum * velocity - learning_rate * g ``` Args: learning_rate: A `Tensor`, floating point value, or a schedule that is a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that takes no arguments and returns the actual value to use. The learning rate. Defaults to 0.01. momentum: float hyperparameter >= 0 that accelerates gradient descent in the relevant direction and dampens oscillations. Defaults to 0, i.e., vanilla gradient descent. nesterov: boolean. Whether to apply Nesterov momentum. Defaults to `False`. name: Optional name prefix for the operations created when applying gradients. Defaults to `""SGD""`. **kwargs: Keyword arguments. Allowed to be one of `""clipnorm""` or `""clipvalue""`. `""clipnorm""` (float) clips gradients by norm; `""clipvalue""` (float) clips gradients by value.
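For concreteness, the momentum rule above can be checked with a few lines of plain Python (a minimal sketch; `sgd_momentum_step` is a hypothetical helper, not a TF API). It reproduces the step sizes shown in the usage example that follows:
```python
def sgd_momentum_step(w, g, velocity, lr=0.1, momentum=0.9):
  # velocity = momentum * velocity - learning_rate * g;  w = w + velocity
  velocity = momentum * velocity - lr * g
  return w + velocity, velocity

# loss = w**2 / 2, so the gradient g equals w. Starting from w = 1.0:
w, v = 1.0, 0.0
w, v = sgd_momentum_step(w, w, v)  # first step:  1.0 -> 0.9  (step size 0.1)
w, v = sgd_momentum_step(w, w, v)  # second step: 0.9 -> 0.72 (step size 0.18)
```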
Usage: >>> opt = tf.keras.optimizers.SGD(learning_rate=0.1) >>> var = tf.Variable(1.0) >>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var) = var >>> step_count = opt.minimize(loss, [var]).numpy() >>> # Step is `- learning_rate * grad` >>> var.numpy() 0.9 >>> opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9) >>> var = tf.Variable(1.0) >>> val0 = var.value() >>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var) = var >>> # First step is `- learning_rate * grad` >>> step_count = opt.minimize(loss, [var]).numpy() >>> val1 = var.value() >>> (val0 - val1).numpy() 0.1 >>> # On later steps, step-size increases because of momentum >>> step_count = opt.minimize(loss, [var]).numpy() >>> val2 = var.value() >>> (val1 - val2).numpy() 0.18 Reference: - For `nesterov=True`, See [Sutskever et al., 2013]( http://jmlr.org/proceedings/papers/v28/sutskever13.pdf)." 6359,GradientDescentOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/gradient_descent_test.py,40,class, 6360,MomentumOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/gradient_descent_test.py,295,class, 6361,LearningRateSchedule,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,34,class,"A serializable learning rate decay schedule. `LearningRateSchedule`s can be passed in as the learning rate of optimizers in `tf.keras.optimizers`. They can be serialized and deserialized using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`." 6362,ExponentialDecay,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,65,class,"A LearningRateSchedule that uses an exponential decay schedule. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies an exponential decay function to an optimizer step, given a provided initial learning rate. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): return initial_learning_rate * decay_rate ^ (step / decay_steps) ``` If the argument `staircase` is `True`, then `step / decay_steps` is an integer division and the decayed learning rate follows a staircase function. You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. Example: When fitting a Keras model, decay every 100000 steps with a base of 0.96: ```python initial_learning_rate = 0.1 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True) model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, epochs=5) ``` The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`." 6363,PiecewiseConstantDecay,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,170,class,"A LearningRateSchedule that uses a piecewise constant decay schedule.
The function returns a 1-arg callable to compute the piecewise constant when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5 for the next 10000 steps, and 0.1 for any additional steps. ```python step = tf.Variable(0, trainable=False) boundaries = [100000, 110000] values = [1.0, 0.5, 0.1] learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay( boundaries, values) # Later, whenever we perform an optimization step, we pass in the step. learning_rate = learning_rate_fn(step) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as the boundary tensors. The output of the 1-arg function that takes the `step` is `values[0]` when `step <= boundaries[0]`, `values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ..., and `values[-1]` when `step > boundaries[-1]`." 6364,PolynomialDecay,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,272,class,"A LearningRateSchedule that uses a polynomial decay schedule. It is commonly observed that a monotonically decreasing learning rate, whose degree of change is carefully chosen, results in a better performing model. This schedule applies a polynomial decay function to an optimizer step, given a provided `initial_learning_rate`, to reach an `end_learning_rate` in the given `decay_steps`. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) return ((initial_learning_rate - end_learning_rate) * (1 - step / decay_steps) ^ (power) ) + end_learning_rate ``` If `cycle` is True then a multiple of `decay_steps` is used, the first one that is bigger than `step`. ```python def decayed_learning_rate(step): decay_steps = decay_steps * ceil(step / decay_steps) return ((initial_learning_rate - end_learning_rate) * (1 - step / decay_steps) ^ (power) ) + end_learning_rate ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5): ```python ... starter_learning_rate = 0.1 end_learning_rate = 0.01 decay_steps = 10000 learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay( starter_learning_rate, decay_steps, end_learning_rate, power=0.5) model.compile(optimizer=tf.keras.optimizers.SGD( learning_rate=learning_rate_fn), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, epochs=5) ``` The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`.
Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`." 6365,InverseTimeDecay,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,416,class,"A LearningRateSchedule that uses an inverse time decay schedule. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies the inverse decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): return initial_learning_rate / (1 + decay_rate * step / decay_step) ``` or, if `staircase` is `True`, as: ```python def decayed_learning_rate(step): return initial_learning_rate / (1 + decay_rate * floor(step / decay_step)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. Example: Fit a Keras model when decaying 1/t with a rate of 0.5: ```python ... initial_learning_rate = 0.1 decay_steps = 1.0 decay_rate = 0.5 learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay( initial_learning_rate, decay_steps, decay_rate) model.compile(optimizer=tf.keras.optimizers.SGD( learning_rate=learning_rate_fn), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(data, labels, epochs=5) ``` Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`." 6366,CosineDecay,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,522,class,"A LearningRateSchedule that uses a cosine decay schedule. See [Loshchilov & Hutter, ICLR 2017], SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a cosine decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps)) decayed = (1 - alpha) * cosine_decay + alpha return initial_learning_rate * decayed ``` Example usage: ```python decay_steps = 1000 lr_decayed_fn = tf.keras.experimental.CosineDecay( initial_learning_rate, decay_steps) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`.
Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`." 6367,CosineDecayRestarts,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,615,class,"A LearningRateSchedule that uses a cosine decay schedule with restarts. See [Loshchilov & Hutter, ICLR 2017], SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a cosine decay function with restarts to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. The learning rate multiplier first decays from 1 to `alpha` for `first_decay_steps` steps. Then, a warm restart is performed. Each new warm restart runs for `t_mul` times more steps and with `m_mul` times smaller initial learning rate. Example usage: ```python first_decay_steps = 1000 lr_decayed_fn = ( tf.keras.experimental.CosineDecayRestarts( initial_learning_rate, first_decay_steps)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`." 6368,LinearCosineDecay,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,741,class,"A LearningRateSchedule that uses a linear cosine decay schedule. See [Bello et al., ICML2017] Neural Optimizer Search with RL. https://arxiv.org/abs/1709.07417 For the idea of warm starts here controlled by `num_periods`, see [Loshchilov & Hutter, ICLR 2017] SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 Note that linear cosine decay is more aggressive than cosine decay and larger initial learning rates can typically be used. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a linear cosine decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions.
It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) linear_decay = (decay_steps - step) / decay_steps cosine_decay = 0.5 * ( 1 + cos(pi * 2 * num_periods * step / decay_steps)) decayed = (alpha + linear_decay) * cosine_decay + beta return initial_learning_rate * decayed ``` Example usage: ```python decay_steps = 1000 lr_decayed_fn = ( tf.keras.experimental.LinearCosineDecay( initial_learning_rate, decay_steps)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`." 6369,NoisyLinearCosineDecay,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,859,class,"A LearningRateSchedule that uses a noisy linear cosine decay schedule. See [Bello et al., ICML2017] Neural Optimizer Search with RL. https://arxiv.org/abs/1709.07417 For the idea of warm starts here controlled by `num_periods`, see [Loshchilov & Hutter, ICLR 2017] SGDR: Stochastic Gradient Descent with Warm Restarts. https://arxiv.org/abs/1608.03983 Note that linear cosine decay is more aggressive than cosine decay and larger initial learning rates can typically be used. When training a model, it is often recommended to lower the learning rate as the training progresses. This schedule applies a noisy linear cosine decay function to an optimizer step, given a provided initial learning rate. It requires a `step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The schedule is a 1-arg callable that produces a decayed learning rate when passed the current optimizer step. This can be useful for changing the learning rate value across different invocations of optimizer functions. It is computed as: ```python def decayed_learning_rate(step): step = min(step, decay_steps) linear_decay = (decay_steps - step) / decay_steps cosine_decay = 0.5 * ( 1 + cos(pi * 2 * num_periods * step / decay_steps)) decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta return initial_learning_rate * decayed ``` where eps_t is 0-centered Gaussian noise with variance initial_variance / (1 + global_step) ** variance_decay Example usage: ```python decay_steps = 1000 lr_decayed_fn = ( tf.keras.experimental.NoisyLinearCosineDecay( initial_learning_rate, decay_steps)) ``` You can pass this schedule directly into a `tf.keras.optimizers.Optimizer` as the learning rate. The learning rate schedule is also serializable and deserializable using `tf.keras.optimizers.schedules.serialize` and `tf.keras.optimizers.schedules.deserialize`. Returns: A 1-arg callable learning rate schedule that takes the current optimizer step and outputs the decayed learning rate, a scalar `Tensor` of the same type as `initial_learning_rate`."
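All of the schedule entries above implement the same `LearningRateSchedule` contract: a `__call__(step)` that maps the optimizer's iteration counter to a learning rate, plus a `get_config()` used by `tf.keras.optimizers.schedules.serialize` and `deserialize`. As a minimal sketch (assuming TF 2.x; `MyExponentialDecay` is a hypothetical name, not a library class), a custom schedule reproducing the exponential-decay formula documented for `ExponentialDecay` could look like this:
```python
import tensorflow as tf


class MyExponentialDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
  # lr(step) = initial_learning_rate * decay_rate ** (step / decay_steps)

  def __init__(self, initial_learning_rate, decay_steps, decay_rate,
               staircase=False):
    self.initial_learning_rate = initial_learning_rate
    self.decay_steps = decay_steps
    self.decay_rate = decay_rate
    self.staircase = staircase

  def __call__(self, step):
    # `step` is the optimizer's iteration counter (a tf.Variable).
    p = tf.cast(step, tf.float32) / self.decay_steps
    if self.staircase:
      p = tf.floor(p)  # integer division yields the staircase behavior
    return self.initial_learning_rate * tf.pow(self.decay_rate, p)

  def get_config(self):
    # Needed so the schedule round-trips through schedules.serialize().
    return {'initial_learning_rate': self.initial_learning_rate,
            'decay_steps': self.decay_steps,
            'decay_rate': self.decay_rate,
            'staircase': self.staircase}


# Like the built-in schedules, it can be passed directly as a learning rate:
opt = tf.keras.optimizers.SGD(
    learning_rate=MyExponentialDecay(0.1, decay_steps=100000, decay_rate=0.96))
```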
6370,serialize,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,997,function, 6371,deserialize,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py,1002,function, 6372,_maybe_serialized,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,38,function, 6373,LRDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,51,class, 6374,LinearDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,170,class, 6375,SqrtDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,224,class, 6376,PolynomialDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,288,class, 6377,InverseDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,307,class, 6378,CosineDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,342,class, 6379,CosineDecayRestartsTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,375,class, 6380,LinearCosineDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,439,class, 6381,NoisyLinearCosineDecayTestV2,tensorflow/tensorflow/python/keras/optimizer_v2/learning_rate_schedule_test.py,482,class, 6382,exponential_decay,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,32,function,"Applies exponential decay to the learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies an exponential decay function to a provided initial learning rate. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) ``` If the argument `staircase` is `True`, then `global_step / decay_steps` is an integer division and the decayed learning rate follows a staircase function. Example: decay every 100000 steps with a base of 0.96: ```python ... global_step = tf.Variable(0, trainable=False) starter_learning_rate = 0.1 learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.96, staircase=True) # Passing global_step to minimize() will increment it at each step. learning_step = ( tf.compat.v1.train.GradientDescentOptimizer(learning_rate) .minimize(...my loss..., global_step=global_step) ) ``` Args: learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global step to use for the decay computation. Must not be negative. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must be positive. See the decay computation above. decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The decay rate. staircase: Boolean. If `True` decay the learning rate at discrete intervals. name: String. Optional name of the operation. Defaults to 'ExponentialDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor.
This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6383,piecewise_constant,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,108,function,"Piecewise constant from boundaries and interval values. Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5 for the next 10000 steps, and 0.1 for any additional steps. ```python global_step = tf.Variable(0, trainable=False) boundaries = [100000, 110000] values = [1.0, 0.5, 0.1] learning_rate = tf.compat.v1.train.piecewise_constant(global_step, boundaries, values) # Later, whenever we perform an optimization step, we increment global_step. ``` Args: x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`. boundaries: A list of `Tensor`s or `int`s or `float`s with strictly increasing entries, and with all elements having the same type as `x`. values: A list of `Tensor`s or `float`s or `int`s that specifies the values for the intervals defined by `boundaries`. It should have one more element than `boundaries`, and all elements should have the same type. name: A string. Optional name of the operation. Defaults to 'PiecewiseConstant'. Returns: A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`, `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ..., and `values[-1]` when `x > boundaries[-1]`. Raises: ValueError: if types of `x` and `boundaries` do not match, or types of all `values` do not match, or the number of elements in the lists does not match. @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6384,polynomial_decay,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,185,function,"Applies a polynomial decay to the learning rate. It is commonly observed that a monotonically decreasing learning rate, whose degree of change is carefully chosen, results in a better performing model. This function applies a polynomial decay function to a provided initial `learning_rate` to reach an `end_learning_rate` in the given `decay_steps`. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python global_step = min(global_step, decay_steps) decayed_learning_rate = (learning_rate - end_learning_rate) * (1 - global_step / decay_steps) ^ (power) + end_learning_rate ``` If `cycle` is True then a multiple of `decay_steps` is used, the first one that is bigger than `global_step`. ```python decay_steps = decay_steps * ceil(global_step / decay_steps) decayed_learning_rate = (learning_rate - end_learning_rate) * (1 - global_step / decay_steps) ^ (power) + end_learning_rate ``` Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5): ```python ... global_step = tf.Variable(0, trainable=False) starter_learning_rate = 0.1 end_learning_rate = 0.01 decay_steps = 10000 learning_rate = tf.compat.v1.train.polynomial_decay(starter_learning_rate, global_step, decay_steps, end_learning_rate, power=0.5) # Passing global_step to minimize() will increment it at each step.
learning_step = ( tf.compat.v1.train.GradientDescentOptimizer(learning_rate) .minimize(...my loss..., global_step=global_step) ) ``` Args: learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global step to use for the decay computation. Must not be negative. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must be positive. See the decay computation above. end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The minimal end learning rate. power: A scalar `float32` or `float64` `Tensor` or a Python number. The power of the polynomial. Defaults to linear, 1.0. cycle: A boolean, whether or not it should cycle beyond decay_steps. name: String. Optional name of the operation. Defaults to 'PolynomialDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6385,natural_exp_decay,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,286,function,"Applies natural exponential decay to the initial learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies an exponential decay function to a provided initial learning rate. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python decayed_learning_rate = learning_rate * exp(-decay_rate * global_step / decay_step) ``` or, if `staircase` is `True`, as: ```python decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step / decay_step)) ``` Example: decay exponentially with a base of 0.96: ```python ... global_step = tf.Variable(0, trainable=False) learning_rate = 0.1 decay_steps = 5 k = 0.5 learning_rate = tf.compat.v1.train.natural_exp_decay(learning_rate, global_step, decay_steps, k) # Passing global_step to minimize() will increment it at each step. learning_step = ( tf.compat.v1.train.GradientDescentOptimizer(learning_rate) .minimize(...my loss..., global_step=global_step) ) ``` Args: learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. global_step: A Python number. Global step to use for the decay computation. Must not be negative. decay_steps: How often to apply decay. decay_rate: A Python number. The decay rate. staircase: Whether to apply decay in a discrete staircase, as opposed to continuous, fashion. name: String. Optional name of the operation. Defaults to 'ExponentialTimeDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions.
@end_compatibility" 6386,inverse_time_decay,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,374,function,"Applies inverse time decay to the initial learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies an inverse decay function to a provided initial learning rate. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step) ``` or, if `staircase` is `True`, as: ```python decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step)) ``` Example: decay 1/t with a rate of 0.5: ```python ... global_step = tf.Variable(0, trainable=False) learning_rate = 0.1 decay_steps = 1.0 decay_rate = 0.5 learning_rate = tf.compat.v1.train.inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate) # Passing global_step to minimize() will increment it at each step. learning_step = ( tf.compat.v1.train.GradientDescentOptimizer(learning_rate) .minimize(...my loss..., global_step=global_step) ) ``` Args: learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. global_step: A Python number. Global step to use for the decay computation. Must not be negative. decay_steps: How often to apply decay. decay_rate: A Python number. The decay rate. staircase: Whether to apply decay in a discrete staircase, as opposed to continuous, fashion. name: String. Optional name of the operation. Defaults to 'InverseTimeDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6387,cosine_decay,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,457,function,"Applies cosine decay to the learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies a cosine decay function to a provided initial learning rate. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python global_step = min(global_step, decay_steps) cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps)) decayed = (1 - alpha) * cosine_decay + alpha decayed_learning_rate = learning_rate * decayed ``` Example usage: ```python decay_steps = 1000 lr_decayed = cosine_decay(learning_rate, global_step, decay_steps) ``` Args: learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global step to use for the decay computation. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum learning rate value as a fraction of learning_rate. name: String.
Optional name of the operation. Defaults to 'CosineDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. References: Stochastic Gradient Descent with Warm Restarts: [Loshchilov et al., 2017] (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx) ([pdf](https://openreview.net/pdf?id=Skq89Scxx)) @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6388,cosine_decay_restarts,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,520,function,"Applies cosine decay with restarts to the learning rate. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies a cosine decay function with restarts to a provided initial learning rate. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate while taking into account possible warm restarts. The learning rate multiplier first decays from 1 to `alpha` for `first_decay_steps` steps. Then, a warm restart is performed. Each new warm restart runs for `t_mul` times more steps and with `m_mul` times smaller initial learning rate. Example usage: ```python first_decay_steps = 1000 lr_decayed = cosine_decay_restarts(learning_rate, global_step, first_decay_steps) ``` Args: learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global step to use for the decay computation. first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. t_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to derive the number of iterations in the i-th period. m_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to derive the initial learning rate of the i-th period. alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum learning rate value as a fraction of the learning_rate. name: String. Optional name of the operation. Defaults to 'SGDRDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. References: Stochastic Gradient Descent with Warm Restarts: [Loshchilov et al., 2017] (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx) ([pdf](https://openreview.net/pdf?id=Skq89Scxx)) @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6389,linear_cosine_decay,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,597,function,"Applies linear cosine decay to the learning rate. Note that linear cosine decay is more aggressive than cosine decay and larger initial learning rates can typically be used. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies a linear cosine decay function to a provided initial learning rate.
It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python global_step = min(global_step, decay_steps) linear_decay = (decay_steps - global_step) / decay_steps cosine_decay = 0.5 * ( 1 + cos(pi * 2 * num_periods * global_step / decay_steps)) decayed = (alpha + linear_decay) * cosine_decay + beta decayed_learning_rate = learning_rate * decayed ``` Example usage: ```python decay_steps = 1000 lr_decayed = linear_cosine_decay(learning_rate, global_step, decay_steps) ``` Args: learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global step to use for the decay computation. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. num_periods: Number of periods in the cosine part of the decay. See computation above. alpha: See computation above. beta: See computation above. name: String. Optional name of the operation. Defaults to 'LinearCosineDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. References: Neural Optimizer Search with Reinforcement Learning: [Bello et al., 2017](http://proceedings.mlr.press/v70/bello17a.html) ([pdf](http://proceedings.mlr.press/v70/bello17a/bello17a.pdf)) Stochastic Gradient Descent with Warm Restarts: [Loshchilov et al., 2017] (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx) ([pdf](https://openreview.net/pdf?id=Skq89Scxx)) @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6390,noisy_linear_cosine_decay,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay.py,682,function,"Applies noisy linear cosine decay to the learning rate. Note that linear cosine decay is more aggressive than cosine decay and larger initial learning rates can typically be used. When training a model, it is often recommended to lower the learning rate as the training progresses. This function applies a noisy linear cosine decay function to a provided initial learning rate. It requires a `global_step` value to compute the decayed learning rate. You can just pass a TensorFlow variable that you increment at each training step. The function returns the decayed learning rate. It is computed as: ```python global_step = min(global_step, decay_steps) linear_decay = (decay_steps - global_step) / decay_steps cosine_decay = 0.5 * ( 1 + cos(pi * 2 * num_periods * global_step / decay_steps)) decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta decayed_learning_rate = learning_rate * decayed ``` where eps_t is 0-centered Gaussian noise with variance initial_variance / (1 + global_step) ** variance_decay Example usage: ```python decay_steps = 1000 lr_decayed = noisy_linear_cosine_decay( learning_rate, global_step, decay_steps) ``` Args: learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global step to use for the decay computation. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over. initial_variance: initial variance for the noise. See computation above. variance_decay: decay for the noise's variance. See computation above. num_periods: Number of periods in the cosine part of the decay. See computation above. alpha: See computation above. beta: See computation above. name: String. Optional name of the operation. Defaults to 'NoisyLinearCosineDecay'. Returns: A scalar `Tensor` of the same type as `learning_rate`. The decayed learning rate. Raises: ValueError: if `global_step` is not supplied. References: Neural Optimizer Search with Reinforcement Learning: [Bello et al., 2017](http://proceedings.mlr.press/v70/bello17a.html) ([pdf](http://proceedings.mlr.press/v70/bello17a/bello17a.pdf)) Stochastic Gradient Descent with Warm Restarts: [Loshchilov et al., 2017] (https://openreview.net/forum?id=Skq89Scxx&noteId=Skq89Scxx) ([pdf](https://openreview.net/pdf?id=Skq89Scxx)) @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility" 6391,LRDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,33,class, 6392,LinearDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,147,class, 6393,SqrtDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,192,class, 6394,PolynomialDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,246,class, 6395,ExponentialDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,259,class, 6396,InverseDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,291,class, 6397,CosineDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,323,class, 6398,CosineDecayRestartsTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,352,class, 6399,LinearCosineDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,410,class, 6400,NoisyLinearCosineDecayTest,tensorflow/tensorflow/python/keras/optimizer_v2/legacy_learning_rate_decay_test.py,450,class, 6401,Nadam,tensorflow/tensorflow/python/keras/optimizer_v2/nadam.py,34,class,"Optimizer that implements the NAdam algorithm. Much like Adam is essentially RMSprop with momentum, Nadam is Adam with Nesterov momentum. Args: learning_rate: A Tensor or a floating point value. The learning rate. beta_1: A float value or a constant float tensor. The exponential decay rate for the 1st moment estimates. beta_2: A float value or a constant float tensor. The exponential decay rate for the 2nd moment estimates. epsilon: A small constant for numerical stability. name: Optional name for the operations created when applying gradients. Defaults to `""Nadam""`. **kwargs: Keyword arguments. Allowed to be one of `""clipnorm""` or `""clipvalue""`. `""clipnorm""` (float) clips gradients by norm; `""clipvalue""` (float) clips gradients by value. Reference: - [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf)."
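The Adam-family entries above (`Adam`, `NonFusedAdam`, and `Nadam`, which adds Nesterov momentum) all document the same bias-corrected moment updates. As a sanity check, here is a small NumPy reference of that update (a sketch under the 'epsilon hat' formulation quoted above, not the TF kernel; `adam_step` is a hypothetical name):
```python
import numpy as np


def adam_step(theta, g, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-7):
  # lr_t = lr * sqrt(1 - beta2**t) / (1 - beta1**t)   (t is the 1-based step)
  lr_t = lr * np.sqrt(1.0 - beta2**t) / (1.0 - beta1**t)
  m = beta1 * m + (1.0 - beta1) * g       # 1st-moment estimate
  v = beta2 * v + (1.0 - beta2) * g * g   # 2nd-moment estimate
  theta = theta - lr_t * m / (np.sqrt(v) + eps)
  return theta, m, v


# With lr=0.1 the first step is ~ -lr * sign(g), matching the Adam doctest
# above (var goes from 10.0 to ~9.9 for loss = var**2 / 2, where g = var).
theta, m, v = adam_step(10.0, 10.0, 0.0, 0.0, t=1, lr=0.1)
print(round(theta, 4))  # ~9.9
```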
6402,get_beta_accumulators,tensorflow/tensorflow/python/keras/optimizer_v2/nadam_test.py,32,function, 6403,update_m_cache,tensorflow/tensorflow/python/keras/optimizer_v2/nadam_test.py,41,function, 6404,nadam_update_numpy,tensorflow/tensorflow/python/keras/optimizer_v2/nadam_test.py,47,function, 6405,NadamOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/nadam_test.py,73,class, 6406,_deduplicate_indexed_slices,tensorflow/tensorflow/python/keras/optimizer_v2/optimizer_v2.py,59,function,"Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index." 6407,OptimizerV2,tensorflow/tensorflow/python/keras/optimizer_v2/optimizer_v2.py,81,class,"Base class for Keras optimizers. You should not use this class directly, but instead instantiate one of its subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`, etc. ### Usage ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 * var1 + 2 * var2 * var2 # In graph mode, returns op that minimizes the loss by updating the listed # variables. opt_op = opt.minimize(loss, var_list=[var1, var2]) opt_op.run() # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) ``` ### Usage in custom training loops In Keras models, sometimes variables are created when the model is first called, instead of at construction time. Examples include 1) sequential models without input shape pre-defined, or 2) subclassed models. Pass var_list as callable in these cases. Example: ```python opt = tf.keras.optimizers.SGD(learning_rate=0.1) model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(num_hidden, activation='relu')) model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid')) loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn = lambda: model.trainable_weights for input, output in data: opt.minimize(loss_fn, var_list_fn) ``` ### Processing gradients before applying them Calling `minimize()` takes care of both computing the gradients and applying them to the variables. If you want to process the gradients before applying them you can instead use the optimizer in three steps: 1. Compute the gradients with `tf.GradientTape`. 2. Process the gradients as you wish. 3. Apply the processed gradients with `apply_gradients()`. Example: ```python # Create an optimizer. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # Compute the gradients for a list of variables. with tf.GradientTape() as tape: loss = <call_loss_function> vars = <list_of_variables> grads = tape.gradient(loss, vars) # Process the gradients, for example cap them, etc. # capped_grads = [MyCapper(g) for g in grads] processed_grads = [process_gradient(g) for g in grads] # Ask the optimizer to apply the processed gradients. opt.apply_gradients(zip(processed_grads, var_list)) ``` ### Use with `tf.distribute.Strategy` This optimizer class is `tf.distribute.Strategy` aware, which means it automatically sums gradients across all replicas.
To average gradients, you divide your loss by the global batch size, which is done automatically if you use `tf.keras` built-in training or evaluation loops. See the `reduction` argument of your loss, which should be set to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not averaging. To aggregate gradients yourself, call `apply_gradients` with `experimental_aggregate_gradients` set to False. This is useful if you need to process aggregated gradients. If you are not using these and you want to average gradients, you should use `tf.math.reduce_sum` to add up your per-example losses and then divide by the global batch size. Note that when using `tf.distribute.Strategy`, the first component of a tensor's shape is the *replica-local* batch size, which is off by a factor equal to the number of replicas being used to compute a single step. As a result, using `tf.math.reduce_mean` will give the wrong answer, resulting in gradients that can be many times too big. ### Variable Constraints All Keras optimizers respect variable constraints. If a constraint function is passed to any variable, the constraint will be applied to the variable after the gradient has been applied to the variable. Important: If the gradient is a sparse tensor, variable constraints are not supported. ### Thread Compatibility The entire optimizer is currently thread compatible, not thread-safe. The user needs to perform synchronization if necessary. ### Slots Many optimizer subclasses, such as `Adam` and `Adagrad`, allocate and manage additional variables associated with the variables to train. These are called Slots. Slots have names and you can ask the optimizer for the names of the slots that it uses. Once you have a slot name you can ask the optimizer for the variable it created to hold the slot value. This can be useful if you want to log or debug a training algorithm, report stats about the slots, etc. ### Hyperparameters These are arguments passed to the optimizer subclass constructor (the `__init__` method), and then passed to `self._set_hyper()`. They can be either regular Python values (like 1.0), tensors, or callables. If they are callable, the callable will be called during `apply_gradients()` to get the value for the hyperparameter. Hyperparameters can be overwritten through user code: Example: ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 + 2 * var2 # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) # update learning rate opt.learning_rate = 0.05 opt.minimize(loss, var_list=[var1, var2]) ``` ### Callable learning rate Optimizer accepts a callable learning rate in two ways. The first way is through built-in or customized `tf.keras.optimizers.schedules.LearningRateSchedule`. The schedule will be called on each iteration with `schedule(iteration)`, a `tf.Variable` owned by the optimizer. Example: >>> var = tf.Variable(np.random.random(size=(1,))) >>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay( ... initial_learning_rate=.01, decay_steps=20, decay_rate=.1) >>> opt = tf.keras.optimizers.SGD(learning_rate=learning_rate) >>> loss = lambda: 3 * var >>> opt.minimize(loss, var_list=[var]) The second way is through a callable function that does not accept any arguments. Example: >>> var = tf.Variable(np.random.random(size=(1,))) >>> def lr_callable(): ...
return .1 >>> opt = tf.keras.optimizers.SGD(learning_rate=lr_callable) >>> loss = lambda: 3 * var >>> opt.minimize(loss, var_list=[var]) >>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1) >>> var1 = tf.Variable(10.0) >>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1 >>> step_count = opt.minimize(loss, [var1]).numpy() >>> var1.numpy() 9.683772 Reference: - [Hinton, 2012](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)" 6422,RMSpropOptimizerTest,tensorflow/tensorflow/python/keras/optimizer_v2/rmsprop_test.py,62,class, 6423,SlotColocationTest,tensorflow/tensorflow/python/keras/optimizer_v2/rmsprop_test.py,552,class, 6424,LinearModel,tensorflow/tensorflow/python/keras/premade/linear.py,32,class,"Linear Model for regression and classification problems. This model approximates the following function: $$y = \beta + \sum_{i=1}^{N} w_{i} * x_{i}$$ where $$\beta$$ is the bias and $$w_{i}$$ is the weight for each feature. Example: ```python model = LinearModel() model.compile(optimizer='sgd', loss='mse') model.fit(x, y, epochs=epochs) ``` This model accepts sparse float inputs as well: Example: ```python model = LinearModel() opt = tf.keras.optimizers.Adam() loss_fn = tf.keras.losses.MeanSquaredError() with tf.GradientTape() as tape: output = model(sparse_input) loss = tf.reduce_mean(loss_fn(target, output)) grads = tape.gradient(loss, model.weights) opt.apply_gradients(zip(grads, model.weights)) ```" 6425,LinearModelTest,tensorflow/tensorflow/python/keras/premade/linear_test.py,44,class, 6426,WideDeepModel,tensorflow/tensorflow/python/keras/premade/wide_deep.py,34,class,"Wide & Deep Model for regression and classification problems. This model jointly trains a linear and a DNN model. Example: ```python linear_model = LinearModel() dnn_model = keras.Sequential([keras.layers.Dense(units=64), keras.layers.Dense(units=1)]) combined_model = WideDeepModel(linear_model, dnn_model) combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse']) # define dnn_inputs and linear_inputs as separate numpy arrays or # a single numpy array if dnn_inputs is same as linear_inputs. combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs) # or define a single `tf.data.Dataset` that contains a single tensor or # separate tensors for dnn_inputs and linear_inputs. dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y)) combined_model.fit(dataset, epochs=epochs) ``` Both the linear and DNN models can be pre-compiled and trained separately before joint training: Example: ```python linear_model = LinearModel() linear_model.compile('adagrad', 'mse') linear_model.fit(linear_inputs, y, epochs=epochs) dnn_model = keras.Sequential([keras.layers.Dense(units=1)]) dnn_model.compile('rmsprop', 'mse') dnn_model.fit(dnn_inputs, y, epochs=epochs) combined_model = WideDeepModel(linear_model, dnn_model) combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse']) combined_model.fit([linear_inputs, dnn_inputs], y, epochs=epochs) ```" 6427,WideDeepModelTest,tensorflow/tensorflow/python/keras/premade/wide_deep_test.py,41,class, 6428,index_directory,tensorflow/tensorflow/python/keras/preprocessing/dataset_utils.py,31,function,"Make a list of all files in the subdirs of `directory`, with their labels. Args: directory: The target directory (string). labels: Either ""inferred"" (labels are generated from the directory structure), or a list/tuple of integer labels of the same size as the number of valid files found in the directory.
Labels should be sorted according to the alphanumeric order of the image file paths (obtained via `os.walk(directory)` in Python). formats: Allowlist of file extensions to index (e.g. "".jpg"", "".txt""). class_names: Only valid if ""labels"" is ""inferred"". This is the explicit list of class names (must match names of subdirectories). Used to control the order of the classes (otherwise alphanumerical order is used). shuffle: Whether to shuffle the data. Default: True. If set to False, sorts the data in alphanumeric order. seed: Optional random seed for shuffling. follow_links: Whether to visit subdirectories pointed to by symlinks. Returns: tuple (file_paths, labels, class_names). file_paths: list of file paths (strings). labels: list of matching integer labels (same length as file_paths). class_names: names of the classes corresponding to these labels, in order." 6429,iter_valid_files,tensorflow/tensorflow/python/keras/preprocessing/dataset_utils.py,123,function, 6430,index_subdirectory,tensorflow/tensorflow/python/keras/preprocessing/dataset_utils.py,131,function,"Recursively walks a directory and lists image paths and their class index. Arguments: directory: string, target directory. class_indices: dict mapping class names to their index. follow_links: boolean, whether to recursively follow subdirectories (if False, we only list top-level images in `directory`). formats: Allowlist of file extensions to index (e.g. "".jpg"", "".txt""). Returns: tuple `(filenames, labels)`. `filenames` is a list of relative file paths, and `labels` is a list of integer labels corresponding to these files." 6431,get_training_or_validation_split,tensorflow/tensorflow/python/keras/preprocessing/dataset_utils.py,159,function,"Potentially restrict samples & labels to a training or validation split. Args: samples: List of elements. labels: List of corresponding labels. validation_split: Float, fraction of data to reserve for validation. subset: Subset of the data to return. Either ""training"", ""validation"", or None. If None, we return all of the data. Returns: tuple (samples, labels), potentially restricted to the specified subset." 6432,labels_to_dataset,tensorflow/tensorflow/python/keras/preprocessing/dataset_utils.py,191,function, 6433,check_validation_split_arg,tensorflow/tensorflow/python/keras/preprocessing/dataset_utils.py,201,function,Raise errors in case of invalid argument values. 6434,smart_resize,tensorflow/tensorflow/python/keras/preprocessing/image.py,55,function,"Resize images to a target size without aspect ratio distortion. TensorFlow image datasets typically yield images that each have a different size. However, these images need to be batched before they can be processed by Keras layers. To be batched, images need to share the same height and width. You could simply do: ```python size = (200, 200) ds = ds.map(lambda img: tf.image.resize(img, size)) ``` However, if you do this, you distort the aspect ratio of your images, since in general they do not all have the same aspect ratio as `size`. This is fine in many cases, but not always (e.g. for GANs this can be a problem). Note that passing the argument `preserve_aspect_ratio=True` to `resize` will preserve the aspect ratio, but at the cost of no longer respecting the provided target size. Because `tf.image.resize` doesn't crop images, your output images will still have different sizes.
This calls for: ```python size = (200, 200) ds = ds.map(lambda img: smart_resize(img, size)) ``` Your output images will actually be `(200, 200)`, and will not be distorted. Instead, the parts of the image that do not fit within the target size get cropped out. The resizing process is: 1. Take the largest centered crop of the image that has the same aspect ratio as the target size. For instance, if `size=(200, 200)` and the input image has size `(340, 500)`, we take a crop of `(340, 340)` centered along the width. 2. Resize the cropped image to the target size. In the example above, we resize the `(340, 340)` crop to `(200, 200)`. Arguments: x: Input image (as a tensor or NumPy array). Must be in format `(height, width, channels)`. size: Tuple of `(height, width)` integers. Target size. interpolation: String, interpolation to use for resizing. Defaults to `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. Returns: Array with shape `(size[0], size[1], channels)`. If the input image was a NumPy array, the output is a NumPy array, and if it was a TF tensor, the output is a TF tensor." 6435,array_to_img,tensorflow/tensorflow/python/keras/preprocessing/image.py,152,function,"Converts a 3D Numpy array to a PIL Image instance. Usage: ```python import numpy as np from PIL import Image img = np.random.random(size=(100, 100, 3)) pil_img = tf.keras.preprocessing.image.array_to_img(img) ``` Arguments: x: Input Numpy array. data_format: Image data format, can be either ""channels_first"" or ""channels_last"". Defaults to `None`, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to ""channels_last""). scale: Whether to rescale image values to be within `[0, 255]`. Defaults to `True`. dtype: Dtype to use. Defaults to `None`, in which case the global setting `tf.keras.backend.floatx()` is used (unless you changed it, it defaults to ""float32""). Returns: A PIL Image instance. Raises: ImportError: if PIL is not available. ValueError: if invalid `x` or `data_format` is passed." 6436,img_to_array,tensorflow/tensorflow/python/keras/preprocessing/image.py,195,function,"Converts a PIL Image instance to a Numpy array. Usage: ```python import numpy as np from PIL import Image img_data = np.random.random(size=(100, 100, 3)) img = tf.keras.preprocessing.image.array_to_img(img_data) array = tf.keras.preprocessing.image.img_to_array(img) ``` Arguments: img: Input PIL Image instance. data_format: Image data format, can be either ""channels_first"" or ""channels_last"". Defaults to `None`, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to ""channels_last""). dtype: Dtype to use. Defaults to `None`, in which case the global setting `tf.keras.backend.floatx()` is used (unless you changed it, it defaults to ""float32""). Returns: A 3D Numpy array. Raises: ValueError: if invalid `img` or `data_format` is passed." 6437,save_img,tensorflow/tensorflow/python/keras/preprocessing/image.py,236,function,"Saves an image stored as a Numpy array to a path or file object. Arguments: path: Path or file object. x: Numpy array. data_format: Image data format, either ""channels_first"" or ""channels_last"". file_format: Optional file format override. If omitted, the format to use is determined from the filename extension. If a file object was used instead of a filename, this parameter should always be used. scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`." 6438,load_img,tensorflow/tensorflow/python/keras/preprocessing/image.py,265,function,"Loads an image into PIL format. Usage: ```python image = tf.keras.preprocessing.image.load_img(image_path) input_arr = tf.keras.preprocessing.image.img_to_array(image) input_arr = np.array([input_arr]) # Convert single image to a batch. predictions = model.predict(input_arr) ``` Arguments: path: Path to image file. grayscale: DEPRECATED: use `color_mode=""grayscale""`. color_mode: One of ""grayscale"", ""rgb"", ""rgba"". Default: ""rgb"". The desired image format. target_size: Either `None` (default to original size) or tuple of ints `(img_height, img_width)`. interpolation: Interpolation method used to resample the image if the target size is different from that of the loaded image. Supported methods are ""nearest"", ""bilinear"", and ""bicubic"". If PIL version 1.1.3 or newer is installed, ""lanczos"" is also supported. If PIL version 3.4.0 or newer is installed, ""box"" and ""hamming"" are also supported. By default, ""nearest"" is used. Returns: A PIL Image instance. Raises: ImportError: if PIL is not available. ValueError: if interpolation method is not supported." 6439,Iterator,tensorflow/tensorflow/python/keras/preprocessing/image.py,304,class, 6440,DirectoryIterator,tensorflow/tensorflow/python/keras/preprocessing/image.py,309,class,"Iterator capable of reading images from a directory on disk. Arguments: directory: Path to the directory to read images from. Each subdirectory in this directory will be considered to contain images from one class, or alternatively you could specify class subdirectories via the `classes` argument. image_data_generator: Instance of `ImageDataGenerator` to use for random transformations and normalization. target_size: tuple of integers, dimensions to resize input images to. color_mode: One of `""rgb""`, `""rgba""`, `""grayscale""`. Color mode to read images. classes: Optional list of strings, names of subdirectories containing images from each class (e.g. `[""dogs"", ""cats""]`). It will be computed automatically if not set. class_mode: Mode for yielding the targets: `""binary""`: binary targets (if there are only two classes), `""categorical""`: categorical targets, `""sparse""`: integer targets, `""input""`: targets are images identical to input images (mainly used to work with autoencoders), `None`: no targets get yielded (only input images are yielded). batch_size: Integer, size of a batch. shuffle: Boolean, whether to shuffle the data between epochs. seed: Random seed for data shuffling. data_format: String, one of `channels_first`, `channels_last`. save_to_dir: Optional directory where to save the pictures being yielded, in a viewable format. This is useful for visualizing the random transformations being applied, for debugging purposes. save_prefix: String prefix to use for saving sample images (if `save_to_dir` is set). save_format: Format to use for saving sample images (if `save_to_dir` is set). subset: Subset of data (`""training""` or `""validation""`) if validation_split is set in ImageDataGenerator. interpolation: Interpolation method used to resample the image if the target size is different from that of the loaded image. Supported methods are ""nearest"", ""bilinear"", and ""bicubic"". If PIL version 1.1.3 or newer is installed, ""lanczos"" is also supported. If PIL version 3.4.0 or newer is installed, ""box"" and ""hamming"" are also supported. By default, ""nearest"" is used.
dtype: Dtype to use for generated arrays." 6441,NumpyArrayIterator,tensorflow/tensorflow/python/keras/preprocessing/image.py,400,class,"Iterator yielding data from a Numpy array. Arguments: x: Numpy array of input data or tuple. If tuple, the second element is either another numpy array or a list of numpy arrays, each of which gets passed through as an output without any modifications. y: Numpy array of targets data. image_data_generator: Instance of `ImageDataGenerator` to use for random transformations and normalization. batch_size: Integer, size of a batch. shuffle: Boolean, whether to shuffle the data between epochs. sample_weight: Numpy array of sample weights. seed: Random seed for data shuffling. data_format: String, one of `channels_first`, `channels_last`. save_to_dir: Optional directory where to save the pictures being yielded, in a viewable format. This is useful for visualizing the random transformations being applied, for debugging purposes. save_prefix: String prefix to use for saving sample images (if `save_to_dir` is set). save_format: Format to use for saving sample images (if `save_to_dir` is set). subset: Subset of data (`""training""` or `""validation""`) if validation_split is set in ImageDataGenerator. dtype: Dtype to use for the generated arrays." 6442,DataFrameIterator,tensorflow/tensorflow/python/keras/preprocessing/image.py,463,class,"Iterator capable of reading images from a directory on disk as a dataframe. Arguments: dataframe: Pandas dataframe containing the filepaths relative to `directory` (or absolute paths if `directory` is None) of the images in a string column. It should include other column/s depending on the `class_mode`: - if `class_mode` is `""categorical""` (default value) it must include the `y_col` column with the class/es of each image. Values in the column can be a string/list/tuple for a single class, or a list/tuple for multiple classes. - if `class_mode` is `""binary""` or `""sparse""` it must include the given `y_col` column with class values as strings. - if `class_mode` is `""raw""` or `""multi_output""` it should contain the columns specified in `y_col`. - if `class_mode` is `""input""` or `None` no extra column is needed. directory: string, path to the directory to read images from. If `None`, data in `x_col` column should be absolute paths. image_data_generator: Instance of `ImageDataGenerator` to use for random transformations and normalization. If None, no transformations and normalizations are made. x_col: string, column in `dataframe` that contains the filenames (or absolute paths if `directory` is `None`). y_col: string or list, column/s in `dataframe` that contain the target data. weight_col: string, column in `dataframe` that contains the sample weights. Default: `None`. target_size: tuple of integers, dimensions to resize input images to. color_mode: One of `""rgb""`, `""rgba""`, `""grayscale""`. Color mode to read images. classes: Optional list of strings, classes to use (e.g. `[""dogs"", ""cats""]`). If None, all classes in `y_col` will be used. class_mode: one of ""binary"", ""categorical"", ""input"", ""multi_output"", ""raw"", ""sparse"" or None. Default: ""categorical"". Mode for yielding the targets: - `""binary""`: 1D numpy array of binary labels, - `""categorical""`: 2D numpy array of one-hot encoded labels. Supports multi-label output.
- `""input""`: images identical to input images (mainly used to work with autoencoders), - `""multi_output""`: list with the values of the different columns, - `""raw""`: numpy array of values in `y_col` column(s), - `""sparse""`: 1D numpy array of integer labels, - `None`, no targets are returned (the generator will only yield batches of image data, which is useful to use in `model.predict()`). batch_size: Integer, size of a batch. shuffle: Boolean, whether to shuffle the data between epochs. seed: Random seed for data shuffling. data_format: String, one of `channels_first`, `channels_last`. save_to_dir: Optional directory where to save the pictures being yielded, in a viewable format. This is useful for visualizing the random transformations being applied, for debugging purposes. save_prefix: String prefix to use for saving sample images (if `save_to_dir` is set). save_format: Format to use for saving sample images (if `save_to_dir` is set). subset: Subset of data (`""training""` or `""validation""`) if validation_split is set in ImageDataGenerator. interpolation: Interpolation method used to resample the image if the target size is different from that of the loaded image. Supported methods are ""nearest"", ""bilinear"", and ""bicubic"". If PIL version 1.1.3 or newer is installed, ""lanczos"" is also supported. If PIL version 3.4.0 or newer is installed, ""box"" and ""hamming"" are also supported. By default, ""nearest"" is used. dtype: Dtype to use for the generated arrays. validate_filenames: Boolean, whether to validate image filenames in `x_col`. If `True`, invalid images will be ignored. Disabling this option can lead to speed-up in the instantiation of this class. Default: `True`." 6443,ImageDataGenerator,tensorflow/tensorflow/python/keras/preprocessing/image.py,581,class,"Generate batches of tensor image data with real-time data augmentation. The data will be looped over (in batches). Arguments: featurewise_center: Boolean. Set input mean to 0 over the dataset, feature-wise. samplewise_center: Boolean. Set each sample mean to 0. featurewise_std_normalization: Boolean. Divide inputs by std of the dataset, feature-wise. samplewise_std_normalization: Boolean. Divide each input by its std. zca_epsilon: epsilon for ZCA whitening. Default is 1e-6. zca_whitening: Boolean. Apply ZCA whitening. rotation_range: Int. Degree range for random rotations. width_shift_range: Float, 1-D array-like or int - float: fraction of total width, if < 1, or pixels if >= 1. - 1-D array-like: random elements from the array. - int: integer number of pixels from interval `(-width_shift_range, +width_shift_range)` - With `width_shift_range=2` possible values are integers `[-1, 0, +1]`, same as with `width_shift_range=[-1, 0, +1]`, while with `width_shift_range=1.0` possible values are floats in the interval [-1.0, +1.0). height_shift_range: Float, 1-D array-like or int - float: fraction of total height, if < 1, or pixels if >= 1. - 1-D array-like: random elements from the array. - int: integer number of pixels from interval `(-height_shift_range, +height_shift_range)` - With `height_shift_range=2` possible values are integers `[-1, 0, +1]`, same as with `height_shift_range=[-1, 0, +1]`, while with `height_shift_range=1.0` possible values are floats in the interval [-1.0, +1.0). brightness_range: Tuple or list of two floats. Range for picking a brightness shift value from. shear_range: Float. Shear Intensity (Shear angle in counter-clockwise direction in degrees) zoom_range: Float or [lower, upper]. 
Range for random zoom. If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`. channel_shift_range: Float. Range for random channel shifts. fill_mode: One of {""constant"", ""nearest"", ""reflect"" or ""wrap""}. Default is 'nearest'. Points outside the boundaries of the input are filled according to the given mode: - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k) - 'nearest': aaaaaaaa|abcd|dddddddd - 'reflect': abcddcba|abcd|dcbaabcd - 'wrap': abcdabcd|abcd|abcdabcd cval: Float or Int. Value used for points outside the boundaries when `fill_mode = ""constant""`. horizontal_flip: Boolean. Randomly flip inputs horizontally. vertical_flip: Boolean. Randomly flip inputs vertically. rescale: rescaling factor. Defaults to None. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (after applying all other transformations). preprocessing_function: function that will be applied on each input. The function will run after the image is resized and augmented. The function should take one argument: one image (Numpy tensor with rank 3), and should output a Numpy tensor with the same shape. data_format: Image data format, either ""channels_first"" or ""channels_last"". ""channels_last"" mode means that the images should have shape `(samples, height, width, channels)`, ""channels_first"" mode means that the images should have shape `(samples, channels, height, width)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be ""channels_last"". validation_split: Float. Fraction of images reserved for validation (strictly between 0 and 1). dtype: Dtype to use for the generated arrays. Examples: Example of using `.flow(x, y)`: ```python (x_train, y_train), (x_test, y_test) = cifar10.load_data() y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) datagen = ImageDataGenerator( featurewise_center=True, featurewise_std_normalization=True, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True) # compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied) datagen.fit(x_train) # fits the model on batches with real-time data augmentation: model.fit(datagen.flow(x_train, y_train, batch_size=32), steps_per_epoch=len(x_train) / 32, epochs=epochs) # here's a more ""manual"" example for e in range(epochs): print('Epoch', e) batches = 0 for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32): model.fit(x_batch, y_batch) batches += 1 if batches >= len(x_train) / 32: # we need to break the loop by hand because # the generator loops indefinitely break ``` Example of using `.flow_from_directory(directory)`: ```python train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( 'data/train', target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_directory( 'data/validation', target_size=(150, 150), batch_size=32, class_mode='binary') model.fit( train_generator, steps_per_epoch=2000, epochs=50, validation_data=validation_generator, validation_steps=800) ``` Example of transforming images and masks together. 
```python # we create two instances with the same arguments data_gen_args = dict(featurewise_center=True, featurewise_std_normalization=True, rotation_range=90, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) image_datagen = ImageDataGenerator(**data_gen_args) mask_datagen = ImageDataGenerator(**data_gen_args) # Provide the same seed and keyword arguments to the fit and flow methods seed = 1 image_datagen.fit(images, augment=True, seed=seed) mask_datagen.fit(masks, augment=True, seed=seed) image_generator = image_datagen.flow_from_directory( 'data/images', class_mode=None, seed=seed) mask_generator = mask_datagen.flow_from_directory( 'data/masks', class_mode=None, seed=seed) # combine generators into one which yields image and masks train_generator = zip(image_generator, mask_generator) model.fit( train_generator, steps_per_epoch=2000, epochs=50) ```" 6444,image_dataset_from_directory,tensorflow/tensorflow/python/keras/preprocessing/image_dataset.py,35,function,"Generates a `tf.data.Dataset` from image files in a directory. If your directory structure is: ``` main_directory/ ...class_a/ ......a_image_1.jpg ......a_image_2.jpg ...class_b/ ......b_image_1.jpg ......b_image_2.jpg ``` Then calling `image_dataset_from_directory(main_directory, labels='inferred')` will return a `tf.data.Dataset` that yields batches of images from the subdirectories `class_a` and `class_b`, together with labels 0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`). Supported image formats: jpeg, png, bmp, gif. Animated gifs are truncated to the first frame. Arguments: directory: Directory where the data is located. If `labels` is ""inferred"", it should contain subdirectories, each containing images for a class. Otherwise, the directory structure is ignored. labels: Either ""inferred"" (labels are generated from the directory structure), or a list/tuple of integer labels of the same size as the number of image files found in the directory. Labels should be sorted according to the alphanumeric order of the image file paths (obtained via `os.walk(directory)` in Python). label_mode: - 'int': means that the labels are encoded as integers (e.g. for `sparse_categorical_crossentropy` loss). - 'categorical' means that the labels are encoded as a categorical vector (e.g. for `categorical_crossentropy` loss). - 'binary' means that the labels (there can be only 2) are encoded as `float32` scalars with values 0 or 1 (e.g. for `binary_crossentropy`). - None (no labels). class_names: Only valid if ""labels"" is ""inferred"". This is the explicit list of class names (must match names of subdirectories). Used to control the order of the classes (otherwise alphanumerical order is used). color_mode: One of ""grayscale"", ""rgb"", ""rgba"". Default: ""rgb"". Whether the images will be converted to have 1, 3, or 4 channels. batch_size: Size of the batches of data. Default: 32. image_size: Size to resize images to after they are read from disk. Defaults to `(256, 256)`. Since the pipeline processes batches of images that must all have the same size, this must be provided. shuffle: Whether to shuffle the data. Default: True. If set to False, sorts the data in alphanumeric order. seed: Optional random seed for shuffling and transformations. validation_split: Optional float between 0 and 1, fraction of data to reserve for validation. subset: One of ""training"" or ""validation"". Only used if `validation_split` is set. interpolation: String, the interpolation method used when resizing images.
Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`. follow_links: Whether to visit subdirectories pointed to by symlinks. Defaults to False. Returns: A `tf.data.Dataset` object. - If `label_mode` is None, it yields `float32` tensors of shape `(batch_size, image_size[0], image_size[1], num_channels)`, encoding images (see below for rules regarding `num_channels`). - Otherwise, it yields a tuple `(images, labels)`, where `images` has shape `(batch_size, image_size[0], image_size[1], num_channels)`, and `labels` follows the format described below. Rules regarding labels format: - if `label_mode` is `int`, the labels are an `int32` tensor of shape `(batch_size,)`. - if `label_mode` is `binary`, the labels are a `float32` tensor of 1s and 0s of shape `(batch_size, 1)`. - if `label_mode` is `categorical`, the labels are a `float32` tensor of shape `(batch_size, num_classes)`, representing a one-hot encoding of the class index. Rules regarding number of channels in the yielded images: - if `color_mode` is `grayscale`, there's 1 channel in the image tensors. - if `color_mode` is `rgb`, there are 3 channels in the image tensors. - if `color_mode` is `rgba`, there are 4 channels in the image tensors." 6445,paths_and_labels_to_dataset,tensorflow/tensorflow/python/keras/preprocessing/image_dataset.py,209,function,Constructs a dataset of images and labels. 6446,path_to_image,tensorflow/tensorflow/python/keras/preprocessing/image_dataset.py,227,function, 6447,ImageDatasetFromDirectoryTest,tensorflow/tensorflow/python/keras/preprocessing/image_dataset_test.py,39,class, 6448,_generate_test_images,tensorflow/tensorflow/python/keras/preprocessing/image_test.py,41,function, 6449,TestImage,tensorflow/tensorflow/python/keras/preprocessing/image_test.py,59,class, 6450,TimeseriesGenerator,tensorflow/tensorflow/python/keras/preprocessing/sequence.py,34,class,"Utility class for generating batches of temporal data. This class takes in a sequence of data-points gathered at equal intervals, along with time series parameters such as stride, length of history, etc., to produce batches for training/validation. # Arguments data: Indexable generator (such as list or Numpy array) containing consecutive data points (timesteps). The data should be 2D, and axis 0 is expected to be the time dimension. targets: Targets corresponding to timesteps in `data`. It should have the same length as `data`. length: Length of the output sequences (in number of timesteps). sampling_rate: Period between successive individual timesteps within sequences. For rate `r`, timesteps `data[i]`, `data[i-r]`, ... `data[i - length]` are used to create a sample sequence. stride: Period between successive output sequences. For stride `s`, consecutive output samples would be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc. start_index: Data points earlier than `start_index` will not be used in the output sequences. This is useful to reserve part of the data for test or validation. end_index: Data points later than `end_index` will not be used in the output sequences. This is useful to reserve part of the data for test or validation. shuffle: Whether to shuffle output samples, or instead draw them in chronological order. reverse: Boolean: if `true`, timesteps in each output sample will be in reverse chronological order. batch_size: Number of timeseries samples in each batch (except maybe the last one). # Returns A [Sequence](/utils/#sequence) instance.
# Examples ```python from keras.preprocessing.sequence import TimeseriesGenerator import numpy as np data = np.array([[i] for i in range(50)]) targets = np.array([[i] for i in range(50)]) data_gen = TimeseriesGenerator(data, targets, length=10, sampling_rate=2, batch_size=2) assert len(data_gen) == 20 batch_0 = data_gen[0] x, y = batch_0 assert np.array_equal(x, np.array([[[0], [2], [4], [6], [8]], [[1], [3], [5], [7], [9]]])) assert np.array_equal(y, np.array([[10], [11]])) ```" 6451,pad_sequences,tensorflow/tensorflow/python/keras/preprocessing/sequence.py,93,function,"Pads sequences to the same length. This function transforms a list (of length `num_samples`) of sequences (lists of integers) into a 2D Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen` argument if provided, or the length of the longest sequence in the list. Sequences that are shorter than `num_timesteps` are padded with `value` until they are `num_timesteps` long. Sequences longer than `num_timesteps` are truncated so that they fit the desired length. The position where padding or truncation happens is determined by the arguments `padding` and `truncating`, respectively. Pre-padding or removing values from the beginning of the sequence is the default. >>> sequence = [[1], [2, 3], [4, 5, 6]] >>> tf.keras.preprocessing.sequence.pad_sequences(sequence) array([[0, 0, 1], [0, 2, 3], [4, 5, 6]], dtype=int32) >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, value=-1) array([[-1, -1, 1], [-1, 2, 3], [ 4, 5, 6]], dtype=int32) >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, padding='post') array([[1, 0, 0], [2, 3, 0], [4, 5, 6]], dtype=int32) >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, maxlen=2) array([[0, 1], [2, 3], [5, 6]], dtype=int32) Arguments: sequences: List of sequences (each sequence is a list of integers). maxlen: Optional Int, maximum length of all sequences. If not provided, sequences will be padded to the length of the longest individual sequence. dtype: (Optional, defaults to int32). Type of the output sequences. To pad sequences with variable length strings, you can use `object`. padding: String, 'pre' or 'post' (optional, defaults to 'pre'): pad either before or after each sequence. truncating: String, 'pre' or 'post' (optional, defaults to 'pre'): remove values from sequences larger than `maxlen`, either at the beginning or at the end of the sequences. value: Float or String, padding value. (Optional, defaults to 0.) Returns: Numpy array with shape `(len(sequences), maxlen)` Raises: ValueError: In case of invalid values for `truncating` or `padding`, or in case of invalid shape for a `sequences` entry." 6452,TestSequence,tensorflow/tensorflow/python/keras/preprocessing/sequence_test.py,29,class, 6453,text_to_word_sequence,tensorflow/tensorflow/python/keras/preprocessing/text.py,32,function,"Converts a text to a sequence of words (or tokens). This function transforms a string of text into a list of words while ignoring `filters` which include punctuations by default. >>> sample_text = 'This is a sample sentence.' >>> tf.keras.preprocessing.text.text_to_word_sequence(sample_text) ['this', 'is', 'a', 'sample', 'sentence'] Arguments: input_text: Input text (string). filters: list (or concatenation) of characters to filter out, such as punctuation. Default: `'!""#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n'`, includes basic punctuation, tabs, and newlines. lower: boolean. Whether to convert the input to lowercase. split: str. 
Separator for word splitting. Returns: A list of words (or tokens)." 6454,one_hot,tensorflow/tensorflow/python/keras/preprocessing/text.py,61,function,"One-hot encodes a text into a list of word indexes of size `n`. This function receives as input a string of text and returns a list of encoded integers each corresponding to a word (or token) in the given input string. Arguments: input_text: Input text (string). n: int. Size of vocabulary. filters: list (or concatenation) of characters to filter out, such as punctuation. Default: ``!""#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n``, includes basic punctuation, tabs, and newlines. lower: boolean. Whether to set the text to lowercase. split: str. Separator for word splitting. Returns: List of integers in `[1, n]`. Each integer encodes a word (unicity non-guaranteed)." 6455,text_dataset_from_directory,tensorflow/tensorflow/python/keras/preprocessing/text_dataset.py,30,function,"Generates a `tf.data.Dataset` from text files in a directory. If your directory structure is: ``` main_directory/ ...class_a/ ......a_text_1.txt ......a_text_2.txt ...class_b/ ......b_text_1.txt ......b_text_2.txt ``` Then calling `text_dataset_from_directory(main_directory, labels='inferred')` will return a `tf.data.Dataset` that yields batches of texts from the subdirectories `class_a` and `class_b`, together with labels 0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`). Only `.txt` files are supported at this time. Arguments: directory: Directory where the data is located. If `labels` is ""inferred"", it should contain subdirectories, each containing text files for a class. Otherwise, the directory structure is ignored. labels: Either ""inferred"" (labels are generated from the directory structure), or a list/tuple of integer labels of the same size as the number of text files found in the directory. Labels should be sorted according to the alphanumeric order of the text file paths (obtained via `os.walk(directory)` in Python). label_mode: - 'int': means that the labels are encoded as integers (e.g. for `sparse_categorical_crossentropy` loss). - 'categorical' means that the labels are encoded as a categorical vector (e.g. for `categorical_crossentropy` loss). - 'binary' means that the labels (there can be only 2) are encoded as `float32` scalars with values 0 or 1 (e.g. for `binary_crossentropy`). - None (no labels). class_names: Only valid if ""labels"" is ""inferred"". This is the explicit list of class names (must match names of subdirectories). Used to control the order of the classes (otherwise alphanumerical order is used). batch_size: Size of the batches of data. Default: 32. max_length: Maximum size of a text string. Texts longer than this will be truncated to `max_length`. shuffle: Whether to shuffle the data. Default: True. If set to False, sorts the data in alphanumeric order. seed: Optional random seed for shuffling and transformations. validation_split: Optional float between 0 and 1, fraction of data to reserve for validation. subset: One of ""training"" or ""validation"". Only used if `validation_split` is set. follow_links: Whether to visit subdirectories pointed to by symlinks. Defaults to False. Returns: A `tf.data.Dataset` object. - If `label_mode` is None, it yields `string` tensors of shape `(batch_size,)`, containing the contents of a batch of text files. - Otherwise, it yields a tuple `(texts, labels)`, where `texts` has shape `(batch_size,)` and `labels` follows the format described below.
Rules regarding labels format: - if `label_mode` is `int`, the labels are an `int32` tensor of shape `(batch_size,)`. - if `label_mode` is `binary`, the labels are a `float32` tensor of 1s and 0s of shape `(batch_size, 1)`. - if `label_mode` is `categorical`, the labels are a `float32` tensor of shape `(batch_size, num_classes)`, representing a one-hot encoding of the class index." 6456,paths_and_labels_to_dataset,tensorflow/tensorflow/python/keras/preprocessing/text_dataset.py,171,function,Constructs a dataset of text strings and labels. 6457,path_to_string_content,tensorflow/tensorflow/python/keras/preprocessing/text_dataset.py,186,function, 6458,TextDatasetFromDirectoryTest,tensorflow/tensorflow/python/keras/preprocessing/text_dataset_test.py,32,class, 6459,TestText,tensorflow/tensorflow/python/keras/preprocessing/text_test.py,28,class, 6460,timeseries_dataset_from_array,tensorflow/tensorflow/python/keras/preprocessing/timeseries.py,30,function,"Creates a dataset of sliding windows over a timeseries provided as array. This function takes in a sequence of data-points gathered at equal intervals, along with time series parameters such as length of the sequences/windows, spacing between two sequence/windows, etc., to produce batches of timeseries inputs and targets. Arguments: data: Numpy array or eager tensor containing consecutive data points (timesteps). Axis 0 is expected to be the time dimension. targets: Targets corresponding to timesteps in `data`. It should have the same length as `data`. `targets[i]` should be the target corresponding to the window that starts at index `i` (see example 2 below). Pass None if you don't have target data (in this case the dataset will only yield the input data). sequence_length: Length of the output sequences (in number of timesteps). sequence_stride: Period between successive output sequences. For stride `s`, output samples would start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc. sampling_rate: Period between successive individual timesteps within sequences. For rate `r`, timesteps `data[i], data[i + r], ... data[i + sequence_length]` are used to create a sample sequence. batch_size: Number of timeseries samples in each batch (except maybe the last one). shuffle: Whether to shuffle output samples, or instead draw them in chronological order. seed: Optional int; random seed for shuffling. start_index: Optional int; data points earlier (exclusive) than `start_index` will not be used in the output sequences. This is useful to reserve part of the data for test or validation. end_index: Optional int; data points later (exclusive) than `end_index` will not be used in the output sequences. This is useful to reserve part of the data for test or validation. Returns: A tf.data.Dataset instance. If `targets` was passed, the dataset yields tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields only `batch_of_sequences`. Example 1: Consider indices `[0, 1, ... 99]`. With `sequence_length=10, sampling_rate=2, sequence_stride=3`, `shuffle=False`, the dataset will yield batches of sequences composed of the following indices: ``` First sequence: [0 2 4 6 8 10 12 14 16 18] Second sequence: [3 5 7 9 11 13 15 17 19 21] Third sequence: [6 8 10 12 14 16 18 20 22 24] ... Last sequence: [78 80 82 84 86 88 90 92 94 96] ``` In this case the last 3 data points are discarded since no full sequence can be generated to include them (the next sequence would have started at index 81, and thus its last step would have gone over 99).
Example 2: temporal regression. Consider an array `data` of scalar values, of shape `(steps,)`. To generate a dataset that uses the past 10 timesteps to predict the next timestep, you would use: ```python input_data = data[:-10] targets = data[10:] dataset = tf.keras.preprocessing.timeseries_dataset_from_array( input_data, targets, sequence_length=10) for batch in dataset: inputs, targets = batch assert np.array_equal(inputs[0], data[:10]) # First sequence: steps [0-9] assert np.array_equal(targets[0], data[10]) # Corresponding target: step 10 break ```" 6461,sequences_from_indices,tensorflow/tensorflow/python/keras/preprocessing/timeseries.py,202,function, 6462,TimeseriesDatasetTest,tensorflow/tensorflow/python/keras/preprocessing/timeseries_test.py,28,class, 6463,save_model_to_hdf5,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,56,function,"Saves a model to an HDF5 file. The saved model contains: - the model's configuration (topology) - the model's weights - the model's optimizer's state (if any) Thus the saved model can be reinstantiated in the exact same state, without any of the code used for model definition or training. Arguments: model: Keras model instance to be saved. filepath: One of the following: - String, path where to save the model - `h5py.File` object where to save the model overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user with a manual prompt. include_optimizer: If True, save optimizer's state together. Raises: ImportError: if h5py is not available." 6464,load_model_from_hdf5,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,133,function,"Loads a model saved via `save_model_to_hdf5`. Arguments: filepath: One of the following: - String, path to the saved model - `h5py.File` object from which to load the model custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. compile: Boolean, whether to compile the model after loading. Returns: A Keras model instance. If an optimizer was found as part of the saved model, the model is already compiled. Otherwise, the model is uncompiled and a warning will be displayed. When `compile` is set to False, the compilation is omitted without any warning. Raises: ImportError: if h5py is not available. ValueError: In case of an invalid savefile." 6465,preprocess_weights_for_loading,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,221,function,"Preprocess layer weights between different Keras formats. Converts layer weights from Keras 1 format to Keras 2 and also weights of CuDNN layers in Keras 2. Arguments: layer: Layer instance. weights: List of weights values (Numpy arrays). original_keras_version: Keras version for the weights, as a string. original_backend: Keras backend the weights were trained with, as a string. Returns: A list of weights values (Numpy arrays)." 6466,_convert_rnn_weights,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,411,function,"Converts weights for RNN layers between native and CuDNN format. Input kernels for each gate are transposed and converted between Fortran and C layout, recurrent kernels are transposed. For LSTM, biases are summed/split in half, for GRU, biases are reshaped. Weights can be converted in both directions between `LSTM` and `CuDNNLSTM` and between `CuDNNGRU` and `GRU(reset_after=True)`. Default `GRU` is not compatible with `CuDNNGRU`. For missing biases in `LSTM`/`GRU` (`use_bias=False`) no conversion is made.
Arguments: layer: Target layer instance. weights: List of source weights values (input kernels, recurrent kernels, [biases]) (Numpy arrays). Returns: A list of converted weights values (Numpy arrays). Raises: ValueError: for incompatible GRU layer/weights or incompatible biases" 6467,save_optimizer_weights_to_hdf5_group,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,578,function,"Saves the weights of an optimizer to an HDF5 group. Arguments: hdf5_group: HDF5 group. optimizer: optimizer instance." 6468,load_optimizer_weights_from_hdf5_group,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,602,function,"Load optimizer weights from an HDF5 group. Arguments: hdf5_group: A pointer to an HDF5 group. Returns: data: List of optimizer weight names." 6469,save_weights_to_hdf5_group,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,617,function,"Saves the weights of a list of layers to an HDF5 group. Arguments: f: HDF5 group. layers: List of layer instances." 6470,load_weights_from_hdf5_group,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,648,function,"Implements topological (order-based) weight loading. Arguments: f: A pointer to an HDF5 group. layers: a list of target layers. Raises: ValueError: in case of mismatch between provided layers and weights file." 6471,load_weights_from_hdf5_group_by_name,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,711,function,"Implements name-based weight loading (instead of topological weight loading). Layers that have no matching name are skipped. Arguments: f: A pointer to an HDF5 group. layers: a list of target layers. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weights. Raises: ValueError: in case of mismatch between provided layers and weights file and skip_mismatch=False." 6472,save_attributes_to_hdf5_group,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,793,function,"Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 files, which cannot store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Arguments: group: A pointer to an HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved." 6473,load_attributes_from_hdf5_group,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,835,function,"Loads attributes of the specified name from the HDF5 group. This method deals with an inherent problem of HDF5 files, which cannot store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Arguments: group: A pointer to an HDF5 group. name: A name of the attributes to load. Returns: data: Attributes data." 6474,_legacy_weights,tensorflow/tensorflow/python/keras/saving/hdf5_format.py,861,function,"DO NOT USE. For legacy reasons, layer.weights was in the order of [self.trainable_weights + self.non_trainable_weights], and this order was used for preserving the weights in h5 format. The new order of layer.weights is the same as layer.get_weights(), which is more intuitive for the user. To keep supporting existing saved h5 files, this method should be used to save/load weights. In a future version, we will delete this method and introduce a breaking change for h5 and stay with the new order for weights. Args: layer: a `tf.keras.Model` or `tf.keras.layers.Layer` instance.
Returns: A list of variables with the order of trainable_weights, followed by non_trainable_weights." 6475,TestWeightSavingAndLoading,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,58,class, 6476,TestWholeModelSaving,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,374,class, 6477,_make_graph_network,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,866,function, 6478,_make_sequential,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,873,function, 6479,_make_sequential_built,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,881,function, 6480,_make_sequential_graph_network,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,887,function, 6481,_make_sequential_input_shape,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,895,function, 6482,_make_subclassed,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,902,class, 6483,_make_subclassed_built,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,922,class, 6484,TestWholeModelSavingWithNesting,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,930,class,Tests saving a whole model that contains other models. 6485,SubclassedModel,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,973,class, 6486,TestWeightSavingAndLoadingTFFormat,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,984,class, 6487,DummySubclassModel,tensorflow/tensorflow/python/keras/saving/hdf5_format_test.py,1274,class, 6488,MyMeanAbsoluteError,tensorflow/tensorflow/python/keras/saving/losses_serialization_test.py,45,class, 6489,my_mae,tensorflow/tensorflow/python/keras/saving/losses_serialization_test.py,55,function, 6490,_get_multi_io_model,tensorflow/tensorflow/python/keras/saving/losses_serialization_test.py,59,function, 6491,LossesSerialization,tensorflow/tensorflow/python/keras/saving/losses_serialization_test.py,117,class, 6492,MyMeanAbsoluteError,tensorflow/tensorflow/python/keras/saving/metrics_serialization_test.py,45,class, 6493,_my_mae,tensorflow/tensorflow/python/keras/saving/metrics_serialization_test.py,52,function, 6494,_get_multi_io_model,tensorflow/tensorflow/python/keras/saving/metrics_serialization_test.py,56,function, 6495,MetricsSerialization,tensorflow/tensorflow/python/keras/saving/metrics_serialization_test.py,150,class, 6496,model_from_config,tensorflow/tensorflow/python/keras/saving/model_config.py,35,function,"Instantiates a Keras model from its config. Arguments: config: Configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). Raises: TypeError: if `config` is not a dictionary." 6497,model_from_yaml,tensorflow/tensorflow/python/keras/saving/model_config.py,59,function,"Parses a yaml model configuration file and returns a model instance. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> try: ... import yaml ... config = model.to_yaml() ... loaded_model = tf.keras.models.model_from_yaml(config) ... except ImportError: ... pass Arguments: yaml_string: YAML string or open file encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). Raises: ImportError: if yaml module is not found." 
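The `model_from_yaml` entry above describes a YAML round trip but its doctest only shows the happy path inside a try/except. A minimal sketch, assuming the optional `pyyaml` dependency is installed; the layer sizes mirror the docstring's own example, and only the architecture (not the weights) survives the round trip:

```python
import tensorflow as tf

# Build a small model and serialize its architecture to a YAML string.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(5, input_shape=(3,)),
    tf.keras.layers.Softmax()])
yaml_config = model.to_yaml()  # requires the pyyaml package

# Rebuild an uncompiled model with the same topology from the YAML string.
rebuilt = tf.keras.models.model_from_yaml(yaml_config)
rebuilt.summary()  # same architecture, freshly initialized weights
```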
6498,model_from_json,tensorflow/tensorflow/python/keras/saving/model_config.py,100,function,"Parses a JSON model configuration string and returns a model instance. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> config = model.to_json() >>> loaded_model = tf.keras.models.model_from_json(config) Arguments: json_string: JSON string encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled)." 6499,save_model,tensorflow/tensorflow/python/keras/saving/save.py,49,function,"Saves a model as a TensorFlow SavedModel or HDF5 file. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> model.save('/tmp/model') >>> loaded_model = tf.keras.models.load_model('/tmp/model') >>> x = tf.random.uniform((10, 3)) >>> assert np.allclose(model.predict(x), loaded_model.predict(x)) The saved model contains: - the model's configuration (topology) - the model's weights - the model's optimizer's state (if any) Thus the saved model can be reinstantiated in the exact same state, without any of the code used for model definition or training. Note that the model weights may have different scoped names after being loaded. Scoped names include the model/layer names, such as `""dense_1/kernel:0""`. It is recommended that you use the layer properties to access specific variables, e.g. `model.get_layer(""dense_1"").kernel`. _SavedModel serialization_ The SavedModel serialization path uses `tf.saved_model.save` to save the model and all trackable objects attached to the model (e.g. layers and variables). `@tf.function`-decorated methods are also saved. Additional trackable objects and functions are added to the SavedModel to allow the model to be loaded back as a Keras Model object. Arguments: model: Keras model instance to be saved. filepath: One of the following: - String or `pathlib.Path` object, path where to save the model - `h5py.File` object where to save the model overwrite: Whether we should overwrite any existing model at the target location, or instead ask the user with a manual prompt. include_optimizer: If True, save optimizer's state together. save_format: Either 'tf' or 'h5', indicating whether to save the model to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5' in TF 1.X. signatures: Signatures to save with the SavedModel. Applicable to the 'tf' format only. Please see the `signatures` argument in `tf.saved_model.save` for details. options: Optional `tf.saved_model.SaveOptions` object that specifies options for saving to SavedModel. Raises: ImportError: If save format is hdf5, and h5py is not available." 6500,load_model,tensorflow/tensorflow/python/keras/saving/save.py,139,function,"Loads a model saved via `model.save()`. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> model.save('/tmp/model') >>> loaded_model = tf.keras.models.load_model('/tmp/model') >>> x = tf.random.uniform((10, 3)) >>> assert np.allclose(model.predict(x), loaded_model.predict(x)) Note that the model weights may have different scoped names after being loaded. Scoped names include the model/layer names, such as `""dense_1/kernel:0""`. It is recommended that you use the layer properties to access specific variables, e.g. 
`model.get_layer(""dense_1"").kernel`. Arguments: filepath: One of the following: - String or `pathlib.Path` object, path to the saved model - `h5py.File` object from which to load the model custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. compile: Boolean, whether to compile the model after loading. options: Optional `tf.saved_model.LoadOptions` object that specifies options for loading from SavedModel. Returns: A Keras model instance. If the original model was compiled, and saved with the optimizer, then the returned model will be compiled. Otherwise, the model will be left uncompiled. In the case that an uncompiled model is returned, a warning is displayed if the `compile` argument is set to `True`. Raises: ImportError: if loading from an hdf5 file and h5py is not available. IOError: In case of an invalid savefile." 6501,TestSaveModel,tensorflow/tensorflow/python/keras/saving/save_test.py,53,class, 6502,export_saved_model,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,69,function,"Exports a `tf.keras.Model` as a Tensorflow SavedModel. Note that at this time, subclassed models can only be saved using `serving_only=True`. The exported `SavedModel` is a standalone serialization of Tensorflow objects, and is supported by TF language APIs and the Tensorflow Serving system. To load the model, use the function `tf.keras.experimental.load_from_saved_model`. The `SavedModel` contains: 1. a checkpoint containing the model weights. 2. a `SavedModel` proto containing the Tensorflow backend graph. Separate graphs are saved for prediction (serving), train, and evaluation. If the model has not been compiled, then only the graph computing predictions will be exported. 3. the model's json config. If the model is subclassed, this will only be included if the model's `get_config()` method is overwritten. Example: ```python import tensorflow as tf # Create a tf.keras model. model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(1, input_shape=[10])) model.summary() # Save the tf.keras model in the SavedModel format. path = '/tmp/simple_keras_model' tf.keras.experimental.export_saved_model(model, path) # Load the saved keras model back. new_model = tf.keras.experimental.load_from_saved_model(path) new_model.summary() ``` Args: model: A `tf.keras.Model` to be saved. If the model is subclassed, the flag `serving_only` must be set to True. saved_model_path: a string specifying the path to the SavedModel directory. custom_objects: Optional dictionary mapping string names to custom classes or functions (e.g. custom loss functions). as_text: bool, `False` by default. Whether to write the `SavedModel` proto in text format. Currently unavailable in serving-only mode. input_signature: A possibly nested sequence of `tf.TensorSpec` objects, used to specify the expected model inputs. See `tf.function` for more details. serving_only: bool, `False` by default. When this is true, only the prediction graph is saved. Raises: NotImplementedError: If the model is a subclassed model, and serving_only is False. ValueError: If the input signature cannot be inferred from the model. AssertionError: If the SavedModel directory already exists and isn't empty." 6503,_export_model_json,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,149,function,Saves model configuration as a json string under assets folder. 
6504,_export_model_variables,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,158,function,Saves model weights in checkpoint format under variables folder. 6505,_save_v1_format,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,166,function,Exports model to v1 SavedModel format. 6506,_get_var_list,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,224,function,Returns list of all checkpointed saveable objects in the model. 6507,create_placeholder,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,230,function, 6508,_export_mode,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,234,function,"Exports a model, and optionally saves new vars from the clone model. Args: mode: A `tf.estimator.ModeKeys` string. has_saved_vars: A `boolean` indicating whether the SavedModel has already exported variables. builder: A `SavedModelBuilder` object. model: A `tf.keras.Model` object. custom_objects: A dictionary mapping string names to custom classes or functions. checkpoint_path: String path to checkpoint. input_signature: Nested TensorSpec containing the expected inputs. Can be `None`, in which case the signature will be inferred from the model. Raises: ValueError: If the train/eval mode is being exported, but the model does not have an optimizer." 6509,_create_signature_def_map,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,329,function,Creates a SignatureDef map from a Keras model. 6510,_assert_same_non_optimizer_objects,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,367,function,Asserts model and clone contain the same trackable objects. 6511,load_from_saved_model,tensorflow/tensorflow/python/keras/saving/saved_model_experimental.py,379,function,"Loads a keras Model from a SavedModel created by `export_saved_model()`. This function reinstantiates model state by: 1) loading model topology from json (this will eventually come from metagraph). 2) loading model weights from checkpoint. Example: ```python import tensorflow as tf # Create a tf.keras model. model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(1, input_shape=[10])) model.summary() # Save the tf.keras model in the SavedModel format. path = '/tmp/simple_keras_model' tf.keras.experimental.export_saved_model(model, path) # Load the saved keras model back. new_model = tf.keras.experimental.load_from_saved_model(path) new_model.summary() ``` Args: saved_model_path: a string specifying the path to an existing SavedModel. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: a keras.Model instance." 
6512,TestModelSavingandLoading,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,47,class, 6513,LayerWithLearningPhase,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,203,class, 6514,functional_model,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,222,function, 6515,sequential_model,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,231,function, 6516,sequential_model_without_input_shape,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,240,function, 6517,Subclassed,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,249,class, 6518,subclassed_model,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,262,function, 6519,load_model,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,266,function, 6520,TestModelSavedModelExport,tensorflow/tensorflow/python/keras/saving/saved_model_experimental_test.py,280,class, 6521,extract_model_metrics,tensorflow/tensorflow/python/keras/saving/saving_utils.py,37,function,"Convert metrics from a Keras model `compile` API to dictionary. This is used for converting Keras models to Estimators and SavedModels. Args: model: A `tf.keras.Model` object. Returns: Dictionary mapping metric names to metric instances. May return `None` if the model does not contain any metrics." 6522,model_input_signature,tensorflow/tensorflow/python/keras/saving/saving_utils.py,57,function,"Inspect model to get its input signature. The model's input signature is a list with a single (possibly-nested) object. This is due to the Keras-enforced restriction that tensor inputs must be passed in as the first argument. For example, a model with input {'feature1': <Tensor>, 'feature2': <Tensor>} will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] Args: model: Keras Model object. keep_original_batch_size: A boolean indicating whether we want to keep using the original batch size or set it to None. Default is `False`, which means that the batch dim of the returned input signature will always be set to `None`. Returns: A list containing either a single TensorSpec or an object with nested TensorSpecs. This list does not contain the `training` argument." 6523,raise_model_input_error,tensorflow/tensorflow/python/keras/saving/saving_utils.py,92,function, 6524,trace_model_call,tensorflow/tensorflow/python/keras/saving/saving_utils.py,100,function,"Trace the model call to create a tf.function for exporting a Keras model. Args: model: A Keras model. input_signature: optional, a list of tf.TensorSpec objects specifying the inputs to the model. Returns: A tf.function wrapping the model's call function with input signatures set. Raises: ValueError: if input signature cannot be inferred from the model." 6525,model_metadata,tensorflow/tensorflow/python/keras/saving/saving_utils.py,147,function,Returns a dictionary containing the model metadata. 6526,should_overwrite,tensorflow/tensorflow/python/keras/saving/saving_utils.py,196,function,Returns whether the filepath should be overwritten. 6527,compile_args_from_training_config,tensorflow/tensorflow/python/keras/saving/saving_utils.py,204,function,Return model.compile arguments from training config. 6528,_deserialize_nested_config,tensorflow/tensorflow/python/keras/saving/saving_utils.py,245,function,Deserializes arbitrary Keras `config` using `deserialize_fn`. 
6529,_serialize_nested_config,tensorflow/tensorflow/python/keras/saving/saving_utils.py,270,function,Serializes a nested structure of Keras objects. 6530,_deserialize_metric,tensorflow/tensorflow/python/keras/saving/saving_utils.py,281,function,"Deserialize metrics, leaving special strings untouched." 6531,_enforce_names_consistency,tensorflow/tensorflow/python/keras/saving/saving_utils.py,291,function,Enforces that either all specs have names or none do. 6532,try_build_compiled_arguments,tensorflow/tensorflow/python/keras/saving/saving_utils.py,313,function, 6533,TraceModelCallTest,tensorflow/tensorflow/python/keras/saving/saving_utils_test.py,54,class, 6534,_import_and_infer,tensorflow/tensorflow/python/keras/saving/saving_utils_test.py,249,function,Import a SavedModel into a TF 1.x-style graph and run `signature_key`. 6535,ModelSaveTest,tensorflow/tensorflow/python/keras/saving/saving_utils_test.py,272,class, 6536,ExtractModelMetricsTest,tensorflow/tensorflow/python/keras/saving/saving_utils_test.py,297,class, 6537,SavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/base_serialization.py,29,class,"Saver defining the methods and properties used to serialize Keras objects. " 6538,Encoder,tensorflow/tensorflow/python/keras/saving/saved_model/json_utils.py,43,class,JSON encoder and decoder that handles TensorShapes and tuples. 6539,_encode_tuple,tensorflow/tensorflow/python/keras/saving/saved_model/json_utils.py,56,function, 6540,decode,tensorflow/tensorflow/python/keras/saving/saved_model/json_utils.py,68,function, 6541,_decode_helper,tensorflow/tensorflow/python/keras/saving/saved_model/json_utils.py,72,function,A decoding helper that is TF-object aware. 6542,get_json_type,tensorflow/tensorflow/python/keras/saving/saved_model/json_utils.py,84,function,"Serializes any object to a JSON-serializable structure. Arguments: obj: the object to serialize Returns: JSON-serializable structure representing `obj`. Raises: TypeError: if `obj` cannot be serialized." 6543,JsonUtilsTest,tensorflow/tensorflow/python/keras/saving/saved_model/json_utils_test.py,27,class, 6544,LayerSavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/layer_serialization.py,31,class,Implements Layer SavedModel serialization. 6545,get_config,tensorflow/tensorflow/python/keras/saving/saved_model/layer_serialization.py,113,function, 6546,InputLayerSavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/layer_serialization.py,125,class,InputLayer serialization. 6547,RNNSavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/layer_serialization.py,151,class,RNN layer serialization. 6548,load,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,93,function,"Loads Keras objects from a SavedModel. Any Keras layer or model saved to the SavedModel will be loaded back as Keras objects. Other objects are loaded as regular trackable objects (same as `tf.saved_model.load`). Currently, Keras saving/loading only retains the Keras object's weights, losses, and call function. The loaded model can be re-compiled, but the original optimizer, compiled loss functions, and metrics are not retained. This is temporary, and `model.save` will soon be able to serialize compiled models. Args: path: Path to SavedModel. compile: If true, compile the model after loading it. options: Optional `tf.saved_model.LoadOptions` object that specifies options for loading from SavedModel. Returns: Object loaded from SavedModel." 
6549,_is_graph_network,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,146,function,Determines whether the layer is a graph network. 6550,KerasObjectLoader,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,157,class,"Loader that recreates Keras objects (e.g. layers, models). Layers and models are revived from either the config or SavedModel following these rules: 1. If object is a graph network (i.e. Sequential or Functional) then it will be initialized using the structure from the config only after the child layers have been created. Graph networks must be initialized with inputs and outputs, so all child layers must be created beforehand. 2. If object's config exists and the class can be found, then revive from config. 3. Object may have already been created if its parent was revived from config. In this case, do nothing. 4. If nothing of the above applies, compose the various artifacts from the SavedModel to create a subclassed layer or model. At this time, custom metrics are not supported." 6551,_finalize_saved_model_layers,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,682,function,Runs the final steps of loading Keras Layers from SavedModel. 6552,_unable_to_call_layer_due_to_serialization_issue,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,721,function,"Replaces the `layer.call` if the layer was not fully serialized. Keras Model/Layer serialization is relatively relaxed because SavedModels are not always loaded back as keras models. Thus, when there is an issue tracing a non-signature function, a warning is logged instead of raising an error. This results in a SavedModel where the model's call function is saved, but the internal layer call functions are not. When deserialized with `tf.keras.models.load_model`, the internal layers which do not have serialized call functions should raise an error when called. Args: layer: Layer without the serialized call function. Raises: ValueError" 6553,_finalize_config_layers,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,756,function,Runs the final steps of loading Keras Layers from config. 6554,_finalize_metric,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,786,function, 6555,_restore_layer_unconditional_losses,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,792,function,Restore unconditional losses from SavedModel. 6556,_restore_layer_activation_loss,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,805,function,Restore activation loss from SavedModel. 6557,revive_custom_object,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,820,function,Revives object from SavedModel. 6558,_restore_layer_metrics,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,849,function, 6559,RevivedLayer,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,860,class,Keras layer loaded from a SavedModel. 6560,_revive_setter,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,908,function,Setter function that saves some attributes to a separate dictionary. 6561,RevivedInputLayer,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,930,class,InputLayer loaded from a SavedModel. 6562,recursively_deserialize_keras_object,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,952,function,Deserialize Keras object from a nested structure. 
6563,infer_inputs_from_restored_call_function,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,969,function,"Returns TensorSpec of inputs from a restored call function. Args: fn: Restored layer call function. It is assumed that the inputs are entirely in the first argument. Returns: TensorSpec of call function inputs." 6564,RevivedNetwork,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,989,class,Keras network of layers loaded from a SavedModel. 6565,_set_network_attributes_from_metadata,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,1015,function,Sets attributes recorded in the metadata. 6566,_maybe_add_serialized_attributes,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,1026,function, 6567,_get_keras_attr,tensorflow/tensorflow/python/keras/saving/saved_model/load.py,1035,function, 6568,MetricSavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/metric_serialization.py,25,class,Metric serialization. 6569,ModelSavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/model_serialization.py,27,class,Model SavedModel serialization. 6570,SequentialSavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/model_serialization.py,62,class, 6571,NetworkSavedModelSaver,tensorflow/tensorflow/python/keras/saving/saved_model/network_serialization.py,25,class,Network serialization. 6572,SubclassedModelNoConfig,tensorflow/tensorflow/python/keras/saving/saved_model/revive_test.py,47,class, 6573,SubclassedModelWithConfig,tensorflow/tensorflow/python/keras/saving/saved_model/revive_test.py,75,class, 6574,CustomLayerNoConfig,tensorflow/tensorflow/python/keras/saving/saved_model/revive_test.py,86,class, 6575,CustomLayerWithConfig,tensorflow/tensorflow/python/keras/saving/saved_model/revive_test.py,110,class, 6576,TestModelRevive,tensorflow/tensorflow/python/keras/saving/saved_model/revive_test.py,118,class, 6577,save,tensorflow/tensorflow/python/keras/saving/saved_model/save.py,40,function,"Saves a model as a SavedModel to the filepath. Args: model: Keras model instance to be saved. filepath: String path to save the model. overwrite: whether to overwrite the existing filepath. include_optimizer: If True, save the model's optimizer state. signatures: Signatures to save with the SavedModel. Applicable to the 'tf' format only. Please see the `signatures` argument in `tf.saved_model.save` for details. options: Optional `tf.saved_model.SaveOptions` object that specifies options for saving to SavedModel. Raises: ValueError: if the model's inputs have not been defined." 6578,should_skip_serialization,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,72,function,Skip serializing extra objects and functions if layer inputs aren't set. 6579,wrap_layer_objects,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,83,function,"Returns extra trackable objects to attach to the serialized layer. Args: layer: Keras Layer object. serialization_cache: Dictionary shared between all objects during serialization. Returns: A dictionary containing all checkpointable objects from a SerializedAttributes object. See LayerAttributes and ModelAttributes for the entire list of objects." 6580,wrap_layer_functions,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,134,function,"Returns dict of wrapped layer call function and losses in tf.functions. Args: layer: Keras Layer object. serialization_cache: Dictionary shared between all objects during serialization. 
Returns: A dictionary containing all keras tf.functions to serialize. See LayerAttributes and ModelAttributes for the list of all attributes." 6581,default_save_signature,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,202,function, 6582,_replace_child_layer_functions,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,210,function,"Replaces functions in the child layers with wrapped tf.functions. This step allows functions from parent layers to reference the wrapped functions from their child layers instead of retracing the ops. This function also resets all losses stored in the layer. These are stored in the returned dictionary. Use `_restore_child_layer_functions` to restore the original attributes. Args: layer: Keras Layer object. serialization_cache: Dictionary shared between all objects during serialization. Returns: Dictionary mapping layer objects -> original functions and losses: { Child layer 1: { 'losses': Original losses, 'call': Original call function '_activity_regularizer': Original activity regularizer}, Child layer 2: ... }" 6583,_restore_child_layer_functions,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,296,function,Restores attributes replaced with `_replace_child_layer_functions`. 6584,_reset_layer_losses,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,309,function,"Resets losses of layer and its sublayers, and returns original losses." 6585,_restore_layer_losses,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,321,function, 6586,LayerCallCollection,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,329,class,"Groups wrapped layer call functions. This is used to ensure that all layer call functions are traced with the same inputs: - call - call_and_return_conditional_losses - call_and_return_all_conditional_losses" 6587,layer_call_wrapper,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,509,function,"Ensures layer losses are kept the same, and runs the method in the call context." 6588,LayerCall,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,532,class,Function that triggers traces of other functions in the same collection. 6589,_wrap_call_and_conditional_losses,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,552,function,"Wraps call function that returns a tuple of (outputs, losses). The losses returned are conditional on the inputs passed to the call function. Unconditional losses (e.g. weight regularization) are wrapped separately. Args: layer: a Keras layer object Returns: python call function that returns outputs and conditional losses -- excludes activity regularizer" 6590,_extract_outputs_from_fn,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,581,function,Returns a function that returns only call function outputs. 6591,_append_activity_regularizer_loss,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,590,function,Appends activity regularizer loss to losses returned by the wrapped fn. 6592,_create_call_fn_decorator,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,600,function, 6593,_wrap_unconditional_loss,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,611,function,"Wraps callable/unconditional loss, returning a serializable function." 6594,_wrap_activity_regularizer,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,622,function,Wraps the activity regularizer. 
6595,_get_layer_call_method,tensorflow/tensorflow/python/keras/saving/saved_model/save_impl.py,634,function, 6596,LayerWithLearningPhase,tensorflow/tensorflow/python/keras/saving/saved_model/saved_model_test.py,69,class, 6597,LayerWithLoss,tensorflow/tensorflow/python/keras/saving/saved_model/saved_model_test.py,88,class, 6598,LayerWithUpdate,tensorflow/tensorflow/python/keras/saving/saved_model/saved_model_test.py,95,class, 6599,GlobalLayerThatShouldFailIfNotAdded,tensorflow/tensorflow/python/keras/saving/saved_model/saved_model_test.py,112,class, 6600,TestModelSavingAndLoadingV2,tensorflow/tensorflow/python/keras/saving/saved_model/saved_model_test.py,117,class, 6601,TestLayerCallTracing,tensorflow/tensorflow/python/keras/saving/saved_model/saved_model_test.py,862,class, 6602,MetricTest,tensorflow/tensorflow/python/keras/saving/saved_model/saved_model_test.py,949,class, 6603,SerializedAttributes,tensorflow/tensorflow/python/keras/saving/saved_model/serialized_attributes.py,45,class,"Class that tracks and validates all serialization attributes. Keras models contain many Python-defined components. For example, the trainable_variable property lists the model's trainable variables by recursively retrieving the trainable variables from each of the child layers. Another example is model.call, a python function that calls child layers and adds ops to the backend graph. Only Tensorflow checkpointable objects and functions can be serialized to SavedModel. Serializing a Keras model as-is results in a checkpointable object that does not resemble a Keras model at all. Thus, extra checkpointable objects and functions must be created during serialization. **Defining new serialized attributes** Child classes should be defined using: SerializedAttributes.with_attributes( 'name', checkpointable_objects=[...], functions=[...], copy_from=[...]) This class is used to cache generated checkpointable objects and functions, ensuring that new objects and functions are generated a single time. **Usage during serialization** Each Layer/Model object should have a corresponding instance of SerializedAttributes. Create a new instance by calling `SerializedAttributes.new(obj)`. Objects and functions may be saved using `.set_and_validate_checkpointable_objects`/`.set_and_validate_functions`. The properties `.checkpointable_objects` and `.functions` return the cached values. **Adding/changing attributes to save to SavedModel** 1. Change the call to `SerializedAttributes.with_attributes` in the correct class: - CommonEndpoints: Base attributes to be added during serialization. If these attributes are present in a Trackable object, it can be deserialized to a Keras Model. - LayerAttributes: Attributes to serialize for Layer objects. - ModelAttributes: Attributes to serialize for Model objects. 2. Update class docstring 3. Update arguments to any calls to `set_and_validate_*`. For example, if `call_raw_tensors` is added to the ModelAttributes function list, then a `call_raw_tensors` function should be passed to `set_and_validate_functions`. **Common endpoints vs other attributes** Only common endpoints are attached directly to the root object. Keras-specific attributes are saved to a separate trackable object with the name ""keras_api"". The number of objects attached to the root is limited because any naming conflicts will cause user code to break. Another reason is that this will only affect users who call `tf.saved_model.load` instead of `tf.keras.models.load_model`. 
These are advanced users who are likely to have defined their own tf.functions and trackable objects. The added Keras-specific attributes are kept out of the way in the ""keras_api"" namespace. Properties defined in this class may be used to filter out keras-specific attributes: - `functions_to_serialize`: Returns dict of functions to attach to the root object. - `checkpointable_objects_to_serialize`: Returns dict of objects to attach to the root object (including separate trackable object containing keras-specific attributes) All changes to the serialized attributes must be backwards-compatible, so attributes should not be removed or modified without sufficient justification." 6604,CommonEndpoints,tensorflow/tensorflow/python/keras/saving/saved_model/serialized_attributes.py,221,class,"Common endpoints shared by all models loadable by Keras. List of all attributes: variables: List of all variables in the model and its sublayers. trainable_variables: List of all trainable variables in the model and its sublayers. regularization_losses: List of all unconditional losses (losses not dependent on the inputs) in the model and its sublayers. __call__: Function that takes inputs and returns the outputs of the model call function. call_and_return_all_conditional_losses: Function that returns a tuple of (call function outputs, list of all losses that depend on the inputs). _default_save_signature: Traced model call function. This is only included if the top level exported object is a Keras model." 6605,LayerAttributes,tensorflow/tensorflow/python/keras/saving/saved_model/serialized_attributes.py,244,class,"Layer checkpointable objects + functions that are saved to the SavedModel. List of all attributes: All attributes from CommonEndpoints non_trainable_variables: List of non-trainable variables in the layer and its sublayers. layers: List of all sublayers. metrics: List of all metrics in the layer and its sublayers. call_and_return_conditional_losses: Function that takes inputs and returns a tuple of (outputs of the call function, list of input-dependent losses). The list of losses excludes the activity regularizer function, which is separate to allow the deserialized Layer object to define a different activity regularizer. activity_regularizer_fn: Callable that returns the activity regularizer loss layer_regularization_losses: List of losses owned only by this layer. layer_metrics: List of metrics owned by this layer." 6606,ModelAttributes,tensorflow/tensorflow/python/keras/saving/saved_model/serialized_attributes.py,270,class,"Model checkpointable objects + functions that are saved to the SavedModel. List of all attributes: All attributes from LayerAttributes (including CommonEndpoints)" 6607,MetricAttributes,tensorflow/tensorflow/python/keras/saving/saved_model/serialized_attributes.py,282,class,"Attributes that are added to Metric objects when saved to SavedModel. List of all attributes: variables: list of all variables" 6608,RNNAttributes,tensorflow/tensorflow/python/keras/saving/saved_model/serialized_attributes.py,296,class,"RNN checkpointable objects + functions that are saved to the SavedModel. List of all attributes: All attributes from LayerAttributes (including CommonEndpoints) states: List of state variables" 6609,use_wrapped_call,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,40,function,"Creates fn that adds the losses returned by call_fn & returns the outputs. 
Args: layer: A Keras layer object call_fn: tf.function that takes layer inputs (and possibly a training arg), and returns a tuple of (outputs, list of losses). default_training_value: Default value of the training kwarg. If `None`, the default is `K.learning_phase()`. return_method: Whether to return a method bound to the layer. Returns: function that calls call_fn and returns the outputs. Losses returned by call_fn are added to the layer losses." 6610,layer_uses_training_bool,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,99,function,Returns whether this layer or any of its children uses the training arg. 6611,list_all_layers,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,116,function, 6612,list_all_layers_and_sublayers,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,124,function, 6613,maybe_add_training_arg,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,131,function,"Decorates call and optionally adds a training argument. If a layer expects a training argument, this function ensures that 'training' is present in the layer args or kwonly args, with the default training value. Args: original_call: Original call function. wrapped_call: Wrapped call function. expects_training_arg: Whether to include 'training' argument. default_training_value: Default value of the training kwarg to include in the arg spec. If `None`, the default is `K.learning_phase()`. Returns: Tuple of ( function that calls `wrapped_call` and sets the training arg, Argspec of returned function or `None` if the argspec is unchanged)" 6614,get_training_arg_index,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,202,function,"Returns the index of 'training' in the layer call function arguments. Args: call_fn: Call function. Returns: - n: index of 'training' in the call function arguments. - -1: if 'training' is not found in the arguments, but layer.call accepts variable keyword arguments - None: if layer doesn't expect a training argument." 6615,set_training_arg,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,223,function, 6616,get_training_arg,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,233,function, 6617,remove_training_arg,tensorflow/tensorflow/python/keras/saving/saved_model/utils.py,242,function, 6618,get_ctl_train_step,tensorflow/tensorflow/python/keras/tests/add_loss_correctness_test.py,44,function, 6619,TestAddLossCorrectness,tensorflow/tensorflow/python/keras/tests/add_loss_correctness_test.py,65,class, 6620,get_tpu_cluster_resolver,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,57,function, 6621,get_tpu_strategy,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,66,function, 6622,LayerForScalarSummary,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,73,class,A pass-through layer that only records scalar values to summary. 6623,LayerForImageSummary,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,81,class,A pass-through layer that only records image values to summary. 6624,LayerForHistogramSummary,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,89,class,A pass-through layer that records histogram values to summary. 6625,CustomModel,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,97,class,Custom model with summary ops in model call definition. 
6626,get_image_dataset,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,124,function, 6627,mnist_model,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,133,function,Creates a MNIST model. 6628,AutoOutsideCompilationWithKerasTest,tensorflow/tensorflow/python/keras/tests/automatic_outside_compilation_test.py,156,class, 6629,VariablesToConstantsTest,tensorflow/tensorflow/python/keras/tests/convert_to_constants_test.py,41,class, 6630,LayerWithLosses,tensorflow/tensorflow/python/keras/tests/custom_training_loop_test.py,35,class, 6631,LayerWithMetrics,tensorflow/tensorflow/python/keras/tests/custom_training_loop_test.py,49,class, 6632,LayerWithTrainingArg,tensorflow/tensorflow/python/keras/tests/custom_training_loop_test.py,61,class, 6633,add_loss_step,tensorflow/tensorflow/python/keras/tests/custom_training_loop_test.py,71,function, 6634,batch_norm_step,tensorflow/tensorflow/python/keras/tests/custom_training_loop_test.py,92,function, 6635,add_metric_step,tensorflow/tensorflow/python/keras/tests/custom_training_loop_test.py,115,function, 6636,CustomTrainingLoopTest,tensorflow/tensorflow/python/keras/tests/custom_training_loop_test.py,145,class, 6637,TestGetConfigBackwardsCompatible,tensorflow/tensorflow/python/keras/tests/get_config_test.py,29,class, 6638,ConvertVariablesToConstantsTest,tensorflow/tensorflow/python/keras/tests/graph_util_test.py,35,class, 6639,KerasIntegrationTest,tensorflow/tensorflow/python/keras/tests/integration_test.py,38,class, 6640,VectorClassificationIntegrationTest,tensorflow/tensorflow/python/keras/tests/integration_test.py,58,class, 6641,SequentialIntegrationTest,tensorflow/tensorflow/python/keras/tests/integration_test.py,129,class, 6642,TimeseriesClassificationIntegrationTest,tensorflow/tensorflow/python/keras/tests/integration_test.py,181,class, 6643,ImageClassificationIntegrationTest,tensorflow/tensorflow/python/keras/tests/integration_test.py,247,class, 6644,ActivationV2IntegrationTest,tensorflow/tensorflow/python/keras/tests/integration_test.py,284,class,"Tests activation function V2 in model exporting and loading. This test is to verify in TF 2.x, when 'tf.nn.softmax' is used as an activation function, its model exporting and loading work as expected. Check b/123041942 for details." 6645,MemoryCheckerTest,tensorflow/tensorflow/python/keras/tests/memory_checker_test.py,28,class, 6646,SingleLayerNet,tensorflow/tensorflow/python/keras/tests/memory_test.py,35,class,Simple keras model used to ensure that there are no leaks. 6647,MemoryTest,tensorflow/tensorflow/python/keras/tests/memory_test.py,46,class, 6648,basic_sequential,tensorflow/tensorflow/python/keras/tests/model_architectures.py,29,function,Basic sequential model. 6649,basic_sequential_deferred,tensorflow/tensorflow/python/keras/tests/model_architectures.py,38,function,Sequential model with deferred input shape. 6650,stacked_rnn,tensorflow/tensorflow/python/keras/tests/model_architectures.py,47,function,Stacked RNN model. 6651,lstm,tensorflow/tensorflow/python/keras/tests/model_architectures.py,57,function,LSTM model. 6652,multi_input_multi_output,tensorflow/tensorflow/python/keras/tests/model_architectures.py,68,function,Multi-input Multi-output model. 6653,nested_sequential_in_functional,tensorflow/tensorflow/python/keras/tests/model_architectures.py,85,function,A sequential model nested in a functional model. 6654,seq_to_seq,tensorflow/tensorflow/python/keras/tests/model_architectures.py,99,function,Sequence to sequence model. 
6655,shared_layer_functional,tensorflow/tensorflow/python/keras/tests/model_architectures.py,121,function,Shared layer in a functional model. 6656,shared_sequential,tensorflow/tensorflow/python/keras/tests/model_architectures.py,140,function,Shared sequential model in a functional model. 6657,MySubclassModel,tensorflow/tensorflow/python/keras/tests/model_architectures.py,156,class,A subclass model. 6658,nested_subclassed_model,tensorflow/tensorflow/python/keras/tests/model_architectures.py,181,function,A subclass model nested in another subclass model. 6659,nested_subclassed_in_functional_model,tensorflow/tensorflow/python/keras/tests/model_architectures.py,203,function,A subclass model nested in a functional model. 6660,nested_functional_in_subclassed_model,tensorflow/tensorflow/python/keras/tests/model_architectures.py,214,function,A functional model nested in a subclass model. 6661,shared_layer_subclassed_model,tensorflow/tensorflow/python/keras/tests/model_architectures.py,240,function,Shared layer in a subclass model. 6662,functional_with_keyword_args,tensorflow/tensorflow/python/keras/tests/model_architectures.py,261,function,A functional model with keyword args. 6663,get_models,tensorflow/tensorflow/python/keras/tests/model_architectures.py,292,function,Get all models excluding the specified ones. 6664,TestModelArchitectures,tensorflow/tensorflow/python/keras/tests/model_architectures_test.py,35,class, 6665,ModelSubclassCompiledTest,tensorflow/tensorflow/python/keras/tests/model_subclassing_compiled_test.py,39,class, 6666,ModelSubclassingTest,tensorflow/tensorflow/python/keras/tests/model_subclassing_test.py,51,class, 6667,GraphSpecificModelSubclassingTests,tensorflow/tensorflow/python/keras/tests/model_subclassing_test.py,480,class, 6668,CustomCallSignatureTests,tensorflow/tensorflow/python/keras/tests/model_subclassing_test.py,608,class, 6669,SimpleConvTestModel,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,26,class, 6670,get_multi_io_subclass_model,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,42,function,Creates MultiIOModel for the tests of subclass model. 6671,NestedTestModel1,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,61,class,"A model subclass nested inside a model subclass. " 6672,NestedTestModel2,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,81,class,"A model subclass with a functional-API graph network inside. " 6673,get_nested_model_3,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,109,function, 6674,CustomCallModel,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,136,class, 6675,TrainingNoDefaultModel,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,151,class, 6676,TrainingMaskingModel,tensorflow/tensorflow/python/keras/tests/model_subclassing_test_util.py,161,class, 6677,_NumpyFunctionCallback,tensorflow/tensorflow/python/keras/tests/op_callbacks_test.py,50,class, 6678,OpCallbacksTest,tensorflow/tensorflow/python/keras/tests/op_callbacks_test.py,133,class, 6679,_ModelWithOptimizerUsingDefun,tensorflow/tensorflow/python/keras/tests/saved_model_test.py,38,class, 6680,MemoryTests,tensorflow/tensorflow/python/keras/tests/saved_model_test.py,60,class, 6681,NonLayerTrackable,tensorflow/tensorflow/python/keras/tests/saver_test.py,39,class, 6682,MyModel,tensorflow/tensorflow/python/keras/tests/saver_test.py,47,class,A concrete Model for testing. 
6683,TrackableCompatibilityTests,tensorflow/tensorflow/python/keras/tests/saver_test.py,62,class, 6684,SerializationTests,tensorflow/tensorflow/python/keras/tests/serialization_util_test.py,35,class, 6685,SummaryOpsTest,tensorflow/tensorflow/python/keras/tests/summary_ops_test.py,36,class, 6686,events_from_file,tensorflow/tensorflow/python/keras/tests/summary_ops_test.py,110,function,"Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.Event protos in the event file." 6687,events_from_logdir,tensorflow/tensorflow/python/keras/tests/summary_ops_test.py,128,function,"Returns all events in the single event file in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file." 6688,Bias,tensorflow/tensorflow/python/keras/tests/temporal_sample_weights_correctness_test.py,32,class,Layer that adds a bias to its inputs. 6689,get_multi_io_temporal_model,tensorflow/tensorflow/python/keras/tests/temporal_sample_weights_correctness_test.py,45,function, 6690,get_compiled_multi_io_model_temporal,tensorflow/tensorflow/python/keras/tests/temporal_sample_weights_correctness_test.py,58,function, 6691,run_with_different_sample_weight_mode_inputs,tensorflow/tensorflow/python/keras/tests/temporal_sample_weights_correctness_test.py,70,function,"Executes the given function with different sample weight mode inputs. Args: fn: Training or eval function to execute. partial_sw: Boolean flag to indicate whether temporal sample weight mode should be set partially just for one output." 6692,TestMetricsCorrectnessMultiIOTemporal,tensorflow/tensorflow/python/keras/tests/temporal_sample_weights_correctness_test.py,105,class, 6693,HasList,tensorflow/tensorflow/python/keras/tests/tracking_test.py,45,class, 6694,ListTests,tensorflow/tensorflow/python/keras/tests/tracking_test.py,76,class, 6695,ListWrapperTest,tensorflow/tensorflow/python/keras/tests/tracking_test.py,235,class, 6696,HasMapping,tensorflow/tensorflow/python/keras/tests/tracking_test.py,245,class, 6697,MappingTests,tensorflow/tensorflow/python/keras/tests/tracking_test.py,268,class, 6698,HasTuple,tensorflow/tensorflow/python/keras/tests/tracking_test.py,400,class, 6699,TupleTests,tensorflow/tensorflow/python/keras/tests/tracking_test.py,418,class, 6700,InterfaceTests,tensorflow/tensorflow/python/keras/tests/tracking_test.py,549,class, 6701,MyModel,tensorflow/tensorflow/python/keras/tests/tracking_util_test.py,56,class,A concrete Model for testing. 6702,NonLayerTrackable,tensorflow/tensorflow/python/keras/tests/tracking_util_test.py,71,class, 6703,InterfaceTests,tensorflow/tensorflow/python/keras/tests/tracking_util_test.py,79,class, 6704,CheckpointingTests,tensorflow/tensorflow/python/keras/tests/tracking_util_test.py,118,class, 6705,_ManualScope,tensorflow/tensorflow/python/keras/tests/tracking_util_test.py,712,class, 6706,TemplateTests,tensorflow/tensorflow/python/keras/tests/tracking_util_test.py,725,class, 6707,CheckpointCompatibilityTests,tensorflow/tensorflow/python/keras/tests/tracking_util_test.py,782,class, 6708,NonLayerTrackable,tensorflow/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py,49,class, 6709,MyModel,tensorflow/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py,58,class,A concrete Model for testing. 
6710,CheckpointingTests,tensorflow/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py,73,class, 6711,CheckpointCompatibilityTests,tensorflow/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py,581,class, 6712,NonLayerTrackable,tensorflow/tensorflow/python/keras/tests/tracking_util_xla_test.py,32,class, 6713,Subclassed,tensorflow/tensorflow/python/keras/tests/tracking_util_xla_test.py,40,class,A concrete Model for testing. 6714,CheckpointingTests,tensorflow/tensorflow/python/keras/tests/tracking_util_xla_test.py,55,class, 6715,Layer,tensorflow/tensorflow/python/keras/type/types.py,34,class,"This is the class from which all layers inherit. A layer is a callable object that takes as input one or more tensors and that outputs one or more tensors. It involves *computation*, defined in the `call()` method, and a *state* (weight variables), defined either in the constructor `__init__()` or in the `build()` method. Users will just instantiate a layer and then treat it as a callable. We recommend that descendants of `Layer` implement the following methods: * `__init__()`: Defines custom layer attributes, and creates layer state variables that do not depend on input shapes, using `add_weight()`. * `build(self, input_shape)`: This method can be used to create weights that depend on the shape(s) of the input(s), using `add_weight()`. `__call__()` will automatically build the layer (if it has not been built yet) by calling `build()`. * `call(self, *args, **kwargs)`: Called in `__call__` after making sure `build()` has been called. `call()` performs the logic of applying the layer to the input tensors (which should be passed in as argument). Two reserved keyword arguments you can optionally use in `call()` are: - `training` (boolean, whether the call is in inference mode or training mode) - `mask` (boolean tensor encoding masked timesteps in the input, used in RNN layers) * `get_config(self)`: Returns a dictionary containing the configuration used to initialize this layer. If the keys differ from the arguments in `__init__`, then override `from_config(self)` as well. This method is used when saving the layer or a model that contains this layer. Examples: Here's a basic example: a layer with two variables, `w` and `b`, that returns `y = w . x + b`. It shows how to implement `build()` and `call()`. Variables set as attributes of a layer are tracked as weights of the layers (in `layer.weights`). ```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): # Create the state of the layer (weights) w_init = tf.random_normal_initializer() self.w = tf.Variable( initial_value=w_init(shape=(input_shape[-1], self.units), dtype='float32'), trainable=True) b_init = tf.zeros_initializer() self.b = tf.Variable( initial_value=b_init(shape=(self.units,), dtype='float32'), trainable=True) def call(self, inputs): # Defines the computation from inputs to outputs return tf.matmul(inputs, self.w) + self.b # Instantiates the layer. linear_layer = SimpleDense(4) # This will also call `build(input_shape)` and create the weights. 
y = linear_layer(tf.ones((2, 2))) assert len(linear_layer.weights) == 2 # These weights are trainable, so they're listed in `trainable_weights`: assert len(linear_layer.trainable_weights) == 2 ``` Note that the method `add_weight()` offers a shortcut to create weights: ```python class SimpleDense(Layer): def __init__(self, units=32): super(SimpleDense, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b ``` Besides trainable weights, updated via backpropagation during training, layers can also have non-trainable weights. These weights are meant to be updated manually during `call()`. Here's an example layer that computes the running sum of its inputs: ```python class ComputeSum(Layer): def __init__(self, input_dim): super(ComputeSum, self).__init__() # Create a non-trainable weight. self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False) def call(self, inputs): self.total.assign_add(tf.reduce_sum(inputs, axis=0)) return self.total my_sum = ComputeSum(2) x = tf.ones((2, 2)) y = my_sum(x) print(y.numpy()) # [2. 2.] y = my_sum(x) print(y.numpy()) # [4. 4.] assert my_sum.weights == [my_sum.total] assert my_sum.non_trainable_weights == [my_sum.total] assert my_sum.trainable_weights == [] ``` For more information about creating layers, see the guide [Writing custom layers and models with Keras]( https://www.tensorflow.org/guide/keras/custom_layers_and_models) Arguments: trainable: Boolean, whether the layer's variables should be trainable. name: String name of the layer. dtype: The dtype of the layer's computations and weights (default of `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type of the first input in TensorFlow 1). dynamic: Set this to `True` if your layer should only be run eagerly, and should not be used to generate a static computation graph. This would be the case for a Tree-RNN or a recursive network, for example, or generally for any layer that manipulates tensors using Python control flow. If `False`, we assume that the layer can safely be used to generate a static computation graph. Attributes: name: The name of the layer (string). dtype: The dtype of the layer's computations and weights. If mixed precision is used with a `tf.keras.mixed_precision.experimental.Policy`, this is instead just the dtype of the layer's weights, as the computations are done in a different dtype. updates: List of update ops of this layer. losses: List of losses added by this layer. trainable_weights: List of variables to be included in backprop. non_trainable_weights: List of variables that should not be included in backprop. weights: The concatenation of the lists trainable_weights and non_trainable_weights (in this order). trainable: Whether the layer should be trained (boolean). input_spec: Optional (list of) `InputSpec` object(s) specifying the constraints on inputs that can be accepted by the layer. Each layer has a dtype, which is typically the dtype of the layer's computations and variables. A layer's dtype can be queried via the `Layer.dtype` property. The dtype is specified with the `dtype` constructor argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()` if no dtype is passed. `floatx()` itself defaults to ""float32"". 
Additionally, layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed precision is used, layers may have different computation and variable dtypes. See `tf.keras.mixed_precision.experimental.Policy` for details on layer dtypes." 6716,ToDense,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,52,class,Create a dense (standard) tensor from the given input tensor. 6717,ToRagged,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,78,class,Create a ragged tensor based on a given dense tensor. 6718,ToSparse,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,91,class,Create a sparse tensor based on a given dense tensor. 6719,_SubclassModel,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,101,class,A Keras subclass model. 6720,get_model_from_layers_with_input,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,125,function,Builds a model from a sequence of layers. 6721,get_test_mode_kwargs,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,163,function, 6722,CompositeTensorInternalTest,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,172,class, 6723,CompositeTensorOutputTest,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,219,class, 6724,get_input_name,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,289,function, 6725,get_kwargs,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,299,function, 6726,prepare_inputs,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,308,function, 6727,SparseTensorInputTest,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,332,class, 6728,ScipySparseTensorInputTest,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,375,class, 6729,RaggedTensorInputTest,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,497,class, 6730,RaggedTensorInputValidationTest,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,542,class, 6731,CompositeTensorModelPredictTest,tensorflow/tensorflow/python/keras/utils/composite_tensor_support_test.py,607,class, 6732,InXlaContext,tensorflow/tensorflow/python/keras/utils/control_flow_util.py,26,function, 6733,GraphOrParentsInXlaContext,tensorflow/tensorflow/python/keras/utils/control_flow_util.py,31,function, 6734,IsInWhileLoop,tensorflow/tensorflow/python/keras/utils/control_flow_util.py,40,function, 6735,GetContainingWhileContext,tensorflow/tensorflow/python/keras/utils/control_flow_util.py,45,function,"Returns the first ancestor WhileContext of `ctxt`. Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a while loop. Args: ctxt: ControlFlowContext stop_ctxt: ControlFlowContext, optional. If provided, the search will end if it sees stop_ctxt. Returns: `ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing `ctxt`, or None if `ctxt` is not in a while loop. If `stop_ctxt` is not `None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal." 6736,GetContainingXLAContext,tensorflow/tensorflow/python/keras/utils/control_flow_util.py,67,function,"Returns the first ancestor XLAContext of `ctxt`. Returns `ctxt` if `ctxt` is an XLAContext, or None if `ctxt` is not in an XLA context. Args: ctxt: ControlFlowContext Returns: `ctxt` if `ctxt` is an XLAContext, the most nested XLAContext containing `ctxt`, or None if `ctxt` is not in an XLA context." 
6737,convert_data_format,tensorflow/tensorflow/python/keras/utils/conv_utils.py,28,function, 6738,normalize_tuple,tensorflow/tensorflow/python/keras/utils/conv_utils.py,51,function,"Transforms a single integer or iterable of integers into an integer tuple. Arguments: value: The value to validate and convert. Could be an int, or any iterable of ints. n: The size of the tuple to be returned. name: The name of the argument being validated, e.g. ""strides"" or ""kernel_size"". This is only used to format error messages. Returns: A tuple of n integers. Raises: ValueError: If something other than an int/long or iterable thereof was passed." 6739,conv_output_length,tensorflow/tensorflow/python/keras/utils/conv_utils.py,90,function,"Determines output length of a convolution given input length. Arguments: input_length: integer. filter_size: integer. padding: one of ""same"", ""valid"", ""full"", ""causal"" stride: integer. dilation: dilation rate, integer. Returns: The output length (integer)." 6740,conv_input_length,tensorflow/tensorflow/python/keras/utils/conv_utils.py,116,function,"Determines input length of a convolution given output length. Arguments: output_length: integer. filter_size: integer. padding: one of ""same"", ""valid"", ""full"". stride: integer. Returns: The input length (integer)." 6741,deconv_output_length,tensorflow/tensorflow/python/keras/utils/conv_utils.py,140,function,"Determines output length of a transposed convolution given input length. Arguments: input_length: Integer. filter_size: Integer. padding: one of `""same""`, `""valid""`, `""full""`. output_padding: Integer, amount of padding along the output dimension. Can be set to `None` in which case the output length is inferred. stride: Integer. dilation: Integer. Returns: The output length (integer)." 6742,normalize_data_format,tensorflow/tensorflow/python/keras/utils/conv_utils.py,189,function, 6743,normalize_padding,tensorflow/tensorflow/python/keras/utils/conv_utils.py,200,function, 6744,convert_kernel,tensorflow/tensorflow/python/keras/utils/conv_utils.py,211,function,"Converts a Numpy kernel matrix from Theano format to TensorFlow format. Also works reciprocally, since the transformation is its own inverse. This is used for converting legacy Theano-saved model files. Arguments: kernel: Numpy array (3D, 4D or 5D). Returns: The converted kernel. Raises: ValueError: in case of invalid kernel shape or invalid data_format." 6745,conv_kernel_mask,tensorflow/tensorflow/python/keras/utils/conv_utils.py,236,function,"Compute a mask representing the connectivity of a convolution operation. Assume a convolution with given parameters is applied to an input having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries indicating pairs of input and output locations that are connected by a weight. Example: >>> input_shape = (4,) >>> kernel_shape = (2,) >>> strides = (1,) >>> padding = ""valid"" >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding) array([[ True, False, False], [ True, True, False], [False, True, True], [False, False, True]]) where rows and columns correspond to inputs and outputs respectively. Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. 
6746,conv_kernel_idxs,tensorflow/tensorflow/python/keras/utils/conv_utils.py,315,function,"Yields output-input tuples of indices in a CNN layer. The generator iterates over all `(output_idx, input_idx)` tuples, where `output_idx` is an integer index in a flattened tensor representing a single output image of a convolutional layer that is connected (via the layer weights) to the respective single input image at `input_idx`. Example: >>> input_shape = (2, 2) >>> kernel_shape = (2, 1) >>> strides = (1, 1) >>> padding = ""valid"" >>> filters_in = 1 >>> filters_out = 1 >>> data_format = ""channels_last"" >>> list(conv_kernel_idxs(input_shape, kernel_shape, strides, padding, ... filters_in, filters_out, data_format)) [(0, 0), (0, 2), (1, 1), (1, 3)] Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `""same""` or `""valid""`. `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. filters_in: `int`, number of filters in the input to the layer. filters_out: `int`, number of filters in the output of the layer. data_format: string, ""channels_first"" or ""channels_last"". Yields: The next tuple `(output_idx, input_idx)`, where `output_idx` is an integer index in a flattened tensor representing a single output image of a convolutional layer that is connected (via the layer weights) to the respective single input image at `input_idx`. Raises: ValueError: if `data_format` is neither `""channels_last""` nor `""channels_first""`, or if number of strides, input, and kernel number of dimensions do not match. NotImplementedError: if `padding` is neither `""same""` nor `""valid""`." 6747,conv_connected_inputs,tensorflow/tensorflow/python/keras/utils/conv_utils.py,409,function,"Return locations of the input connected to an output position. Assume a convolution with given parameters is applied to an input having N spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method returns N ranges specifying the input region that was convolved with the kernel to produce the output at position `output_position = (p_out1, ..., p_outN)`. Example: >>> input_shape = (4, 4) >>> kernel_shape = (2, 1) >>> output_position = (1, 1) >>> strides = (1, 1) >>> padding = ""valid"" >>> conv_connected_inputs(input_shape, kernel_shape, output_position, ... strides, padding) [range(1, 3), range(1, 2)] Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. output_position: tuple of size N: `(p_out1, ..., p_outN)`, a single position in the output of the convolution. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `""same""` or `""valid""`. `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. Returns: N ranges `[[p_in_left1, ..., p_in_right1], ..., [p_in_leftN, ..., p_in_rightN]]` specifying the region in the input connected to output_position."
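A NumPy sketch of the connectivity mask that `conv_kernel_mask` describes, restricted to 1-D "valid" convolution; it reproduces the 4x3 doctest result shown above:

```python
import numpy as np

# Sketch of the input-output connectivity mask for a 1-D "valid"
# convolution: entry (i, j) is True when input position i falls inside
# the receptive field of output position j.

def conv_kernel_mask_1d(input_len, kernel_len, stride):
  output_len = (input_len - kernel_len) // stride + 1
  mask = np.zeros((input_len, output_len), dtype=bool)
  for out_pos in range(output_len):
    start = out_pos * stride
    mask[start:start + kernel_len, out_pos] = True  # receptive field
  return mask

expected = np.array([[True, False, False],
                     [True, True, False],
                     [False, True, True],
                     [False, False, True]])
assert (conv_kernel_mask_1d(4, 2, 1) == expected).all()
```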
6748,conv_output_shape,tensorflow/tensorflow/python/keras/utils/conv_utils.py,468,function,"Return the output shape of an N-D convolution. Forces dimensions where input is empty (size 0) to remain empty. Args: input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the input. kernel_shape: tuple of size N, spatial shape of the convolutional kernel / receptive field. strides: tuple of size N, strides along each spatial dimension. padding: type of padding, string `""same""` or `""valid""`. `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. Returns: tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output." 6749,_get_const_output_shape,tensorflow/tensorflow/python/keras/utils/conv_utils_test.py,30,function, 6750,TestBasicConvUtilsTest,tensorflow/tensorflow/python/keras/utils/conv_utils_test.py,55,class, 6751,TestConvUtils,tensorflow/tensorflow/python/keras/utils/conv_utils_test.py,164,class, 6752,urlretrieve,tensorflow/tensorflow/python/keras/utils/data_utils.py,71,function,"Replacement for `urlretrieve` for Python 2. Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy `urllib` module, known to have issues with proxy management. Arguments: url: url to retrieve. filename: where to store the retrieved data locally. reporthook: a hook function that will be called once on establishment of the network connection and once after each block read thereafter. The hook will be passed three arguments; a count of blocks transferred so far, a block size in bytes, and the total size of the file. data: `data` argument passed to `urlopen`." 6753,is_generator_or_sequence,tensorflow/tensorflow/python/keras/utils/data_utils.py,111,function,Check if `x` is a Keras generator type. 6754,_extract_archive,tensorflow/tensorflow/python/keras/utils/data_utils.py,119,function,"Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats. Arguments: file_path: path to the archive file. path: path to extract the archive file. archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. Returns: True if a match was found and an archive extraction was completed, False otherwise." 6755,get_file,tensorflow/tensorflow/python/keras/utils/data_utils.py,169,function,"Downloads a file from a URL if it is not already in the cache. By default the file at the url `origin` is downloaded to the cache_dir `~/.keras`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download.
The command line programs `shasum` and `sha256sum` can compute the hash. Example: ```python path_to_downloaded_file = tf.keras.utils.get_file( ""flower_photos"", ""https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"", untar=True) ``` Arguments: fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. untar: Deprecated in favor of `extract` argument. Boolean, whether the file should be decompressed. md5_hash: Deprecated in favor of `file_hash` argument. md5 hash of the file for verification. file_hash: The expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported. cache_subdir: Subdirectory under the Keras cache dir where the file is saved. If an absolute path `/path/to/folder` is specified the file will be saved at that location. hash_algorithm: Select the hash algorithm to verify the file. Options are `'md5'`, `'sha256'`, and `'auto'`. The default 'auto' detects the hash algorithm in use. extract: True tries extracting the file as an Archive, like tar or zip. archive_format: Archive format to try for extracting the file. Options are `'auto'`, `'tar'`, `'zip'`, and `None`. `'tar'` includes tar, tar.gz, and tar.bz files. The default `'auto'` corresponds to `['tar', 'zip']`. None or an empty list will return no matches found. cache_dir: Location to store cached files, when None it defaults to the default directory `~/.keras/`. Returns: Path to the downloaded file" 6756,_makedirs_exist_ok,tensorflow/tensorflow/python/keras/utils/data_utils.py,300,function, 6757,_hash_file,tensorflow/tensorflow/python/keras/utils/data_utils.py,312,function,"Calculates a file sha256 or md5 hash. Example: ```python _hash_file('/path/to/file.zip') 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` Arguments: fpath: path to the file being validated algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`. The default `'auto'` detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: The file hash" 6758,validate_file,tensorflow/tensorflow/python/keras/utils/data_utils.py,343,function,"Validates a file against a sha256 or md5 hash. Arguments: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: Whether the file is valid"
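A sketch of the chunked hashing that `_hash_file` and `validate_file` describe, using only the standard library; reading in `chunk_size` pieces keeps memory bounded for large files:

```python
import hashlib

# Sketch of chunked sha256 file hashing as described for _hash_file /
# validate_file (sha256 only; the real helpers also support md5).

def sha256_of_file(fpath, chunk_size=65535):
  hasher = hashlib.sha256()
  with open(fpath, "rb") as f:
    for chunk in iter(lambda: f.read(chunk_size), b""):
      hasher.update(chunk)
  return hasher.hexdigest()

def validate_file(fpath, expected_hash):
  return sha256_of_file(fpath) == expected_hash
```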
6759,ThreadsafeIter,tensorflow/tensorflow/python/keras/utils/data_utils.py,368,class,Wrap an iterator with a lock and propagate exceptions to all threads. 6760,threadsafe_generator,tensorflow/tensorflow/python/keras/utils/data_utils.py,402,function, 6761,Sequence,tensorflow/tensorflow/python/keras/utils/data_utils.py,412,class,"Base object for fitting to a sequence of data, such as a dataset. Every `Sequence` must implement the `__getitem__` and the `__len__` methods. If you want to modify your dataset between epochs you may implement `on_epoch_end`. The method `__getitem__` should return a complete batch. Notes: `Sequence` is a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch, which is not the case with generators. Examples: ```python from skimage.io import imread from skimage.transform import resize import numpy as np import math # Here, `x_set` is a list of paths to the images # and `y_set` are the associated classes. class CIFAR10Sequence(Sequence): def __init__(self, x_set, y_set, batch_size): self.x, self.y = x_set, y_set self.batch_size = batch_size def __len__(self): return math.ceil(len(self.x) / self.batch_size) def __getitem__(self, idx): batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size] batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ resize(imread(file_name), (200, 200)) for file_name in batch_x]), np.array(batch_y) ```" 6762,iter_sequence_infinite,tensorflow/tensorflow/python/keras/utils/data_utils.py,490,function,"Iterates indefinitely over a Sequence. Arguments: seq: `Sequence` instance. Yields: Batches of data from the `Sequence`." 6763,dont_use_multiprocessing_pool,tensorflow/tensorflow/python/keras/utils/data_utils.py,520,function, 6764,get_pool_class,tensorflow/tensorflow/python/keras/utils/data_utils.py,532,function, 6765,get_worker_id_queue,tensorflow/tensorflow/python/keras/utils/data_utils.py,543,function,Lazily create the queue to track worker ids. 6766,init_pool,tensorflow/tensorflow/python/keras/utils/data_utils.py,551,function, 6767,terminate_keras_multiprocessing_pools,tensorflow/tensorflow/python/keras/utils/data_utils.py,559,function,"Destroy Keras' multiprocessing pools to prevent deadlocks. In general multiprocessing.Pool can interact quite badly with other, seemingly unrelated, parts of a codebase due to Pool's reliance on fork. This method cleans up all pools which are known to belong to Keras (and thus can be safely terminated). Args: grace_period: Time (in seconds) to wait for process cleanup to propagate. use_sigkill: Boolean of whether or not to perform a cleanup pass using SIGKILL. Returns: A list of human readable strings describing all issues encountered. It is up to the caller to decide whether to treat this as an error condition." 6768,get_index,tensorflow/tensorflow/python/keras/utils/data_utils.py,665,function,"Get the value from the Sequence `uid` at index `i`. To allow multiple Sequences to be used at the same time, we use `uid` to get a specific one. A single Sequence would cause the validation to overwrite the training Sequence. Arguments: uid: int, Sequence identifier i: index Returns: The value at index `i`." 6769,SequenceEnqueuer,tensorflow/tensorflow/python/keras/utils/data_utils.py,683,class,"Base class to enqueue inputs. The task of an Enqueuer is to use parallelism to speed up preprocessing. This is done with processes or threads. Example: ```python enqueuer = SequenceEnqueuer(...) enqueuer.start() datas = enqueuer.get() for data in datas: # Use the inputs; training, evaluating, predicting. # ... stop sometime. enqueuer.stop() ``` The `enqueuer.get()` should be an infinite stream of data." 6770,OrderedEnqueuer,tensorflow/tensorflow/python/keras/utils/data_utils.py,812,class,"Builds an Enqueuer from a Sequence. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. Arguments: sequence: A `tf.keras.utils.data_utils.Sequence` object. use_multiprocessing: use multiprocessing if True, otherwise threading shuffle: whether to shuffle the data at the beginning of each epoch" 6771,init_pool_generator,tensorflow/tensorflow/python/keras/utils/data_utils.py,903,function,"Initializer function for pool workers. Args: gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes. id_queue: A multiprocessing Queue of worker ids. This is used to indicate that a worker process was created by Keras and can be terminated using the cleanup_all_keras_forkpools utility." 6772,next_sample,tensorflow/tensorflow/python/keras/utils/data_utils.py,930,function,"Gets the next value from the generator `uid`. To allow multiple generators to be used at the same time, we use `uid` to get a specific one. A single generator would cause the validation to overwrite the training generator. Arguments: uid: int, generator identifier Returns: The next value of generator `uid`." 6773,GeneratorEnqueuer,tensorflow/tensorflow/python/keras/utils/data_utils.py,947,class,"Builds a queue out of a data generator. The provided generator can be finite in which case the class will throw a `StopIteration` exception. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. Arguments: generator: a generator function which yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, will be incremented by one for each worker." 6774,TestGetFileAndValidateIt,tensorflow/tensorflow/python/keras/utils/data_utils_test.py,35,class, 6775,TestSequence,tensorflow/tensorflow/python/keras/utils/data_utils_test.py,91,class, 6776,FaultSequence,tensorflow/tensorflow/python/keras/utils/data_utils_test.py,107,class, 6777,create_generator_from_sequence_threads,tensorflow/tensorflow/python/keras/utils/data_utils_test.py,117,function, 6778,create_generator_from_sequence_pcs,tensorflow/tensorflow/python/keras/utils/data_utils_test.py,122,function, 6779,TestEnqueuers,tensorflow/tensorflow/python/keras/utils/data_utils_test.py,127,class, 6780,CustomObjectScope,tensorflow/tensorflow/python/keras/utils/generic_utils.py,53,class,"Exposes custom classes/functions to Keras deserialization internals. Under a scope `with custom_object_scope(objects_dict)`, Keras methods such as `tf.keras.models.load_model` or `tf.keras.models.model_from_config` will be able to deserialize any custom object referenced by a saved config (e.g. a custom layer or metric). Example: Consider a custom regularizer `my_regularizer`: ```python layer = Dense(3, kernel_regularizer=my_regularizer) config = layer.get_config() # Config contains a reference to `my_regularizer` ... # Later: with custom_object_scope({'my_regularizer': my_regularizer}): layer = Dense.from_config(config) ``` Arguments: *args: Dictionary or dictionaries of `{name: object}` pairs." 6781,get_custom_objects,tensorflow/tensorflow/python/keras/utils/generic_utils.py,94,function,"Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using `custom_object_scope` is preferred, but `get_custom_objects` can be used to directly access the current collection of custom objects. Example: ```python get_custom_objects().clear() get_custom_objects()['MyObject'] = MyObject ``` Returns: Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`)." 6782,serialize_keras_class_and_config,tensorflow/tensorflow/python/keras/utils/generic_utils.py,114,function,Returns the serialization of the class with the given config. 6783,register_keras_serializable,tensorflow/tensorflow/python/keras/utils/generic_utils.py,120,function,"Registers an object with the Keras serialization framework. 
This decorator injects the decorated class or function into the Keras custom object dictionary, so that it can be serialized and deserialized without needing an entry in the user-provided custom object dict. It also injects a function that Keras will call to get the object's serializable string key. Note that to be serialized and deserialized, classes must implement the `get_config()` method. Functions do not have this requirement. The object will be registered under the key 'package>name', where `name` defaults to the object name if not passed. Arguments: package: The package that this class belongs to. name: The name to serialize this class under in this package. If None, the class' name will be used. Returns: A decorator that registers the decorated class with the passed names." 6784,get_registered_name,tensorflow/tensorflow/python/keras/utils/generic_utils.py,169,function,"Returns the name registered to an object within the Keras framework. This function is part of the Keras serialization and deserialization framework. It maps objects to the string names associated with those objects for serialization/deserialization. Args: obj: The object to look up. Returns: The name associated with the object, or the default Python name if the object is not registered." 6785,skip_failed_serialization,tensorflow/tensorflow/python/keras/utils/generic_utils.py,190,function, 6786,get_registered_object,tensorflow/tensorflow/python/keras/utils/generic_utils.py,201,function,"Returns the class associated with `name` if it is registered with Keras. This function is part of the Keras serialization and deserialization framework. It maps strings to the objects associated with them for serialization/deserialization. Example: ``` def from_config(cls, config, custom_objects=None): if 'my_custom_object_name' in config: config['hidden_cls'] = tf.keras.utils.get_registered_object( config['my_custom_object_name'], custom_objects=custom_objects) ``` Args: name: The name to look up. custom_objects: A dictionary of custom objects to look the name up in. Generally, custom_objects is provided by the user. module_objects: A dictionary of custom objects to look the name up in. Generally, module_objects is provided by midlevel library implementers. Returns: An instantiable class associated with 'name', or None if no such class exists." 6787,serialize_keras_object,tensorflow/tensorflow/python/keras/utils/generic_utils.py,237,function,Serialize a Keras object into a JSON-compatible representation. 6788,get_custom_objects_by_name,tensorflow/tensorflow/python/keras/utils/generic_utils.py,275,function,Returns the item if it is in either local or global custom objects. 6789,class_and_config_for_serialized_keras_object,tensorflow/tensorflow/python/keras/utils/generic_utils.py,284,function,Returns the class name and config for a serialized keras object. 6790,deserialize_keras_object,tensorflow/tensorflow/python/keras/utils/generic_utils.py,336,function,Turns the serialized form of a Keras object back into an actual object. 6791,func_dump,tensorflow/tensorflow/python/keras/utils/generic_utils.py,393,function,"Serializes a user defined function. Arguments: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`." 6792,func_load,tensorflow/tensorflow/python/keras/utils/generic_utils.py,416,function,"Deserializes a user defined function. Arguments: code: bytecode of the function. defaults: defaults of the function. closure: closure of the function. globs: dictionary of global objects. Returns: A function object."
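A usage sketch for `register_keras_serializable`; `MyPackage` and `ScaleLayer` are example names, and `get_config` is the method the docstring above requires for classes to round-trip:

```python
import tensorflow as tf

# The layer is stored under the key 'MyPackage>ScaleLayer' and can then be
# deserialized from a saved config without a custom_objects dict.

@tf.keras.utils.register_keras_serializable(package='MyPackage')
class ScaleLayer(tf.keras.layers.Layer):
  def __init__(self, scale=2.0, **kwargs):
    super(ScaleLayer, self).__init__(**kwargs)
    self.scale = scale

  def call(self, inputs):
    return inputs * self.scale

  def get_config(self):  # required for serialization
    config = super(ScaleLayer, self).get_config()
    config.update({'scale': self.scale})
    return config

print(tf.keras.utils.get_registered_name(ScaleLayer))  # 'MyPackage>ScaleLayer'
```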
6793,has_arg,tensorflow/tensorflow/python/keras/utils/generic_utils.py,465,function,"Checks if a callable accepts a given keyword argument. Arguments: fn: Callable to inspect. name: Check if `fn` can be called with `name` as a keyword argument. accept_all: What to return if there is no parameter called `name` but the function accepts a `**kwargs` argument. Returns: bool, whether `fn` accepts a `name` keyword argument." 6794,Progbar,tensorflow/tensorflow/python/keras/utils/generic_utils.py,484,class,"Displays a progress bar. Arguments: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) stateful_metrics: Iterable of string names of metrics that should *not* be averaged over time. Metrics in this list will be displayed as-is. All others will be averaged by the progbar before display. interval: Minimum visual progress update interval (in seconds). unit_name: Display name for step counts (usually ""step"" or ""sample"")." 6795,make_batches,tensorflow/tensorflow/python/keras/utils/generic_utils.py,668,function,"Returns a list of batch indices (tuples of indices). Arguments: size: Integer, total size of the data to slice into batches. batch_size: Integer, batch size. Returns: A list of tuples of array indices." 6796,slice_arrays,tensorflow/tensorflow/python/keras/utils/generic_utils.py,683,function,"Slice an array or list of arrays. This takes an array-like, or a list of array-likes, and outputs: - arrays[start:stop] if `arrays` is an array-like - [x[start:stop] for x in arrays] if `arrays` is a list Can also work on list/array of indices: `slice_arrays(x, indices)` Arguments: arrays: Single array or list of arrays. start: can be an integer index (start index) or a list/array of indices stop: integer (stop index); should be None if `start` was a list. Returns: A slice of the array(s). Raises: ValueError: If the value of start is a list and stop is not None." 6797,to_list,tensorflow/tensorflow/python/keras/utils/generic_utils.py,729,function,"Normalizes a list/tensor into a list. If a tensor is passed, we return a list of size 1 containing the tensor. Arguments: x: target object to be normalized. Returns: A list." 6798,to_snake_case,tensorflow/tensorflow/python/keras/utils/generic_utils.py,746,function, 6799,is_all_none,tensorflow/tensorflow/python/keras/utils/generic_utils.py,756,function, 6800,check_for_unexpected_keys,tensorflow/tensorflow/python/keras/utils/generic_utils.py,765,function, 6801,validate_kwargs,tensorflow/tensorflow/python/keras/utils/generic_utils.py,773,function,Checks that all keyword arguments are in the set of allowed keys. 6802,validate_config,tensorflow/tensorflow/python/keras/utils/generic_utils.py,782,function,Determines whether config appears to be a valid layer config. 6803,default,tensorflow/tensorflow/python/keras/utils/generic_utils.py,787,function,Decorates a method to detect overrides in subclasses. 6804,is_default,tensorflow/tensorflow/python/keras/utils/generic_utils.py,793,function,Check if a method is decorated with the `default` wrapper. 6805,populate_dict_with_module_objects,tensorflow/tensorflow/python/keras/utils/generic_utils.py,798,function, 6806,LazyLoader,tensorflow/tensorflow/python/keras/utils/generic_utils.py,806,class,"Lazily import a module, mainly to avoid pulling in large dependencies." 
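A usage sketch for `Progbar`; per the docstring above, marking `loss` as a stateful metric displays it as-is instead of averaging it over the run (the loop body is a stand-in for real work):

```python
import time
import tensorflow as tf

progbar = tf.keras.utils.Progbar(target=50, stateful_metrics=['loss'])
for step in range(50):
  time.sleep(0.01)  # stand-in for a training step
  progbar.update(step + 1, values=[('loss', 1.0 / (step + 1))])
```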
6807,HasArgTest,tensorflow/tensorflow/python/keras/utils/generic_utils_test.py,29,class, 6808,TestCustomObjectScope,tensorflow/tensorflow/python/keras/utils/generic_utils_test.py,67,class, 6809,SerializeKerasObjectTest,tensorflow/tensorflow/python/keras/utils/generic_utils_test.py,85,class, 6810,SliceArraysTest,tensorflow/tensorflow/python/keras/utils/generic_utils_test.py,358,class, 6811,_path_to_string,tensorflow/tensorflow/python/keras/utils/io_utils.py,40,function, 6812,_path_to_string,tensorflow/tensorflow/python/keras/utils/io_utils.py,46,function, 6813,_path_to_string,tensorflow/tensorflow/python/keras/utils/io_utils.py,53,function, 6814,path_to_string,tensorflow/tensorflow/python/keras/utils/io_utils.py,57,function,"Convert `PathLike` objects to their string representation. If given a non-string typed path object, converts it to its string representation. Depending on the python version used, this function can handle the following arguments: python >= 3.6: Everything supporting the fs path protocol https://www.python.org/dev/peps/pep-0519 python >= 3.4: Only `pathlib.Path` objects If the object passed to `path` is not among the above, then it is returned unchanged. This allows e.g. passthrough of file objects through this function. Args: path: `PathLike` object that represents a path Returns: A string representation of the path argument, if Python support exists." 6815,HDF5Matrix,tensorflow/tensorflow/python/keras/utils/io_utils.py,81,class,"Representation of HDF5 dataset to be used instead of a Numpy array. THIS CLASS IS DEPRECATED. Training with HDF5Matrix may not be optimized for performance, and might not work with every distribution strategy. We recommend using https://github.com/tensorflow/io to load your HDF5 data into a tf.data Dataset and passing that dataset to Keras." 6816,ask_to_proceed_with_overwrite,tensorflow/tensorflow/python/keras/utils/io_utils.py,236,function,"Produces a prompt asking about overwriting a file. Arguments: filepath: the path to the file to be overwritten. Returns: True if we can proceed with overwrite, False otherwise." 6817,create_dataset,tensorflow/tensorflow/python/keras/utils/io_utils_test.py,40,function, 6818,TestIOUtils,tensorflow/tensorflow/python/keras/utils/io_utils_test.py,53,class, 6819,_to_matrix,tensorflow/tensorflow/python/keras/utils/kernelized_utils.py,25,function,"If input tensor is a vector (i.e., has rank 1), converts it to a matrix." 6820,_align_matrices,tensorflow/tensorflow/python/keras/utils/kernelized_utils.py,36,function,Aligns x and y tensors to allow computations over pairs of their rows. 6821,inner_product,tensorflow/tensorflow/python/keras/utils/kernelized_utils.py,54,function, 6822,exact_gaussian_kernel,tensorflow/tensorflow/python/keras/utils/kernelized_utils.py,60,function,"Computes exact Gaussian kernel value(s) for tensors x and y and stddev. The Gaussian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v||^2 / (2 * stddev^2)) where the norm is the l2-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible." 6823,exact_laplacian_kernel,tensorflow/tensorflow/python/keras/utils/kernelized_utils.py,90,function,"Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Laplacian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible."
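A NumPy sketch of the two kernel formulas documented above, computing pairwise values for rows of `x` and `y` (not the TF implementation, just the arithmetic):

```python
import numpy as np

# Gaussian:  K(u, v) = exp(-||u - v||_2^2 / (2 * stddev**2))
# Laplacian: K(u, v) = exp(-||u - v||_1 / stddev)
# x: [m, dim], y: [n, dim] -> [m, n] matrix of pairwise kernel values.

def gaussian_kernel(x, y, stddev):
  sq_dists = ((x[:, None, :] - y[None, :, :]) ** 2).sum(-1)
  return np.exp(-sq_dists / (2.0 * stddev ** 2))

def laplacian_kernel(x, y, stddev):
  l1_dists = np.abs(x[:, None, :] - y[None, :, :]).sum(-1)
  return np.exp(-l1_dists / stddev)

x = np.array([[0.0, 0.0], [1.0, 1.0]])
assert np.isclose(gaussian_kernel(x, x, 1.0)[0, 0], 1.0)  # K(u, u) == 1
```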
6824,_exact_gaussian,tensorflow/tensorflow/python/keras/utils/kernelized_utils_test.py,30,function, 6825,_exact_laplacian,tensorflow/tensorflow/python/keras/utils/kernelized_utils_test.py,35,function, 6826,KernelizedUtilsTest,tensorflow/tensorflow/python/keras/utils/kernelized_utils_test.py,40,class, 6827,get_source_inputs,tensorflow/tensorflow/python/keras/utils/layer_utils.py,34,function,"Returns the list of input tensors necessary to compute `tensor`. Output will always be a list of tensors (potentially with 1 element). Arguments: tensor: The tensor to start from. layer: Origin layer of the tensor. Will be determined via tensor._keras_history if not provided. node_index: Origin node index of the tensor. Returns: List of input tensors." 6828,validate_string_arg,tensorflow/tensorflow/python/keras/utils/layer_utils.py,72,function,Validates the correctness of a string-based arg. 6829,count_params,tensorflow/tensorflow/python/keras/utils/layer_utils.py,95,function,"Count the total number of scalars composing the weights. Arguments: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights" 6830,print_summary,tensorflow/tensorflow/python/keras/utils/layer_utils.py,112,function,"Prints a summary of a model. Arguments: model: Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. It defaults to `print` (prints to stdout)." 6831,gather_trainable_weights,tensorflow/tensorflow/python/keras/utils/layer_utils.py,274,function,"Lists the trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include.
Their `.trainable` property is used to categorize them. Returns: A list of collected trainable weights/variables." 6832,gather_non_trainable_weights,tensorflow/tensorflow/python/keras/utils/layer_utils.py,297,function,"Lists the non-trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected non-trainable weights/variables." 6833,convert_all_kernels_in_model,tensorflow/tensorflow/python/keras/utils/layer_utils.py,333,function,"Converts all convolution kernels in a model from Theano to TensorFlow. Also works from TensorFlow to Theano. This is used for converting legacy Theano-saved model files. Arguments: model: target model for the conversion." 6834,convert_dense_weights_data_format,tensorflow/tensorflow/python/keras/utils/layer_utils.py,360,function,"Utility useful when changing a convnet's `data_format`. When porting the weights of a convnet from one data format to the other, if the convnet includes a `Flatten` layer (applied to the last convolutional feature map) followed by a `Dense` layer, the weights of that `Dense` layer should be updated to reflect the new dimension ordering. Arguments: dense: The target `Dense` layer. previous_feature_map_shape: A shape tuple of 3 integers, e.g. `(512, 7, 7)`. The shape of the convolutional feature map right before the `Flatten` layer that came before the target `Dense` layer. target_data_format: One of ""channels_last"", ""channels_first"". Set it ""channels_last"" if converting a ""channels_first"" model to ""channels_last"", or reciprocally." 6835,is_builtin_layer,tensorflow/tensorflow/python/keras/utils/layer_utils.py,399,function, 6836,remove_squeezable_dimensions,tensorflow/tensorflow/python/keras/utils/losses_utils.py,38,function,"Squeeze last dim if ranks differ from expected by exactly 1. In the common case where we expect shapes to match, `expected_rank_diff` defaults to 0, and we squeeze the last dimension of the larger rank if they differ by 1. But, for example, if `labels` contains class IDs and `predictions` contains 1 probability per class, we expect `predictions` to have 1 more dimension than `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze `labels` if `rank(predictions) - rank(labels) == 0`, and `predictions` if `rank(predictions) - rank(labels) == 2`. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: labels: Label values, a `Tensor` whose dimensions match `predictions`. predictions: Predicted values, a `Tensor` of arbitrary dimensions. expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`. name: Name of the op. Returns: Tuple of `labels` and `predictions`, possibly with last dim squeezed." 6837,squeeze_or_expand_dimensions,tensorflow/tensorflow/python/keras/utils/losses_utils.py,99,function,"Squeeze or expand last dimension if needed. 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1 (using `remove_squeezable_dimensions`). 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1 from the new rank of `y_pred`. If `sample_weight` is scalar, it is kept scalar. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. 
Args: y_pred: Predicted values, a `Tensor` of arbitrary dimensions. y_true: Optional label `Tensor` whose dimensions match `y_pred`. sample_weight: Optional weight scalar or `Tensor` whose dimensions match `y_pred`. Returns: Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has the last dimension squeezed, `sample_weight` could be extended by one dimension. If `sample_weight` is None, (y_pred, y_true) is returned." 6838,_safe_mean,tensorflow/tensorflow/python/keras/utils/losses_utils.py,188,function,"Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. num_present: The number of measurable elements in `losses`. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned." 6839,_num_elements,tensorflow/tensorflow/python/keras/utils/losses_utils.py,203,function,Computes the number of elements in `losses` tensor. 6840,reduce_weighted_loss,tensorflow/tensorflow/python/keras/utils/losses_utils.py,209,function,Reduces the individual weighted loss measurements. 6841,compute_weighted_loss,tensorflow/tensorflow/python/keras/utils/losses_utils.py,221,function,"Computes the weighted loss. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as `losses`, or broadcastable to `losses`. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `SUM_OVER_BATCH_SIZE`. name: Optional name for the op. Raises: ValueError: If the shape of `sample_weight` is not compatible with `losses`. Returns: Weighted loss `Tensor` of the same type as `losses`. If `reduction` is `NONE`, this has the same shape as `losses`; otherwise, it is scalar." 6842,scale_loss_for_distribution,tensorflow/tensorflow/python/keras/utils/losses_utils.py,278,function,Scales and returns the given loss value by the number of replicas. 6843,cast_losses_to_common_dtype,tensorflow/tensorflow/python/keras/utils/losses_utils.py,287,function,"Cast a list of losses to a common dtype. If any loss is floating-point, they will all be cast to the most-precise floating-point loss. Otherwise the losses are not cast. We also skip casting losses if there are any complex losses. Args: losses: A list of losses. Returns: `losses`, but they have been cast to a common dtype." 6844,Reduction,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,48,class,"Types of metrics reduction. Contains the following values: * `SUM`: Scalar sum of weighted values. * `SUM_OVER_BATCH_SIZE`: Scalar sum of weighted values divided by number of elements. * `WEIGHTED_MEAN`: Scalar sum of weighted values divided by sum of weights." 6845,update_state_wrapper,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,63,function,"Decorator to wrap metric `update_state()` with `add_update()`. Args: update_state_fn: function that accumulates metric statistics. Returns: Decorated function that wraps `update_state_fn()` with `add_update()`."
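A small worked example of the reduction modes listed for `Reduction` (plain NumPy arithmetic, not the TF implementation):

```python
import numpy as np

# SUM adds the weighted values; SUM_OVER_BATCH_SIZE divides that sum by the
# total number of elements; WEIGHTED_MEAN divides it by the sum of weights.

losses = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([0.5, 1.0, 0.0, 2.0])  # weight 0 masks the third sample
weighted = losses * weights               # [0.5, 2.0, 0.0, 8.0]

loss_sum = weighted.sum()                           # 10.5
sum_over_batch_size = weighted.sum() / losses.size  # 10.5 / 4   = 2.625
weighted_mean = weighted.sum() / weights.sum()      # 10.5 / 3.5 = 3.0
```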
6846,result_wrapper,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,98,function,"Decorator to wrap metric `result()` function in `merge_call()`. Result computation is an idempotent operation that simply calculates the metric value using the state variables. If metric state variables are distributed across replicas/devices and `result()` is requested from the context of one device, this function wraps `result()` in a distribution strategy `merge_call()`. With this, the metric state variables will be aggregated across devices. Args: result_fn: function that computes the metric result. Returns: Decorated function that wraps `result_fn()` in distribution strategy `merge_call()`." 6847,weakmethod,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,154,function,Creates a weak reference to the bound method. 6848,assert_thresholds_range,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,169,function, 6849,parse_init_thresholds,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,178,function, 6850,ConfusionMatrix,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,185,class, 6851,AUCCurve,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,192,class,Type of AUC Curve (ROC or PR). 6852,AUCSummationMethod,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,207,class,"Type of AUC summation method. (https://en.wikipedia.org/wiki/Riemann_sum) Contains the following values: * 'interpolation': Applies mid-point summation scheme for `ROC` curve. For `PR` curve, interpolates (true/false) positives but not the ratio that is precision (see Davis & Goadrich 2006 for details). * 'minoring': Applies left summation for increasing intervals and right summation for decreasing intervals. * 'majoring': Applies right summation for increasing intervals and left summation for decreasing intervals." 6853,update_confusion_matrix_variables,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,237,function,"Returns op to update the given confusion matrix variables. For every pair of values in y_true and y_pred: true_positives: y_true == True and y_pred > thresholds false_negatives: y_true == True and y_pred <= thresholds true_negatives: y_true == False and y_pred <= thresholds false_positives: y_true == False and y_pred > thresholds The results will be weighted and added together. When multiple thresholds are provided, we will repeat the same for every threshold. For estimation of these metrics over a stream of data, the function creates an `update_op` operation that updates the given variables. If `sample_weight` is `None`, weights default to 1. Use weights of 0 to mask values. Args: variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys and corresponding variables to update as values. y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`. y_pred: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A float value, float tensor, python list, or tuple of float thresholds in `[0, 1]`, or NEG_INF (used when top_k is set). top_k: Optional int, indicates that the positive labels should be limited to the top k predictions. class_id: Optional int, limits the prediction and labels to the class specified by this argument. sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must be either `1`, or the same as the corresponding `y_true` dimension). multi_label: Optional boolean indicating whether multidimensional prediction/labels should be treated as multilabel responses, or flattened into a single label. When True, the values of `variables_to_update` must have a second dimension equal to the number of labels in y_true and y_pred, and those tensors must not be RaggedTensors. label_weights: (optional) tensor of non-negative weights for multilabel data. The weights are applied when calculating TP, FP, FN, and TN without explicit multilabel handling (i.e. when the data is to be flattened). Returns: Update op. Raises: ValueError: If `y_pred` and `y_true` have mismatched shapes, or if `sample_weight` is not `None` and its shape doesn't match `y_pred`, or if `variables_to_update` contains invalid keys."
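A NumPy sketch of the per-threshold counting rules that `update_confusion_matrix_variables` documents, in the unweighted case:

```python
import numpy as np

# For each threshold t: a prediction counts as positive when y_pred > t,
# and the four counters follow the rules quoted in the docstring above.

def confusion_counts(y_true, y_pred, thresholds):
  y_true = y_true.astype(bool)
  counts = {'tp': [], 'fp': [], 'tn': [], 'fn': []}
  for t in thresholds:
    pos = y_pred > t
    counts['tp'].append(int(np.sum(y_true & pos)))
    counts['fp'].append(int(np.sum(~y_true & pos)))
    counts['tn'].append(int(np.sum(~y_true & ~pos)))
    counts['fn'].append(int(np.sum(y_true & ~pos)))
  return counts

y_true = np.array([1, 1, 0, 0])
y_pred = np.array([0.9, 0.4, 0.6, 0.1])
print(confusion_counts(y_true, y_pred, thresholds=[0.5]))
# {'tp': [1], 'fp': [1], 'tn': [1], 'fn': [1]}
```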
6854,_filter_top_k,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,460,function,"Filters top-k values in the last dim of x and sets the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x." 6855,ragged_assert_compatible_and_get_flat_values,tensorflow/tensorflow/python/keras/utils/metrics_utils.py,479,function,"If ragged, it checks the compatibility and then returns the flat_values. Note: If two tensors are dense, it does not check their compatibility. Note: Although two ragged tensors with different ragged ranks could have identical overall rank and dimension sizes and hence be compatible, we do not support those cases. Args: values: A list of potentially ragged tensor of the same ragged_rank. mask: A potentially ragged tensor of the same ragged_rank as elements in Values. Returns: A tuple in which the first element is the list of tensors and the second is the mask tensor. ([Values], mask). Mask and the element in Values are equal to the flat_values of the input arguments (if they were ragged)." 6856,RaggedSizeOpTest,tensorflow/tensorflow/python/keras/utils/metrics_utils_test.py,35,class, 6857,FilterTopKTest,tensorflow/tensorflow/python/keras/utils/metrics_utils_test.py,254,class, 6858,_get_available_devices,tensorflow/tensorflow/python/keras/utils/multi_gpu_utils.py,30,function, 6859,_normalize_device_name,tensorflow/tensorflow/python/keras/utils/multi_gpu_utils.py,34,function, 6860,multi_gpu_model,tensorflow/tensorflow/python/keras/utils/multi_gpu_utils.py,42,function,"Replicates a model on different GPUs. Specifically, this function implements single-machine multi-GPU data parallelism. It works in the following way: - Divide the model's input(s) into multiple sub-batches. - Apply a model copy on each sub-batch. Every model copy is executed on a dedicated GPU. - Concatenate the results (on CPU) into one big batch. E.g. if your `batch_size` is 64 and you use `gpus=2`, then we will divide the input into 2 sub-batches of 32 samples, process each sub-batch on one GPU, then return the full batch of 64 processed samples. This induces quasi-linear speedup on up to 8 GPUs. This function is only available with the TensorFlow backend for the time being. Arguments: model: A Keras model instance. To avoid OOM errors, this model could have been built on CPU, for instance (see usage example below). gpus: Integer >= 2, number of GPUs on which to create model replicas. cpu_merge: A boolean value to identify whether to force merging model weights under the scope of the CPU or not. cpu_relocation: A boolean value to identify whether to create the model's weights under the scope of the CPU. If the model is not defined under any preceding device scope, you can still rescue it by activating this option. Returns: A Keras `Model` instance which can be used just like the initial `model` argument, but which distributes its workload on multiple GPUs.
Example 1: Training models with weights merge on CPU ```python import tensorflow as tf from keras.applications import Xception from keras.utils import multi_gpu_model import numpy as np num_samples = 1000 height = 224 width = 224 num_classes = 1000 # Instantiate the base model (or ""template"" model). # We recommend doing this under a CPU device scope, # so that the model's weights are hosted on CPU memory. # Otherwise they may end up hosted on a GPU, which would # complicate weight sharing. with tf.device('/cpu:0'): model = Xception(weights=None, input_shape=(height, width, 3), classes=num_classes) # Replicates the model on 8 GPUs. # This assumes that your machine has 8 available GPUs. parallel_model = multi_gpu_model(model, gpus=8) parallel_model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # Generate dummy data. x = np.random.random((num_samples, height, width, 3)) y = np.random.random((num_samples, num_classes)) # This `fit` call will be distributed on 8 GPUs. # Since the batch size is 256, each GPU will process 32 samples. parallel_model.fit(x, y, epochs=20, batch_size=256) # Save model via the template model (which shares the same weights): model.save('my_model.h5') ``` Example 2: Training models with weights merge on CPU using cpu_relocation ```python .. # Not needed to change the device scope for model definition: model = Xception(weights=None, ..) try: model = multi_gpu_model(model, cpu_relocation=True) print(""Training using multiple GPUs.."") except: print(""Training using single GPU or CPU.."") model.compile(..) .. ``` Example 3: Training models with weights merge on GPU (recommended for NV-link) ```python .. # Not needed to change the device scope for model definition: model = Xception(weights=None, ..) try: model = multi_gpu_model(model, cpu_merge=False) print(""Training using multiple GPUs.."") except: print(""Training using single GPU or CPU.."") model.compile(..) .. ``` Raises: ValueError: if the `gpus` argument does not match available devices." 6861,check_if_compatible_devices,tensorflow/tensorflow/python/keras/utils/multi_gpu_utils_test.py,32,function, 6862,TestMultiGPUModel,tensorflow/tensorflow/python/keras/utils/multi_gpu_utils_test.py,42,class, 6863,to_categorical,tensorflow/tensorflow/python/keras/utils/np_utils.py,25,function,"Converts a class vector (integers) to binary class matrix. E.g. for use with categorical_crossentropy. Arguments: y: class vector to be converted into a matrix (integers from 0 to num_classes). num_classes: total number of classes. If `None`, this would be inferred as the (largest number in `y`) + 1. dtype: The data type expected by the input. Default: `'float32'`. Returns: A binary matrix representation of the input. The classes axis is placed last. Example: >>> a = tf.keras.utils.to_categorical([0, 1, 2, 3], num_classes=4) >>> a = tf.constant(a, shape=[4, 4]) >>> print(a) tf.Tensor( [[1. 0. 0. 0.] [0. 1. 0. 0.] [0. 0. 1. 0.] [0. 0. 0. 1.]], shape=(4, 4), dtype=float32) >>> b = tf.constant([.9, .04, .03, .03, ... .3, .45, .15, .13, ... .04, .01, .94, .05, ... .12, .21, .5, .17], ... shape=[4, 4]) >>> loss = tf.keras.backend.categorical_crossentropy(a, b) >>> print(np.around(loss, 5)) [0.10536 0.82807 0.1011 1.77196] >>> loss = tf.keras.backend.categorical_crossentropy(a, a) >>> print(np.around(loss, 5)) [0. 0. 0. 0.] Raises: ValueError: If input contains string value" 6864,normalize,tensorflow/tensorflow/python/keras/utils/np_utils.py,85,function,"Normalizes a Numpy array. Arguments: x: Numpy array to normalize. axis: axis along which to normalize. order: Normalization order (e.g. `order=2` for L2 norm). Returns: A normalized copy of the array."
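A NumPy sketch of the Lp normalization that `normalize` describes (`order=2` gives the L2 norm); the zero-norm guard is an assumption added here to keep all-zero rows unchanged:

```python
import numpy as np

# Divide each slice along `axis` by its Lp norm.

def normalize(x, axis=-1, order=2):
  norms = np.linalg.norm(x, ord=order, axis=axis, keepdims=True)
  norms[norms == 0] = 1.0  # assumption: leave all-zero slices as-is
  return x / norms

x = np.array([[3.0, 4.0], [0.0, 0.0]])
print(normalize(x))  # [[0.6, 0.8], [0.0, 0.0]]
```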
6865,TestNPUtils,tensorflow/tensorflow/python/keras/utils/np_utils_test.py,27,class, 6866,smart_cond,tensorflow/tensorflow/python/keras/utils/tf_utils.py,44,function,"Return either `true_fn()` if predicate `pred` is true else `false_fn()`. If `pred` is a bool or has a constant value, we return either `true_fn()` or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both. Arguments: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using `tf.cond`. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. Raises: TypeError: If `true_fn` or `false_fn` is not callable." 6867,constant_value,tensorflow/tensorflow/python/keras/utils/tf_utils.py,70,function,"Return the bool value for `pred`, or None if `pred` had a dynamic value. Arguments: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. Returns: True or False if `pred` has a constant boolean value, None otherwise. Raises: TypeError: If `pred` is not a Variable, Tensor or bool, or Python integer 1 or 0." 6868,is_tensor_or_tensor_list,tensorflow/tensorflow/python/keras/utils/tf_utils.py,96,function, 6869,get_reachable_from_inputs,tensorflow/tensorflow/python/keras/utils/tf_utils.py,104,function,"Returns the set of tensors/ops reachable from `inputs`. Stops if all targets have been found (target is optional). Only valid in Symbolic mode, not Eager mode. Args: inputs: List of tensors. targets: List of tensors. Returns: A set of tensors reachable from the inputs (includes the inputs themselves)." 6870,map_structure_with_atomic,tensorflow/tensorflow/python/keras/utils/tf_utils.py,159,function,"Maps the atomic elements of a nested structure. Arguments: is_atomic_fn: A function that determines if an element of `nested` is atomic. map_fn: The function to apply to atomic elements of `nested`. nested: A nested structure. Returns: The nested structure, with atomic elements mapped according to `map_fn`. Raises: ValueError: If an element that is neither atomic nor a sequence is encountered." 6871,get_shapes,tensorflow/tensorflow/python/keras/utils/tf_utils.py,194,function,Gets shapes from tensors. 6872,convert_shapes,tensorflow/tensorflow/python/keras/utils/tf_utils.py,202,function,"Converts nested shape representations to desired format. Performs: TensorShapes -> tuples if `to_tuples=True`. tuples of int or None -> TensorShapes if `to_tuples=False`. Valid objects to be converted are: - TensorShapes - tuples with elements of type int or None. - ints - None Arguments: input_shape: A nested structure of objects to be converted to TensorShapes. to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts all tuples representing shapes to TensorShapes. Returns: Nested structure of shapes in desired format. Raises: ValueError: when the input tensor shape can't be converted to tuples, e.g. unknown tensor shape." 6873,ListWrapper,tensorflow/tensorflow/python/keras/utils/tf_utils.py,253,class,A wrapper for lists to be treated as elements for `nest`. 6874,convert_inner_node_data,tensorflow/tensorflow/python/keras/utils/tf_utils.py,263,function,"Either wraps or unwraps innermost node data lists in `ListWrapper` objects.
Arguments: nested: A nested data structure. wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`, unwraps `ListWrapper` objects into lists. Returns: Structure of same type as nested, with lists wrapped/unwrapped." 6875,shape_type_conversion,tensorflow/tensorflow/python/keras/utils/tf_utils.py,308,function,"Decorator that handles tuple/TensorShape conversion. Used in `compute_output_shape` and `build`. Arguments: fn: function to wrap. Returns: Wrapped function." 6876,are_all_symbolic_tensors,tensorflow/tensorflow/python/keras/utils/tf_utils.py,334,function, 6877,is_symbolic_tensor,tensorflow/tensorflow/python/keras/utils/tf_utils.py,341,function,"Returns whether a tensor is symbolic (from a TF graph) or an eager tensor. A Variable can be seen as either: it is considered symbolic when we are in a graph scope, and eager when we are in an eager scope. Arguments: tensor: A tensor instance to test. Returns: True for symbolic tensors, False for eager tensors." 6878,register_symbolic_tensor_type,tensorflow/tensorflow/python/keras/utils/tf_utils.py,373,function,"Allows users to specify types regarded as symbolic `Tensor`s. Used in conjunction with `tf.register_tensor_conversion_function`, calling `tf.keras.utils.register_symbolic_tensor_type(cls)` allows non-`Tensor` objects to be plumbed through Keras layers. Example: ```python # One-time setup. class Foo(object): def __init__(self, input_): self._input = input_ def value(self): return tf.constant(42.) tf.register_tensor_conversion_function( Foo, lambda x, *args, **kwargs: x.value()) tf.keras.utils.register_symbolic_tensor_type(Foo) # User-land. layer = tf.keras.layers.Lambda(lambda input_: Foo(input_)) ``` Arguments: cls: A `class` type which shall be regarded as a symbolic `Tensor`." 6879,type_spec_from_value,tensorflow/tensorflow/python/keras/utils/tf_utils.py,406,function,Grab type_spec without converting array-likes to tensors. 6880,is_ragged,tensorflow/tensorflow/python/keras/utils/tf_utils.py,418,function,Returns true if `tensor` is a ragged tensor or ragged tensor value. 6881,is_tensor_or_variable,tensorflow/tensorflow/python/keras/utils/tf_utils.py,425,function, 6882,assert_no_legacy_layers,tensorflow/tensorflow/python/keras/utils/tf_utils.py,429,function,"Prevent tf.layers.Layers from being used with Keras. Certain legacy layers inherit from their keras analogs; however they are not supported with keras and can lead to subtle and hard to diagnose bugs. Args: layers: A list of layers to check Raises: TypeError: If any elements of layers are tf.layers.Layers" 6883,maybe_init_scope,tensorflow/tensorflow/python/keras/utils/tf_utils.py,455,function,"Open an `init_scope` if in V2 mode and using the keras graph. Arguments: layer: The Layer/Model that is currently active. Yields: None" 6884,graph_context_for_symbolic_tensors,tensorflow/tensorflow/python/keras/utils/tf_utils.py,474,function,Returns graph context manager if any of the inputs is a symbolic tensor. 6885,dataset_is_infinite,tensorflow/tensorflow/python/keras/utils/tf_utils.py,483,function,True if the passed dataset is infinite. 6886,get_tensor_spec,tensorflow/tensorflow/python/keras/utils/tf_utils.py,493,function,Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`. 6887,to_numpy_or_python_type,tensorflow/tensorflow/python/keras/utils/tf_utils.py,522,function,"Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types. For each tensor, it calls `tensor.numpy()`. 
If the result is a scalar value, it converts it to a Python type, such as a float or int, by calling `result.item()`. Numpy scalars are converted, as Python types are often more convenient to deal with. This is especially useful for bfloat16 Numpy scalars, which don't support as many operations as other Numpy values. Args: tensors: A structure of tensors. Returns: `tensors`, but scalar tensors are converted to Python types and non-scalar tensors are converted to Numpy arrays." 6888,_astuple,tensorflow/tensorflow/python/keras/utils/tf_utils.py,549,function,Converts the given attrs to tuple non-recursively. 6889,TestIsSymbolicTensor,tensorflow/tensorflow/python/keras/utils/tf_utils_test.py,40,class, 6890,ConvertInnerNodeDataTest,tensorflow/tensorflow/python/keras/utils/tf_utils_test.py,155,class, 6891,AttrsTest,tensorflow/tensorflow/python/keras/utils/tf_utils_test.py,167,class, 6892,TestIsRagged,tensorflow/tensorflow/python/keras/utils/tf_utils_test.py,186,class, 6893,ModelVersionSelector,tensorflow/tensorflow/python/keras/utils/version_utils.py,51,class,Chooses between Keras v1 and v2 Model class. 6894,LayerVersionSelector,tensorflow/tensorflow/python/keras/utils/version_utils.py,60,class,Chooses between Keras v1 and v2 Layer class. 6895,TensorBoardVersionSelector,tensorflow/tensorflow/python/keras/utils/version_utils.py,69,class,Chooses between Keras v1 and v2 TensorBoard callback class. 6896,should_use_v2,tensorflow/tensorflow/python/keras/utils/version_utils.py,84,function,Determine if v1 or v2 version should be used. 6897,swap_class,tensorflow/tensorflow/python/keras/utils/version_utils.py,98,function,Swaps in v2_cls or v1_cls depending on graph mode. 6898,disallow_legacy_graph,tensorflow/tensorflow/python/keras/utils/version_utils.py,114,function, 6899,is_v1_layer_or_model,tensorflow/tensorflow/python/keras/utils/version_utils.py,125,function, 6900,SplitUtilsTest,tensorflow/tensorflow/python/keras/utils/version_utils_test.py,37,class, 6901,check_pydot,tensorflow/tensorflow/python/keras/utils/vis_utils.py,44,function,Returns True if PyDot and Graphviz are available. 6902,is_wrapped_model,tensorflow/tensorflow/python/keras/utils/vis_utils.py,57,function, 6903,add_edge,tensorflow/tensorflow/python/keras/utils/vis_utils.py,64,function, 6904,model_to_dot,tensorflow/tensorflow/python/keras/utils/vis_utils.py,70,function,"Convert a Keras model to dot format. Arguments: model: A Keras model instance. show_shapes: whether to display shape information. show_dtype: whether to display layer dtypes. show_layer_names: whether to display layer names. rankdir: `rankdir` argument passed to PyDot, a string specifying the format of the plot: 'TB' creates a vertical plot; 'LR' creates a horizontal plot. expand_nested: whether to expand nested models into clusters. dpi: Dots per inch. subgraph: whether to return a `pydot.Cluster` instance. Returns: A `pydot.Dot` instance representing the Keras model or a `pydot.Cluster` instance representing nested model if `subgraph=True`. Raises: ImportError: if graphviz or pydot are not available." 6905,plot_model,tensorflow/tensorflow/python/keras/utils/vis_utils.py,281,function,"Converts a Keras model to dot format and save to a file. 
Example: ```python input = tf.keras.Input(shape=(100,), dtype='int32', name='input') x = tf.keras.layers.Embedding( output_dim=512, input_dim=10000, input_length=100)(input) x = tf.keras.layers.LSTM(32)(x) x = tf.keras.layers.Dense(64, activation='relu')(x) x = tf.keras.layers.Dense(64, activation='relu')(x) x = tf.keras.layers.Dense(64, activation='relu')(x) output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x) model = tf.keras.Model(inputs=[input], outputs=[output]) dot_img_file = '/tmp/model_1.png' tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True) ``` Arguments: model: A Keras model instance. to_file: File name of the plot image. show_shapes: whether to display shape information. show_dtype: whether to display layer dtypes. show_layer_names: whether to display layer names. rankdir: `rankdir` argument passed to PyDot, a string specifying the format of the plot: 'TB' creates a vertical plot; 'LR' creates a horizontal plot. expand_nested: Whether to expand nested models into clusters. dpi: Dots per inch. Returns: A Jupyter notebook Image object if Jupyter is installed. This enables in-line display of the model plots in notebooks." 6906,ModelToDotFormatTest,tensorflow/tensorflow/python/keras/utils/vis_utils_test.py,28,class, 6907,BaseWrapper,tensorflow/tensorflow/python/keras/wrappers/scikit_learn.py,33,class,"Base class for the Keras scikit-learn wrapper. Warning: This class should not be used directly. Use descendant classes instead. Arguments: build_fn: callable function or class instance. **sk_params: model parameters & fitting parameters. The `build_fn` should construct, compile and return a Keras model, which will then be used to fit/predict. One of the following three values could be passed to `build_fn`: 1. A function 2. An instance of a class that implements the `__call__` method 3. None. This means you implement a class that inherits from either `KerasClassifier` or `KerasRegressor`. The `__call__` method of the present class will then be treated as the default `build_fn`. `sk_params` takes both model parameters and fitting parameters. Legal model parameters are the arguments of `build_fn`. Note that like all other estimators in scikit-learn, `build_fn` should provide default values for its arguments, so that you could create the estimator without passing any values to `sk_params`. `sk_params` could also accept parameters for calling `fit`, `predict`, `predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`). Fitting (predicting) parameters are selected in the following order: 1. Values passed to the dictionary arguments of `fit`, `predict`, `predict_proba`, and `score` methods 2. Values passed to `sk_params` 3. The default values of the `keras.models.Sequential` `fit`, `predict`, `predict_proba` and `score` methods When using scikit-learn's `grid_search` API, legal tunable parameters are those you could pass to `sk_params`, including fitting parameters. In other words, you could use `grid_search` to search for the best `batch_size` or `epochs` as well as the model parameters." 6908,KerasClassifier,tensorflow/tensorflow/python/keras/wrappers/scikit_learn.py,191,class,"Implementation of the scikit-learn classifier API for Keras. " 6909,KerasRegressor,tensorflow/tensorflow/python/keras/wrappers/scikit_learn.py,314,class,"Implementation of the scikit-learn regressor API for Keras. 
" 6910,build_fn_clf,tensorflow/tensorflow/python/keras/wrappers/scikit_learn_test.py,37,function, 6911,assert_classification_works,tensorflow/tensorflow/python/keras/wrappers/scikit_learn_test.py,50,function, 6912,build_fn_reg,tensorflow/tensorflow/python/keras/wrappers/scikit_learn_test.py,73,function, 6913,assert_regression_works,tensorflow/tensorflow/python/keras/wrappers/scikit_learn_test.py,86,function, 6914,ScikitLearnAPIWrapperTest,tensorflow/tensorflow/python/keras/wrappers/scikit_learn_test.py,103,class, 6915,AckermannTest,tensorflow/tensorflow/python/kernel_tests/ackermann_test.py,28,class, 6916,AddNTest,tensorflow/tensorflow/python/kernel_tests/aggregate_ops_test.py,34,class, 6917,ArgMaxTest,tensorflow/tensorflow/python/kernel_tests/argmax_op_test.py,31,class, 6918,BatchMatrixTransposeTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,56,class, 6919,BooleanMaskTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,124,class, 6920,OperatorShapeTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,317,class, 6921,ReverseV2Test,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,349,class, 6922,MeshgridTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,525,class, 6923,StridedSliceChecker,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,566,class,Check a given tensor against the numpy result. 6924,StridedSliceTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,630,class,Test the strided slice operation with variants of slices. 6925,StridedSliceShapeChecker,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,851,class, 6926,StridedSliceShapeTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,861,class,Test the shape inference of StridedSliceShapes. 6927,GradSliceChecker,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,917,class,Tests that we can compute a gradient for var^2. 6928,StridedSliceGradTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,956,class,Test that strided slice's custom gradient produces correct gradients. 6929,StridedSliceGradTypeTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1005,class,Test varied index types and host located memory. 6930,BenchmarkSlice,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1057,class, 6931,StridedSliceBenchmark,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1066,class,Benchmark new strided slice operation on non-trivial case. 
6932,StridedSliceAssignChecker,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1110,class, 6933,SliceAssignTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1145,class, 6934,ShapeSizeRankTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1251,class, 6935,SequenceMaskTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1287,class, 6936,ConcatSliceResourceTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1368,class, 6937,IdentityTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1382,class, 6938,PadTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1408,class, 6939,InvertPermutationTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1423,class, 6940,UnravelIndexTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1436,class, 6941,GuaranteeConstOpTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1470,class, 6942,SnapshotOpTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1506,class, 6943,QuantizeAndDequantizeTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1519,class, 6944,SortedSearchTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1566,class, 6945,BatchGatherNdTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,1785,class, 6946,RepeatTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,2000,class, 6947,TileVariantTest,tensorflow/tensorflow/python/kernel_tests/array_ops_test.py,2029,class, 6948,AsStringOpTest,tensorflow/tensorflow/python/kernel_tests/as_string_op_test.py,29,class, 6949,_upsample_filters,tensorflow/tensorflow/python/kernel_tests/atrous_conv2d_test.py,34,function,"Upsamples the filters by a factor of rate along the spatial dimensions. Args: filters: [h, w, in_depth, out_depth]. Original filters. rate: An int, specifying the upsampling rate. Returns: filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with h_up = h + (h - 1) * (rate - 1) w_up = w + (w - 1) * (rate - 1) containing (rate - 1) zeros between consecutive filter values along the filters' spatial dimensions." 6950,AtrousConv2DTest,tensorflow/tensorflow/python/kernel_tests/atrous_conv2d_test.py,60,class, 6951,AtrousConv2DTransposeTest,tensorflow/tensorflow/python/kernel_tests/atrous_conv2d_test.py,165,class, 6952,AtrousDepthwiseConv2DTest,tensorflow/tensorflow/python/kernel_tests/atrous_conv2d_test.py,204,class, 6953,upsample_filters,tensorflow/tensorflow/python/kernel_tests/atrous_convolution_test.py,36,function,"Upsamples the filters by a factor of rate along the spatial dimensions. Args: filters: spatial_shape + [in_channels, out_channels] Original filters. rate: A list of len(spatial_shape) positive ints, specifying the upsampling rate. Returns: filters_up: output_spatial_shape + [in_channels, out_channels]. Upsampled filters with output_spatial_shape[i] = (spatial_shape[i] - 1) * rate[i] + 1 containing (rate[i] - 1) zeros between consecutive filter values along spatial dimension i." 
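Entry 6949 (`_upsample_filters`) spells out the output shape h_up = h + (h - 1) * (rate - 1). A small NumPy sketch, not the test file's actual code, that produces exactly that shape by inserting (rate - 1) zeros between consecutive taps:

```python
import numpy as np

def upsample_filters_sketch(filters, rate):
    """Hypothetical re-implementation of entry 6949: insert (rate - 1) zeros
    between consecutive taps of [h, w, in_depth, out_depth] filters."""
    if rate == 1:
        return filters
    h, w, cin, cout = filters.shape
    # h_up = h + (h - 1) * (rate - 1) == (h - 1) * rate + 1, same for w.
    up = np.zeros(((h - 1) * rate + 1, (w - 1) * rate + 1, cin, cout),
                  dtype=filters.dtype)
    up[::rate, ::rate] = filters  # original taps land on a strided grid
    return up

# 3x3 filters at rate 2 become 5x5: 3 + (3 - 1) * (2 - 1) = 5.
assert upsample_filters_sketch(np.ones((3, 3, 1, 1)), 2).shape == (5, 5, 1, 1)
```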
6954,AtrousConvolutionTest,tensorflow/tensorflow/python/kernel_tests/atrous_convolution_test.py,61,class, 6955,ExtractGlimpseTest,tensorflow/tensorflow/python/kernel_tests/attention_ops_test.py,31,class, 6956,BandedTriangularSolveOpTest,tensorflow/tensorflow/python/kernel_tests/banded_triangular_solve_op_test.py,29,class, 6957,BarrierTest,tensorflow/tensorflow/python/kernel_tests/barrier_ops_test.py,33,class, 6958,Base64OpsTest,tensorflow/tensorflow/python/kernel_tests/base64_ops_test.py,35,class, 6959,GPUBinaryOpsTest,tensorflow/tensorflow/python/kernel_tests/basic_gpu_test.py,40,class, 6960,MathBuiltinUnaryTest,tensorflow/tensorflow/python/kernel_tests/basic_gpu_test.py,92,class, 6961,BroadcastSimpleTest,tensorflow/tensorflow/python/kernel_tests/basic_gpu_test.py,156,class, 6962,GpuMultiSessionMemoryTest,tensorflow/tensorflow/python/kernel_tests/basic_gpu_test.py,236,class,Tests concurrent sessions executing on the same GPU. 6963,GatherTest,tensorflow/tensorflow/python/kernel_tests/batch_gather_op_test.py,34,class, 6964,GetRandomNormalInput,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,35,function, 6965,BatchMatmulOpTest,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,46,class, 6966,_GetBatchMatmulOpTest,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,131,function, 6967,_GetBatchMatmulOpBroadcastingTest,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,141,function, 6968,BatchMatmulGradientTest,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,151,class, 6969,_GetBatchMatmulGradientTest,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,183,function, 6970,_GetBatchMatmulGradientWithBroadcastingTest,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,195,function, 6971,BatchMatMulBenchmark,tensorflow/tensorflow/python/kernel_tests/batch_matmul_op_test.py,212,class, 6972,_AsType,tensorflow/tensorflow/python/kernel_tests/batch_scatter_ops_test.py,32,function, 6973,_NumpyUpdate,tensorflow/tensorflow/python/kernel_tests/batch_scatter_ops_test.py,36,function, 6974,ScatterTest,tensorflow/tensorflow/python/kernel_tests/batch_scatter_ops_test.py,47,class, 6975,PythonOpImpl,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,37,class, 6976,CppOpImpl,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,44,class, 6977,BatchToSpaceDepthToSpace,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,51,class, 6978,BatchToSpaceDepthToSpaceCpp,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,69,class, 6979,BatchToSpaceErrorHandlingTest,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,73,class, 6980,BatchToSpaceErrorHandlingCppTest,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,132,class, 6981,BatchToSpaceNDErrorHandlingTest,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,137,class, 6982,BatchToSpaceGradientTest,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,248,class, 6983,BatchToSpaceGradientCppTest,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,303,class, 6984,BatchToSpaceNDGradientTest,tensorflow/tensorflow/python/kernel_tests/batchtospace_op_test.py,307,class, 6985,BcastOpsTest,tensorflow/tensorflow/python/kernel_tests/bcast_ops_test.py,29,class, 6986,SomeRandomBenchmark,tensorflow/tensorflow/python/kernel_tests/benchmark_test.py,41,class,This Benchmark should automatically be registered in the registry. 
6987,TestReportingBenchmark,tensorflow/tensorflow/python/kernel_tests/benchmark_test.py,57,class,This benchmark (maybe) reports some stuff. 6988,BenchmarkTest,tensorflow/tensorflow/python/kernel_tests/benchmark_test.py,84,class, 6989,BetaincTest,tensorflow/tensorflow/python/kernel_tests/betainc_op_test.py,36,class, 6990,BiasAddTestBase,tensorflow/tensorflow/python/kernel_tests/bias_op_base.py,39,class, 6991,BiasAddDeterministicTest,tensorflow/tensorflow/python/kernel_tests/bias_op_deterministic_test.py,39,class, 6992,BincountTest,tensorflow/tensorflow/python/kernel_tests/bincount_op_test.py,36,class, 6993,BincountOpTest,tensorflow/tensorflow/python/kernel_tests/bincount_op_test.py,140,class, 6994,SparseBincountOpTest,tensorflow/tensorflow/python/kernel_tests/bincount_op_test.py,334,class, 6995,RaggedBincountOpTest,tensorflow/tensorflow/python/kernel_tests/bincount_op_test.py,501,class, 6996,BitcastTest,tensorflow/tensorflow/python/kernel_tests/bitcast_op_test.py,29,class, 6997,BroadcastToTest,tensorflow/tensorflow/python/kernel_tests/broadcast_to_ops_test.py,32,class, 6998,BucketizationOpTest,tensorflow/tensorflow/python/kernel_tests/bucketize_op_test.py,32,class, 6999,RangeSamplerOpsTest,tensorflow/tensorflow/python/kernel_tests/candidate_sampler_ops_test.py,32,class, 7000,CastOpTest,tensorflow/tensorflow/python/kernel_tests/cast_op_test.py,34,class, 7001,SparseTensorCastTest,tensorflow/tensorflow/python/kernel_tests/cast_op_test.py,184,class, 7002,SaturateCastTest,tensorflow/tensorflow/python/kernel_tests/cast_op_test.py,199,class, 7003,AssertV2Asserts,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,45,class, 7004,AssertProperIterableTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,108,class, 7005,AssertEqualTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,155,class, 7006,AssertNoneEqualTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,319,class, 7007,AssertAllCloseTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,411,class, 7008,AssertLessTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,536,class, 7009,AssertLessEqualTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,614,class, 7010,AssertGreaterTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,685,class, 7011,AssertGreaterEqualTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,756,class, 7012,AssertNegativeTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,829,class, 7013,AssertPositiveTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,879,class, 7014,EnsureShapeTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,926,class, 7015,EnsureShapeBenchmark,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1023,class, 7016,AssertRankTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1092,class, 7017,AssertRankInTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1223,class, 7018,AssertRankAtLeastTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1341,class, 7019,AssertNonNegativeTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1433,class, 7020,AssertNonPositiveTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1469,class, 7021,AssertIntegerTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1505,class, 7022,AssertTypeTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1521,class, 
7023,AssertShapesTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1561,class, 7024,IsStrictlyIncreasingTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1906,class, 7025,IsNonDecreasingTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1940,class, 7026,FloatDTypeTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,1973,class, 7027,AssertScalarTest,tensorflow/tensorflow/python/kernel_tests/check_ops_test.py,2027,class, 7028,GenerateVocabRemappingTest,tensorflow/tensorflow/python/kernel_tests/checkpoint_ops_test.py,40,class,Tests for the generate_vocab_remapping() method. 7029,LoadAndRemapMatrixTest,tensorflow/tensorflow/python/kernel_tests/checkpoint_ops_test.py,109,class,Tests for the load_and_remap_matrix() op. 7030,LoadAndRemapMatrixWithMaxRowsTest,tensorflow/tensorflow/python/kernel_tests/checkpoint_ops_test.py,297,class,"Tests for the load_and_remap_matrix() op. (Specifically focused on the max_rows_in_memory arg and its effects on TensorBundle's BundleReader and TensorSlice logic)." 7031,_GradWithInverseL,tensorflow/tensorflow/python/kernel_tests/cholesky_op_test.py,43,function, 7032,TriAngSolveCompositeGrad,tensorflow/tensorflow/python/kernel_tests/cholesky_op_test.py,54,function, 7033,MatrixInverseCompositeGrad,tensorflow/tensorflow/python/kernel_tests/cholesky_op_test.py,75,function, 7034,TriAngInvCompositeGrad,tensorflow/tensorflow/python/kernel_tests/cholesky_op_test.py,80,function, 7035,CholeskyOpTest,tensorflow/tensorflow/python/kernel_tests/cholesky_op_test.py,91,class, 7036,CholeskyGradTest,tensorflow/tensorflow/python/kernel_tests/cholesky_op_test.py,197,class, 7037,CholeskyBenchmark,tensorflow/tensorflow/python/kernel_tests/cholesky_op_test.py,300,class, 7038,ClipTest,tensorflow/tensorflow/python/kernel_tests/clip_ops_test.py,35,class, 7039,CompareAndBitpackTest,tensorflow/tensorflow/python/kernel_tests/compare_and_bitpack_op_test.py,27,class, 7040,ConcatOpTest,tensorflow/tensorflow/python/kernel_tests/concat_op_test.py,37,class, 7041,ConcatOffsetTest,tensorflow/tensorflow/python/kernel_tests/concat_op_test.py,645,class, 7042,CondV2Test,tensorflow/tensorflow/python/kernel_tests/cond_v2_test.py,50,class, 7043,CondV2CollectionTest,tensorflow/tensorflow/python/kernel_tests/cond_v2_test.py,1239,class, 7044,CondV2ContainerTest,tensorflow/tensorflow/python/kernel_tests/cond_v2_test.py,1296,class, 7045,CondV2ColocationGroupAndDeviceTest,tensorflow/tensorflow/python/kernel_tests/cond_v2_test.py,1381,class, 7046,_cond,tensorflow/tensorflow/python/kernel_tests/cond_v2_test.py,1525,function, 7047,_is_old_cond,tensorflow/tensorflow/python/kernel_tests/cond_v2_test.py,1532,function, 7048,_has_node_with_op,tensorflow/tensorflow/python/kernel_tests/cond_v2_test.py,1537,function,Whether any node in `run_metadata.partition_graphs` matches `op_type`. 
7049,ConditionalAccumulatorTest,tensorflow/tensorflow/python/kernel_tests/conditional_accumulator_test.py,40,class, 7050,ConfusionMatrixTest,tensorflow/tensorflow/python/kernel_tests/confusion_matrix_test.py,34,class, 7051,RemoveSqueezableDimensionsTest,tensorflow/tensorflow/python/kernel_tests/confusion_matrix_test.py,247,class, 7052,ConstantTest,tensorflow/tensorflow/python/kernel_tests/constant_op_eager_test.py,38,class, 7053,AsTensorTest,tensorflow/tensorflow/python/kernel_tests/constant_op_eager_test.py,315,class, 7054,ZerosTest,tensorflow/tensorflow/python/kernel_tests/constant_op_eager_test.py,327,class, 7055,ZerosLikeTest,tensorflow/tensorflow/python/kernel_tests/constant_op_eager_test.py,390,class, 7056,OnesTest,tensorflow/tensorflow/python/kernel_tests/constant_op_eager_test.py,447,class, 7057,OnesLikeTest,tensorflow/tensorflow/python/kernel_tests/constant_op_eager_test.py,501,class, 7058,FillTest,tensorflow/tensorflow/python/kernel_tests/constant_op_eager_test.py,523,class, 7059,ConstantTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,43,class, 7060,AsTensorTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,292,class, 7061,IdentityOpTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,382,class, 7062,ZerosTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,394,class, 7063,ZerosLikeTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,460,class, 7064,OnesTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,566,class, 7065,OnesLikeTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,644,class, 7066,FillTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,676,class, 7067,PlaceholderTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,765,class, 7068,PlaceholderWithDefaultTest,tensorflow/tensorflow/python/kernel_tests/constant_op_test.py,951,class, 7069,check_consumers,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,90,function,Sanity check on the consumer list of the tensors. 7070,all_fetchables,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,104,function, 7071,all_feedables,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,114,function, 7072,opt_cfg,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,124,function, 7073,isum,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,134,function, 7074,enqueue_print_op,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,143,function,Enqueues an op that prints a message to be captured in the test. 7075,filter_test_messages,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,148,function,Returns a list of messages printed by enqueue_print_op. 7076,tf_function_in_tf2,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,154,function, 7077,ControlFlowTest,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,163,class, 7078,ControlFlowContextCheckTest,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,4647,class, 7079,TupleTest,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,4772,class, 7080,AssertTest,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,4859,class, 7081,WhileOpBenchmark,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,4903,class,Evaluate the performance of while_loop op. 
7082,EagerTest,tensorflow/tensorflow/python/kernel_tests/control_flow_ops_py_test.py,5018,class, 7083,ControlFlowUtilTest,tensorflow/tensorflow/python/kernel_tests/control_flow_util_test.py,34,class, 7084,ControlFlowUtilV2Test,tensorflow/tensorflow/python/kernel_tests/control_flow_util_v2_test.py,31,class, 7085,Conv1DTest,tensorflow/tensorflow/python/kernel_tests/conv1d_test.py,30,class, 7086,Conv1DTransposeTest,tensorflow/tensorflow/python/kernel_tests/conv1d_transpose_test.py,33,class, 7087,Conv2DBackpropFilterGradTest,tensorflow/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py,33,class, 7088,Conv2DTransposeTest,tensorflow/tensorflow/python/kernel_tests/conv2d_transpose_test.py,37,class, 7089,Conv3DBackpropFilterV2GradTest,tensorflow/tensorflow/python/kernel_tests/conv3d_backprop_filter_v2_grad_test.py,33,class, 7090,Conv3DTransposeTest,tensorflow/tensorflow/python/kernel_tests/conv3d_transpose_test.py,33,class, 7091,GetTestConfigs,tensorflow/tensorflow/python/kernel_tests/conv_ops_3d_test.py,38,function,"Get all the valid test configs to run. Returns: all the valid test configs as tuples of data_format and use_gpu." 7092,Conv3DTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_3d_test.py,51,class, 7093,GetShrunkInceptionShapes,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,52,function,"Iterator for smaller versions of convolution shapes in 2015 Inception. Relative to inception, each depth value is `depth // shrink`. Args: shrink: Factor to shrink each depth value by relative to Inception. Yields: Tuple (input_size, filter_size, out_size, stride, padding), the convolution parameters of Inception layers." 7094,GetTestConfigs,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,150,function,"Get all the valid test configs to run. Returns: all the valid test configs as tuples of data_format and use_gpu." 7095,Conv2DTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,163,class, 7096,DepthwiseConv2DTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,2600,class, 7097,SeparableConv2DTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,2694,class, 7098,DeepConv2DTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,2877,class, 7099,Conv2DBenchmark,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,2924,class, 7100,GetInceptionFwdTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,3145,function, 7101,GetInceptionFwdDilatedConvTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,3160,function, 7102,GetInceptionBackInputTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,3177,function, 7103,GetInceptionBackFilterTest,tensorflow/tensorflow/python/kernel_tests/conv_ops_test.py,3194,function, 7104,CriticalSectionTest,tensorflow/tensorflow/python/kernel_tests/critical_section_test.py,43,class, 7105,CrossOpTest,tensorflow/tensorflow/python/kernel_tests/cross_grad_test.py,28,class, 7106,grouper,tensorflow/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py,34,function,Collect data into fixed-length chunks or blocks. 7107,flatten,tensorflow/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py,41,function,Flatten one level of nesting. 7108,CTCGreedyDecoderTest,tensorflow/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py,46,class, 7109,SimpleSparseTensorFrom,tensorflow/tensorflow/python/kernel_tests/ctc_loss_op_test.py,43,function,"Create a very simple SparseTensor with dimensions (batch, time). 
Args: x: a list of lists of type int Returns: x_ix and x_val, the indices and values of the SparseTensor<2>." 7110,_ctc_loss_v2,tensorflow/tensorflow/python/kernel_tests/ctc_loss_op_test.py,66,function,Call ctc_loss_v2 with v1 args. 7111,CTCLossTest,tensorflow/tensorflow/python/kernel_tests/ctc_loss_op_test.py,84,class, 7112,CTCLossTestV2,tensorflow/tensorflow/python/kernel_tests/ctc_loss_op_test.py,309,class, 7113,_ctc_loss_v3,tensorflow/tensorflow/python/kernel_tests/ctc_loss_op_test.py,943,function, 7114,CTCLossTestV3,tensorflow/tensorflow/python/kernel_tests/ctc_loss_op_test.py,959,class, 7115,ConvolutionTest,tensorflow/tensorflow/python/kernel_tests/cudnn_deterministic_base.py,66,class, 7116,CumulativeLogsumexpTest,tensorflow/tensorflow/python/kernel_tests/cumulative_logsumexp_test.py,32,class, 7117,_sparsify,tensorflow/tensorflow/python/kernel_tests/cwise_ops_binary_test.py,49,function, 7118,_default_tolerance,tensorflow/tensorflow/python/kernel_tests/cwise_ops_binary_test.py,61,function,"Returns a sensible default tolerance for comparing results of a given type. Args: dtype: A datatype." 7119,BinaryOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_binary_test.py,77,class, 7120,ComparisonOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_binary_test.py,823,class, 7121,_sparsify,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,56,function, 7122,_default_tolerance,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,68,function,"Returns a sensible default tolerance for comparing results of a given type. Args: dtype: A datatype." 7123,ComparisonOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,84,class, 7124,LogicalOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,226,class, 7125,SelectOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,332,class, 7126,BatchSelectOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,636,class,Test broadcasting of Select when 'c' is a vec and 't' & 'e' are rank 2+. 7127,MinMaxOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,749,class, 7128,MathOpsOverloadTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,828,class, 7129,IsFiniteInfNanTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,920,class, 7130,RoundingTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,975,class, 7131,ComplexMakeRealImagTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,1021,class, 7132,PolyvalTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_test.py,1238,class, 7133,_sparsify,tensorflow/tensorflow/python/kernel_tests/cwise_ops_unary_test.py,46,function, 7134,_default_tolerance,tensorflow/tensorflow/python/kernel_tests/cwise_ops_unary_test.py,58,function,"Returns a sensible default tolerance for comparing results of a given type. Args: dtype: A datatype." 7135,UnaryOpTest,tensorflow/tensorflow/python/kernel_tests/cwise_ops_unary_test.py,74,class, 7136,DecodeBmpOpTest,tensorflow/tensorflow/python/kernel_tests/decode_bmp_op_test.py,28,class, 7137,DecodeCompressedOpTest,tensorflow/tensorflow/python/kernel_tests/decode_compressed_op_test.py,33,class, 7138,DecodeCSVOpTest,tensorflow/tensorflow/python/kernel_tests/decode_csv_op_test.py,30,class, 7139,DecodeImageOpTest,tensorflow/tensorflow/python/kernel_tests/decode_image_op_test.py,35,class, 7140,DecodeJpegBenchmark,tensorflow/tensorflow/python/kernel_tests/decode_jpeg_op_test.py,37,class,Evaluate tensorflow DecodeJpegOp performance. 
7141,DecodePngOpTest,tensorflow/tensorflow/python/kernel_tests/decode_png_op_test.py,29,class, 7142,DecodeRawOpTest,tensorflow/tensorflow/python/kernel_tests/decode_raw_op_test.py,30,class, 7143,DenormalTest,tensorflow/tensorflow/python/kernel_tests/denormal_test.py,29,class, 7144,AssignOpTest,tensorflow/tensorflow/python/kernel_tests/dense_update_ops_no_tsan_test.py,31,class, 7145,AssignOpTest,tensorflow/tensorflow/python/kernel_tests/dense_update_ops_test.py,31,class, 7146,DepthToSpaceTest,tensorflow/tensorflow/python/kernel_tests/depthtospace_op_test.py,38,class, 7147,DepthToSpaceGradientTest,tensorflow/tensorflow/python/kernel_tests/depthtospace_op_test.py,337,class, 7148,_DepthwiseConv2dNumpyBasic,tensorflow/tensorflow/python/kernel_tests/depthwise_conv_op_test.py,37,function,"Compute depthwise_conv2d using Numpy. This allows us to test TensorFlow's depthwise_conv2d by comparing to the Numpy version. Args: x1: The input Numpy array, in NHWC format. x2: The filter Numpy array. strides: A Python list of 4 elements representing the strides. Returns: The depthwise conv2d output as a Numpy array." 7149,_DepthwiseConv2dNumpy,tensorflow/tensorflow/python/kernel_tests/depthwise_conv_op_test.py,74,function,"Compute depthwise_conv2d using Numpy. This allows us to test TensorFlow's depthwise_conv2d by comparing to the Numpy version. Unlike `_DepthwiseConv2dNumpyBasic`, this supports more advanced features like padding. Args: x1: The input Numpy array. x2: The filter Numpy array. strides: A Python list of 4 elements representing the strides. padding: The padding. ""SAME"", ""VALID"", or a list of explicit paddings. data_format: ""NHWC"" or ""NCHW"". dilations: A list of 2 elements, representing the dilations. Returns: The depthwise conv2d as a Numpy array." 7150,ConfigsToTest,tensorflow/tensorflow/python/kernel_tests/depthwise_conv_op_test.py,141,function,"Iterator for different convolution shapes, strides and paddings. Returns: List of tuples (input_size, filter_size, out_size, stride, padding, dilations), the depthwise convolution parameters." 7151,ConfigsToTestExplicit,tensorflow/tensorflow/python/kernel_tests/depthwise_conv_op_test.py,169,function,"Iterator for different convolution shapes, strides and explicit paddings. Returns: List of tuples (input_size, filter_size, out_size, stride, padding, dilations), the depthwise convolution parameters." 7152,CheckGradConfigsToTest,tensorflow/tensorflow/python/kernel_tests/depthwise_conv_op_test.py,206,function,"Iterator for different convolution shapes, strides and paddings. compute_gradient_error() is very expensive. So the configs should be relatively small. Returns: List of tuples (input_size, filter_size, out_size, stride, padding, dilations), the depthwise convolution parameters." 7153,CheckGradConfigsToTestExplicit,tensorflow/tensorflow/python/kernel_tests/depthwise_conv_op_test.py,234,function,"Iterator for different convolution shapes, strides and explicit paddings. compute_gradient_error() is very expensive. So the configs should be relatively small. Returns: List of tuples (input_size, filter_size, out_size, stride, padding, dilations), the depthwise convolution parameters." 
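Entries 7148 and 7149 describe a NumPy reference implementation of depthwise_conv2d. A minimal sketch of the basic variant, assuming NHWC input, VALID padding, and a [fh, fw, in_depth, channel_multiplier] filter; the function name is hypothetical and this is not the test file's actual code:

```python
import numpy as np

def depthwise_conv2d_basic_sketch(x1, x2, strides):
    """Hypothetical NumPy reference in the spirit of entry 7148:
    x1 is NHWC input, x2 is [fh, fw, in_depth, channel_multiplier] filters,
    strides has 4 elements; VALID padding only."""
    n, h, w, c = x1.shape
    fh, fw, _, m = x2.shape
    _, sh, sw, _ = strides
    oh = (h - fh) // sh + 1
    ow = (w - fw) // sw + 1
    out = np.zeros((n, oh, ow, c * m), dtype=x1.dtype)
    for i in range(oh):
        for j in range(ow):
            # Window [n, fh, fw, c]; per-channel multiply, then sum over the
            # spatial taps only (depthwise: no cross-channel reduction).
            win = x1[:, i * sh:i * sh + fh, j * sw:j * sw + fw, :]
            prod = win[..., np.newaxis] * x2      # [n, fh, fw, c, m]
            out[:, i, j, :] = prod.sum(axis=(1, 2)).reshape(n, c * m)
    return out

x = np.random.rand(1, 5, 5, 2)
f = np.random.rand(2, 2, 2, 3)
assert depthwise_conv2d_basic_sketch(x, f, [1, 2, 2, 1]).shape == (1, 2, 2, 6)
```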
7154,DepthwiseConv2DTest,tensorflow/tensorflow/python/kernel_tests/depthwise_conv_op_test.py,267,class, 7155,DeterminantOpTest,tensorflow/tensorflow/python/kernel_tests/determinant_op_test.py,36,class, 7156,MatrixDeterminantBenchmark,tensorflow/tensorflow/python/kernel_tests/determinant_op_test.py,166,class, 7157,zip_to_first_list_length,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,39,function, 7158,repack_diagonals,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,45,function, 7159,repack_diagonals_in_tests,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,82,function, 7160,square_cases,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,99,function, 7161,tall_cases,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,177,function, 7162,fat_cases,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,264,function, 7163,all_tests,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,323,function, 7164,MatrixDiagTest,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,327,class, 7165,MatrixSetDiagTest,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,580,class, 7166,MatrixDiagPartTest,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,774,class, 7167,DiagTest,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,930,class, 7168,DiagPartOpTest,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,1075,class, 7169,DiagGradOpTest,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,1156,class, 7170,DiagGradPartOpTest,tensorflow/tensorflow/python/kernel_tests/diag_op_test.py,1177,class, 7171,DivisionTestCase,tensorflow/tensorflow/python/kernel_tests/division_future_test.py,32,class, 7172,DivisionTestCase,tensorflow/tensorflow/python/kernel_tests/division_past_test.py,32,class, 7173,DrawBoundingBoxOpTest,tensorflow/tensorflow/python/kernel_tests/draw_bounding_box_op_test.py,32,class, 7174,DuplicateOpTest,tensorflow/tensorflow/python/kernel_tests/duplicate_op_test.py,29,class, 7175,DynamicPartitionTest,tensorflow/tensorflow/python/kernel_tests/dynamic_partition_op_test.py,36,class, 7176,DynamicStitchTestBase,tensorflow/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py,34,class, 7177,DynamicStitchTest,tensorflow/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py,227,class, 7178,ParallelDynamicStitchTest,tensorflow/tensorflow/python/kernel_tests/dynamic_stitch_op_test.py,234,class, 7179,ConstantOf,tensorflow/tensorflow/python/kernel_tests/edit_distance_op_test.py,30,function, 7180,EditDistanceTest,tensorflow/tensorflow/python/kernel_tests/edit_distance_op_test.py,38,class, 7181,_AddTest,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,35,function, 7182,EigTest,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,42,class, 7183,SortEigenValues,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,96,function, 7184,SortEigenDecomposition,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,101,function, 7185,EquilibrateEigenVectorPhases,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,108,function,"Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`. Eigenvectors are only unique up to an arbitrary phase. This function rotates x such that it matches y. Precondition: The columns of x and y differ by a multiplicative complex phase factor only. Args: x: `np.ndarray` with Eigenvectors y: `np.ndarray` with Eigenvectors Returns: `np.ndarray` containing an equilibrated version of x." 
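The phase equilibration entry 7185 documents (rotate each column of `x` onto the matching column of `y`, which by precondition differ only by a unit complex phase) reduces to a per-column inner product. A NumPy sketch under that precondition; the helper name is hypothetical:

```python
import numpy as np

def equilibrate_phases_sketch(x, y):
    """Hypothetical sketch of entry 7185: multiply each column of x by the
    unit phase that aligns it with the matching column of y."""
    # Per-column inner products <x_i, y_i>; their phases are the rotations.
    phases = np.sum(np.conj(x) * y, axis=0)
    phases /= np.abs(phases)  # keep magnitude 1 so only the phase changes
    return x * phases

# Columns differing only by a phase factor are mapped onto each other:
y = np.array([[1.0 + 0j], [1j]]) / np.sqrt(2)
x = y * np.exp(0.7j)
np.testing.assert_allclose(equilibrate_phases_sketch(x, y), y, atol=1e-12)
```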
7186,_GetEigTest,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,127,function, 7187,EigGradTest,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,192,class, 7188,_GetEigGradTest,tensorflow/tensorflow/python/kernel_tests/eig_op_test.py,196,function, 7189,EinsumOpTest,tensorflow/tensorflow/python/kernel_tests/einsum_op_test.py,38,class, 7190,EinsumGradTest,tensorflow/tensorflow/python/kernel_tests/einsum_op_test.py,290,class, 7191,EinsumBenchmark,tensorflow/tensorflow/python/kernel_tests/einsum_op_test.py,397,class, 7192,_AsLong,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,49,function,Casts array elements to long type. Used to convert from numpy to tf. 7193,ScatterAddSubTest,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,54,class, 7194,_PName,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,136,function, 7195,_EmbeddingParams,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,140,function, 7196,_EmbeddingParamsAsPartitionedVariable,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,170,function, 7197,_EmbeddingResult,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,188,function, 7198,EmbeddingLookupTest,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,245,class, 7199,EmbeddingLookupSparseTest,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,653,class, 7200,SafeEmbeddingLookupSparseTest,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,795,class, 7201,DynamicStitchOpTest,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,1033,class, 7202,ParallelDynamicStitchOpTest,tensorflow/tensorflow/python/kernel_tests/embedding_ops_test.py,1120,class, 7203,ExtractImagePatchesGradTest,tensorflow/tensorflow/python/kernel_tests/extract_image_patches_grad_test.py,34,class,Gradient-checking for ExtractImagePatches op. 7204,ExtractImagePatches,tensorflow/tensorflow/python/kernel_tests/extract_image_patches_op_test.py,28,class,Functional tests for ExtractImagePatches op. 7205,ExtractVolumePatchesGradTest,tensorflow/tensorflow/python/kernel_tests/extract_volume_patches_grad_test.py,35,class,Gradient-checking for ExtractVolumePatches op. 7206,ExtractVolumePatches,tensorflow/tensorflow/python/kernel_tests/extract_volume_patches_op_test.py,27,class,Functional tests for ExtractVolumePatches op. 7207,FIFOQueueTest,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,48,class, 7208,GPUCompatibleFIFOQueueTests,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,403,class, 7209,UnconvertedFIFOQueueTests,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,435,class, 7210,FIFOQueueParallelTests,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,771,class, 7211,FIFOQueueDictTest,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,1640,class, 7212,FIFOQueueWithTimeoutTest,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,1694,class, 7213,QueueContainerTest,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,1728,class, 7214,FIFOQueueBenchmark,tensorflow/tensorflow/python/kernel_tests/fifo_queue_test.py,1738,class,Benchmark FIFOQueue operations. 7215,FingerprintTest,tensorflow/tensorflow/python/kernel_tests/fingerprint_op_test.py,29,class, 7216,FractionalAvgTest,tensorflow/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py,36,class, 7217,FractionalAvgPoolGradTest,tensorflow/tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py,314,class,"Tests for FractionalAvgPoolGrad. 
Two types of tests for FractionalAvgPoolGrad. 1) Test fractional_avg_pool_grad() directly. This type of test relies on gen_nn_ops.avg_pool_grad() returning the correct result. For example: * input_tensor_shape = (1, 10, 10, 1) * window_size = (1, 2, 2, 1) * stride_size = (1, 2, 2, 1) * padding: not really important, since 10/2 is divisible. Avg pooling should generate the same result as fractional avg pooling with: * row_sequence = [0, 2, 4, 6, 8, 10] * col_sequence = [0, 2, 4, 6, 8, 10] * overlapping = False This also means their gradients in such case will be the same. Similarly, when * input_tensor_shape = (1, 7, 7, 1) * window_size = (1, 3, 3, 1) * stride_size = (1, 2, 2, 1) * padding: not important. Avg pooling should generate the same result as fractional avg pooling with: * row_sequence = [0, 2, 4, 7] * col_sequence = [0, 2, 4, 7] * overlapping = True 2) Test through compute_gradient_error()" 7218,FractionalMaxPoolTest,tensorflow/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py,36,class, 7219,FractionalMaxPoolGradTest,tensorflow/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py,311,class,"Tests for FractionalMaxPoolGrad. Two types of tests for FractionalMaxPoolGrad. 1) Test fractional_max_pool_grad() directly. This type of test relies on gen_nn_ops.max_pool_grad() returning the correct result. For example: * input_tensor_shape = (1, 10, 10, 1) * window_size = (1, 2, 2, 1) * stride_size = (1, 2, 2, 1) * padding: not really important, since 10/2 is divisible. Max pooling should generate the same result as fractional max pooling with: * row_sequence = [0, 2, 4, 6, 8, 10] * col_sequence = [0, 2, 4, 6, 8, 10] * overlapping = False This also means their gradients in such case will be the same. Similarly, when * input_tensor_shape = (1, 7, 7, 1) * window_size = (1, 3, 3, 1) * stride_size = (1, 2, 2, 1) * padding: not important. Max pooling should generate the same result as fractional max pooling with: * row_sequence = [0, 2, 4, 7] * col_sequence = [0, 2, 4, 7] * overlapping = True 2) Test through compute_gradient_error()" 7220,simple_scoped_fn,tensorflow/tensorflow/python/kernel_tests/functional_ops_test.py,50,function,"Simple function: (a, x) -> 2(x+a), but with ""2"" as a variable in scope." 7221,FunctionalOpsTest,tensorflow/tensorflow/python/kernel_tests/functional_ops_test.py,62,class, 7222,PartitionedCallTest,tensorflow/tensorflow/python/kernel_tests/functional_ops_test.py,972,class, 7223,FunctionalOpsCaseTest,tensorflow/tensorflow/python/kernel_tests/functional_ops_test.py,1178,class, 7224,NoReferenceCycleTests,tensorflow/tensorflow/python/kernel_tests/garbage_collection_test.py,32,class, 7225,GatherNdTest,tensorflow/tensorflow/python/kernel_tests/gather_nd_op_test.py,39,class, 7226,GatherNdOpBenchmark,tensorflow/tensorflow/python/kernel_tests/gather_nd_op_test.py,387,class, 7227,_to_str_elements,tensorflow/tensorflow/python/kernel_tests/gather_op_test.py,43,function,Converts the inner list elements to strings. 
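The equivalence these two docstrings (7217, 7219) lean on, that plain 2x2/stride-2 pooling on a 10x10 input matches "fractional" pooling over the fixed boundary sequences [0, 2, 4, 6, 8, 10], can be verified numerically. A NumPy illustration, not part of the tests themselves:

```python
import numpy as np

# On a 10x10 input, 2x2/stride-2 average pooling equals pooling over the
# fixed row/col boundaries [0, 2, 4, 6, 8, 10] cited in entries 7217/7219.
x = np.random.rand(10, 10)
seq = [0, 2, 4, 6, 8, 10]

regular = x.reshape(5, 2, 5, 2).mean(axis=(1, 3))  # plain 2x2 average pool
fractional = np.array([
    [x[r0:r1, c0:c1].mean() for c0, c1 in zip(seq[:-1], seq[1:])]
    for r0, r1 in zip(seq[:-1], seq[1:])])

np.testing.assert_allclose(regular, fractional)
```

Since the forward results agree, their gradients agree as well, which is exactly how the direct gradient tests bootstrap from avg_pool_grad/max_pool_grad.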
7228,GatherTest,tensorflow/tensorflow/python/kernel_tests/gather_op_test.py,51,class, 7229,GradientCorrectnessTest,tensorflow/tensorflow/python/kernel_tests/gradient_correctness_test.py,31,class, 7230,SliceTest,tensorflow/tensorflow/python/kernel_tests/huge_slice_op_test.py,29,class, 7231,IdentityNOpTest,tensorflow/tensorflow/python/kernel_tests/identity_n_op_py_test.py,29,class, 7232,IdentityOpTest,tensorflow/tensorflow/python/kernel_tests/identity_op_py_test.py,33,class, 7233,InTopKTest,tensorflow/tensorflow/python/kernel_tests/in_topk_op_test.py,28,class, 7234,identicaltest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,43,function,"Tests if two initializations are identical to within tiny tolerances. Args: tc: An instance of TensorFlowTestCase. init1: An Initializer that generates a tensor of a given shape init2: An Initializer that generates a tensor of a given shape shape: Shape of the tensor to initialize or `None` to use a vector of length 100. Returns: True or False as determined by test." 7235,duplicated_initializer,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,65,function,"Tests duplicated random initializer within the same graph. This test generates two random kernels from the same initializer to the same graph, and checks if the results are close enough. Even given the same global seed, two different instances of random kernels should generate different results. Args: tc: An instance of TensorFlowTestCase. init: An Initializer that generates a tensor of a given shape graph_seed: A graph-level seed to use. shape: Shape of the tensor to initialize or `None` to use a vector of length 100. Returns: True or False as determined by test." 7236,_init_sampler,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,92,function,"Returns a func to generate a random tensor of shape [num]. Args: tc: An instance of TensorFlowTestCase. init: An Initializer that generates a tensor of a given shape num: Size of 1D tensor to create. Returns: Function to generate a random tensor." 
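The property entry 7235 (`duplicated_initializer`) checks can be restated in TF 2.x eager terms; the original test works with graph-level seeds, so this sketch is an assumption-labeled translation, not the test's code:

```python
import tensorflow as tf

# With only a global seed set, two kernels drawn from the same initializer
# still differ, because each op derives its own op-level seed.
tf.random.set_seed(87654321)
init = tf.keras.initializers.GlorotUniform()
kernel_a = init(shape=(100,))
kernel_b = init(shape=(100,))
assert not bool(tf.reduce_all(tf.equal(kernel_a, kernel_b)))
```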
7237,ConstantInitializersTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,111,class, 7238,RandomNormalInitializationTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,257,class, 7239,TruncatedNormalInitializationTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,287,class, 7240,RandomUniformInitializationTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,321,class, 7241,UniformUnitScalingInitializationTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,343,class, 7242,VarianceScalingInitializationTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,391,class, 7243,RangeTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,461,class, 7244,LinSpaceTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,547,class, 7245,LinSpaceNdTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,617,class, 7246,DeviceTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,761,class, 7247,OrthogonalInitializerTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,777,class, 7248,ConvolutionDeltaOrthogonalInitializerTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,838,class, 7249,ConvolutionOrthogonal1dInitializerTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,948,class, 7250,ConvolutionOrthogonal2dInitializerTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,1073,class, 7251,ConvolutionOrthogonal3dInitializerTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,1177,class, 7252,IdentityInitializerTest,tensorflow/tensorflow/python/kernel_tests/init_ops_test.py,1310,class, 7253,InplaceOpsTest,tensorflow/tensorflow/python/kernel_tests/inplace_ops_test.py,32,class, 7254,InvalidOpTest,tensorflow/tensorflow/python/kernel_tests/invalid_op_test.py,28,class, 7255,IoOpsTest,tensorflow/tensorflow/python/kernel_tests/io_ops_test.py,32,class, 7256,LargeConcatOpTest,tensorflow/tensorflow/python/kernel_tests/large_concat_op_test.py,26,class,"Tests that belong in concat_op_test.py, but run over large tensors." 7257,_AddTest,tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py,34,function, 7258,ShapeTest,tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py,41,class, 7259,MatrixUnaryFunctorGradientTest,tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py,58,class, 7260,_GetMatrixUnaryFunctorGradientTest,tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py,62,function, 7261,MatrixBinaryFunctorGradientTest,tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py,94,class, 7262,_GetMatrixBinaryFunctorGradientTest,tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py,98,function, 7263,_GetBandedTriangularSolveGradientTest,tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py,135,function, 7264,_AddTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,38,function, 7265,_RandomPDMatrix,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,45,function,Random positive definite matrix. 
7266,CholeskySolveTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,53,class, 7267,LogdetTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,75,class, 7268,SlogdetTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,107,class, 7269,AdjointTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,139,class, 7270,EyeTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,154,class, 7271,_MatrixRankTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,273,class, 7272,MatrixRankStatic32Test,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,325,class, 7273,MatrixRankDynamic64Test,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,331,class, 7274,_PinvTest,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,336,class, 7275,PinvTestDynamic32DefaultRcond,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,388,class, 7276,PinvTestStatic64DefaultRcond,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,395,class, 7277,PinvTestDynamic32CustomtRcond,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,402,class, 7278,PinvTestStatic64CustomRcond,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,409,class, 7279,make_tensor_hiding_attributes,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,415,function, 7280,_LUReconstruct,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,423,class, 7281,LUReconstructStatic,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,456,class, 7282,LUReconstructDynamic,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,461,class, 7283,_LUMatrixInverse,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,465,class, 7284,LUMatrixInverseStatic,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,500,class, 7285,LUMatrixInverseDynamic,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,505,class, 7286,_LUSolve,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,509,class, 7287,LUSolveStatic,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,556,class, 7288,LUSolveDynamic,tensorflow/tensorflow/python/kernel_tests/linalg_ops_test.py,561,class, 7289,ListOpsTest,tensorflow/tensorflow/python/kernel_tests/list_ops_test.py,50,class, 7290,ListDiffTest,tensorflow/tensorflow/python/kernel_tests/listdiff_op_test.py,35,class, 7291,PrintV2LoggingLevelTest,tensorflow/tensorflow/python/kernel_tests/logging_ops_logging_level_test.py,30,class, 7292,LoggingOpsTest,tensorflow/tensorflow/python/kernel_tests/logging_ops_test.py,42,class, 7293,PrintV2Test,tensorflow/tensorflow/python/kernel_tests/logging_ops_test.py,73,class, 7294,PrintGradientTest,tensorflow/tensorflow/python/kernel_tests/logging_ops_test.py,365,class, 7295,BaseLookupTableTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,57,class, 7296,StaticHashTableTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,76,class, 7297,KeyValueTensorInitializerTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,454,class, 7298,DatasetInitializerTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,493,class, 7299,InitializeTableFromFileOpTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,581,class, 7300,StaticVocabularyTableTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,911,class, 7301,DenseHashTableOpTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,1138,class, 
7302,IndexTableFromFile,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,1913,class, 7303,IndexTableFromTensor,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,2177,class, 7304,IndexToStringTableFromFileTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,2268,class, 7305,IndexToStringTableFromTensorTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,2381,class, 7306,IdTableWithHashBucketsTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,2425,class, 7307,MutableHashTableOpTest,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,2809,class, 7308,MutableHashTableBenchmark,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,3405,class, 7309,DenseHashTableBenchmark,tensorflow/tensorflow/python/kernel_tests/lookup_ops_test.py,3453,class, 7310,AbsoluteDifferenceLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,42,class, 7311,SoftmaxCrossEntropyLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,117,class, 7312,SparseSoftmaxCrossEntropyLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,231,class, 7313,SigmoidCrossEntropyLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,496,class, 7314,LogLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,655,class, 7315,HingeLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,828,class, 7316,HuberLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,866,class, 7317,MeanSquaredErrorTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,926,class, 7318,MeanPairwiseSquaredErrorTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,1006,class, 7319,CosineDistanceLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,1235,class, 7320,AddLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,1345,class, 7321,ComputeWeightedLossTest,tensorflow/tensorflow/python/kernel_tests/losses_test.py,1359,class, 7322,LRNOpTest,tensorflow/tensorflow/python/kernel_tests/lrn_op_test.py,36,class, 7323,LuOpTest,tensorflow/tensorflow/python/kernel_tests/lu_op_test.py,39,class, 7324,LuBenchmark,tensorflow/tensorflow/python/kernel_tests/lu_op_test.py,238,class, 7325,RollTest,tensorflow/tensorflow/python/kernel_tests/manip_ops_test.py,41,class, 7326,simple_scoped_fn,tensorflow/tensorflow/python/kernel_tests/map_fn_test.py,41,function,"Simple function: (a, x) -> 2(x+a), but with ""2"" as a variable in scope." 7327,MapFnTest,tensorflow/tensorflow/python/kernel_tests/map_fn_test.py,53,class, 7328,MapStageTest,tensorflow/tensorflow/python/kernel_tests/map_stage_op_test.py,31,class, 7329,MatVecTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,40,class,"Simple test for matvec, which is sugar on top of matmul." 
7330,_AddTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,51,function, 7331,_GetTransposedMatrices,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,58,function, 7332,MatMulTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,67,class, 7333,_GetMatMulTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,71,function, 7334,MatMulGradientTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,111,class, 7335,_GetMatMulGradientTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,115,function, 7336,MatMulStatsTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,147,class, 7337,infix_matmul,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,179,function, 7338,MatMulInfixOperatorTest,tensorflow/tensorflow/python/kernel_tests/matmul_op_test.py,195,class, 7339,_AddTest,tensorflow/tensorflow/python/kernel_tests/matrix_band_part_op_test.py,35,function, 7340,MatrixBandPartTest,tensorflow/tensorflow/python/kernel_tests/matrix_band_part_op_test.py,42,class, 7341,_GetMatrixBandPartTest,tensorflow/tensorflow/python/kernel_tests/matrix_band_part_op_test.py,46,function, 7342,MatrixBandPartGradTest,tensorflow/tensorflow/python/kernel_tests/matrix_band_part_op_test.py,72,class, 7343,_GetMatrixBandPartGradTest,tensorflow/tensorflow/python/kernel_tests/matrix_band_part_op_test.py,76,function, 7344,MatrixBandPartBenchmark,tensorflow/tensorflow/python/kernel_tests/matrix_band_part_op_test.py,93,class, 7345,np_expm,tensorflow/tensorflow/python/kernel_tests/matrix_exponential_op_test.py,38,function,Slow but accurate Taylor series matrix exponential. 7346,ExponentialOpTest,tensorflow/tensorflow/python/kernel_tests/matrix_exponential_op_test.py,50,class, 7347,MatrixExponentialBenchmark,tensorflow/tensorflow/python/kernel_tests/matrix_exponential_op_test.py,163,class, 7348,_TestRandomSmall,tensorflow/tensorflow/python/kernel_tests/matrix_exponential_op_test.py,216,function, 7349,_TestL1Norms,tensorflow/tensorflow/python/kernel_tests/matrix_exponential_op_test.py,227,function, 7350,InverseOpTest,tensorflow/tensorflow/python/kernel_tests/matrix_inverse_op_test.py,36,class, 7351,MatrixInverseBenchmark,tensorflow/tensorflow/python/kernel_tests/matrix_inverse_op_test.py,156,class, 7352,LogarithmOpTest,tensorflow/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py,39,class, 7353,MatrixLogarithmBenchmark,tensorflow/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py,148,class, 7354,_AddTest,tensorflow/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py,39,function, 7355,_GenerateTestData,tensorflow/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py,46,function, 7356,_SolveWithNumpy,tensorflow/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py,61,function, 7357,MatrixSolveLsOpTest,tensorflow/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py,80,class, 7358,_GetSmallMatrixSolveLsOpTests,tensorflow/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py,186,function, 7359,_GetLargeMatrixSolveLsOpTests,tensorflow/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py,233,function, 7360,MatrixSolveLsBenchmark,tensorflow/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py,289,class, 7361,MatrixSolveOpTest,tensorflow/tensorflow/python/kernel_tests/matrix_solve_op_test.py,39,class, 7362,MatrixSolveBenchmark,tensorflow/tensorflow/python/kernel_tests/matrix_solve_op_test.py,145,class, 7363,SquareRootOpTest,tensorflow/tensorflow/python/kernel_tests/matrix_square_root_op_test.py,32,class, 
7364,MatrixTriangularSolveOpTest,tensorflow/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py,29,class, 7365,_enqueue_vector,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,46,function, 7366,_binary_2d_label_to_2d_sparse_value,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,55,function,"Convert dense 2D binary indicator to sparse ID. Only `1` values in `labels` are included in the result. Args: labels: Dense 2D binary indicator, shape [batch_size, num_classes]. Returns: `SparseTensorValue` of shape [batch_size, num_classes], where num_classes is the number of `1` values in each row of `labels`. Values are indices of `1` values along the last dimension of `labels`." 7367,_binary_2d_label_to_1d_sparse_value,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,89,function,"Convert dense 2D binary indicator to sparse ID. Only `1` values in `labels` are included in the result. Args: labels: Dense 2D binary indicator, shape [batch_size, num_classes]. Each row must contain exactly 1 `1` value. Returns: `SparseTensorValue` of shape [batch_size]. Values are indices of `1` values along the last dimension of `labels`. Raises: ValueError: if there is not exactly 1 `1` value per row of `labels`." 7368,_binary_3d_label_to_sparse_value,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,128,function,"Convert dense 3D binary indicator tensor to sparse tensor. Only `1` values in `labels` are included in the result. Args: labels: Dense 3D binary indicator tensor. Returns: `SparseTensorValue` whose values are indices along the last dimension of `labels`." 7369,_assert_nan,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,158,function, 7370,_assert_metric_variables,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,162,function, 7371,_test_values,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,170,function, 7372,MeanTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,174,class, 7373,MeanTensorTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,354,class, 7374,AccuracyTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,555,class, 7375,PrecisionTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,752,class, 7376,RecallTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,951,class, 7377,AUCTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,1085,class, 7378,SpecificityAtSensitivityTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,1437,class, 7379,SensitivityAtSpecificityTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,1583,class, 7380,PrecisionRecallThresholdsTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,1710,class, 7381,_test_precision_at_k,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2018,function, 7382,_test_precision_at_top_k,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2049,function, 7383,_test_average_precision_at_k,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2081,function, 7384,SingleLabelPrecisionAtKTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2108,class, 7385,MultiLabelPrecisionAtKTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2156,class, 7386,_test_recall_at_k,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2528,function, 7387,_test_recall_at_top_k,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2559,function, 7388,SingleLabelRecallAtKTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2591,class,
7389,MultiLabel2dRecallAtKTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2717,class, 7390,MultiLabel3dRecallAtKTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2814,class, 7391,MeanAbsoluteErrorTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,2960,class, 7392,MeanRelativeErrorTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3024,class, 7393,MeanSquaredErrorTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3115,class, 7394,RootMeanSquaredErrorTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3301,class, 7395,_reweight,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3396,function, 7396,MeanCosineDistanceTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3401,class, 7397,PcntBelowThreshTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3551,class, 7398,MeanIOUTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3625,class, 7399,MeanPerClassAccuracyTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,3944,class, 7400,FalseNegativesTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4180,class, 7401,FalseNegativesAtThresholdsTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4233,class, 7402,FalsePositivesTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4285,class, 7403,FalsePositivesAtThresholdsTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4338,class, 7404,TrueNegativesTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4392,class, 7405,TrueNegativesAtThresholdsTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4445,class, 7406,TruePositivesTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4497,class, 7407,TruePositivesAtThresholdsTest,tensorflow/tensorflow/python/kernel_tests/metrics_test.py,4550,class, 7408,DilationTest,tensorflow/tensorflow/python/kernel_tests/morphological_ops_test.py,31,class, 7409,ErosionTest,tensorflow/tensorflow/python/kernel_tests/morphological_ops_test.py,307,class, 7410,ConfigsToTest,tensorflow/tensorflow/python/kernel_tests/neon_depthwise_conv_op_test.py,32,function,"Iterator for different convolution shapes, strides and paddings. Yields: Tuple (input_size, filter_size, out_size, stride, padding), the depthwise convolution parameters." 7411,CheckGradConfigsToTest,tensorflow/tensorflow/python/kernel_tests/neon_depthwise_conv_op_test.py,56,function,"Iterator for different convolution shapes, strides and paddings. compute_gradient_error() is very expensive. So the configs should be relatively small. Yields: Tuple (input_size, filter_size, out_size, stride, padding), the depthwise convolution parameters." 
7412,DepthwiseConv2DTest,tensorflow/tensorflow/python/kernel_tests/neon_depthwise_conv_op_test.py,83,class, 7413,_AddTest,tensorflow/tensorflow/python/kernel_tests/norm_op_test.py,30,function, 7414,NormOpTest,tensorflow/tensorflow/python/kernel_tests/norm_op_test.py,37,class, 7415,_GetNormOpTest,tensorflow/tensorflow/python/kernel_tests/norm_op_test.py,67,function, 7416,_AddTest,tensorflow/tensorflow/python/kernel_tests/normalize_op_test.py,28,function, 7417,_Normalize,tensorflow/tensorflow/python/kernel_tests/normalize_op_test.py,36,function, 7418,NormalizeOpTest,tensorflow/tensorflow/python/kernel_tests/normalize_op_test.py,57,class, 7419,_GetNormalizeOpTest,tensorflow/tensorflow/python/kernel_tests/normalize_op_test.py,61,function, 7420,NthElementTest,tensorflow/tensorflow/python/kernel_tests/nth_element_op_test.py,32,class, 7421,VerifyTensorAllFiniteTest,tensorflow/tensorflow/python/kernel_tests/numerics_test.py,34,class, 7422,NumericsTest,tensorflow/tensorflow/python/kernel_tests/numerics_test.py,68,class, 7423,OneHotTest,tensorflow/tensorflow/python/kernel_tests/one_hot_op_test.py,29,class, 7424,PadOpTest,tensorflow/tensorflow/python/kernel_tests/pad_op_test.py,32,class, 7425,PaddingFIFOQueueTest,tensorflow/tensorflow/python/kernel_tests/padding_fifo_queue_test.py,39,class, 7426,QueueFromListTest,tensorflow/tensorflow/python/kernel_tests/padding_fifo_queue_test.py,1598,class, 7427,empty_sparse,tensorflow/tensorflow/python/kernel_tests/parse_single_example_op_test.py,50,function, 7428,flatten,tensorflow/tensorflow/python/kernel_tests/parse_single_example_op_test.py,57,function,Flatten one level of nesting. 7429,flatten_values_tensors_or_sparse,tensorflow/tensorflow/python/kernel_tests/parse_single_example_op_test.py,62,function,Flatten each SparseTensor object into 3 Tensors for session.run(). 7430,_compare_output_to_expected,tensorflow/tensorflow/python/kernel_tests/parse_single_example_op_test.py,69,function, 7431,ParseExampleTest,tensorflow/tensorflow/python/kernel_tests/parse_single_example_op_test.py,90,class, 7432,ParseSingleExampleTest,tensorflow/tensorflow/python/kernel_tests/parse_single_example_op_test.py,850,class, 7433,flatten,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,61,function,Flatten one level of nesting. 7434,_compare_output_to_expected,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,66,function, 7435,ParseExampleTest,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,82,class, 7436,ParseSingleExampleTest,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,1180,class, 7437,ParseSequenceExampleTest,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,1419,class, 7438,DecodeRawTest,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,2291,class, 7439,DecodeJSONExampleTest,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,2362,class, 7440,ParseTensorOpTest,tensorflow/tensorflow/python/kernel_tests/parsing_ops_test.py,2449,class, 7441,PartitionerCreatorsTest,tensorflow/tensorflow/python/kernel_tests/partitioned_variables_test.py,40,class, 7442,_IotaInitializer,tensorflow/tensorflow/python/kernel_tests/partitioned_variables_test.py,310,function, 7443,PartitionedVariablesTestCase,tensorflow/tensorflow/python/kernel_tests/partitioned_variables_test.py,319,class, 7444,pool_direct_single_axis,tensorflow/tensorflow/python/kernel_tests/pool_test.py,34,function,"Numpy implementation of pooling along a single axis. This is intended for testing only, and therefore isn't particularly efficient. 
See pool_direct below for the meaning of the arguments. Args: input: numpy array. axis: axis along which to perform pooling. window_size: int >= 1. Size of pooling window within axis. pooling_type: either ""MAX"" or ""AVG"". padding: either ""SAME"" or ""VALID"". dilation_rate: int >= 1. Dilation factor for window, i.e. stride at which to sample input. stride: int >= 1. Stride at which to generate output. Returns: pooling output array of rank N+2. Raises: ValueError: if arguments are invalid." 7445,pool_direct,tensorflow/tensorflow/python/kernel_tests/pool_test.py,99,function,"Numpy implementation of pooling. This is intended for testing only, and therefore isn't particularly efficient. See tensorflow.nn.pool. Args: input: numpy array of rank N+2. window_shape: Sequence of N ints >= 1. pooling_type: either ""MAX"" or ""AVG"". padding: either ""SAME"" or ""VALID"". dilation_rate: Sequence of N ints >= 1. strides: Sequence of N ints >= 1. data_format: If specified and starts with ""NC"", indicates that second dimension, rather than the last dimension, specifies the channel. Returns: pooling output array of rank N+2. Raises: ValueError: if arguments are invalid." 7446,PoolingTest,tensorflow/tensorflow/python/kernel_tests/pool_test.py,146,class, 7447,GetTestConfigs,tensorflow/tensorflow/python/kernel_tests/pooling_ops_3d_test.py,32,function,"Get all the valid test configs to run. Returns: all the valid test configs as tuples of data_format and use_gpu." 7448,PoolingTest,tensorflow/tensorflow/python/kernel_tests/pooling_ops_3d_test.py,46,class, 7449,GetDeviceScope,tensorflow/tensorflow/python/kernel_tests/pooling_ops_test.py,44,function, 7450,GetTestConfigs,tensorflow/tensorflow/python/kernel_tests/pooling_ops_test.py,53,function,"Get all the valid test configs to run. Args: include_nchw_vect_c: Whether to include NCHW_VECT_C in the test configs. Returns: all the valid test configs as tuples of data_format and use_gpu." 7451,GetShrunkInceptionMaxPoolShapes,tensorflow/tensorflow/python/kernel_tests/pooling_ops_test.py,80,function,"Iterator for some of the max pool ops in the Inception 2015 model. Args: shrink: Factor to shrink depth relative to Inception. Yields: Tuple (name, input_size, filter_size, out_size, strides, padding)" 7452,PoolingTest,tensorflow/tensorflow/python/kernel_tests/pooling_ops_test.py,107,class, 7453,GetMaxPoolFwdTest,tensorflow/tensorflow/python/kernel_tests/pooling_ops_test.py,1960,function, 7454,GetMaxPoolGradTest,tensorflow/tensorflow/python/kernel_tests/pooling_ops_test.py,1971,function, 7455,GetMaxPoolGradGradTest,tensorflow/tensorflow/python/kernel_tests/pooling_ops_test.py,1983,function, 7456,PriorityQueueTest,tensorflow/tensorflow/python/kernel_tests/priority_queue_test.py,39,class, 7457,np_func,tensorflow/tensorflow/python/kernel_tests/py_func_test.py,48,function, 7458,matmul,tensorflow/tensorflow/python/kernel_tests/py_func_test.py,52,function, 7459,PyFuncTestBase,tensorflow/tensorflow/python/kernel_tests/py_func_test.py,56,class, 7460,PyFuncTest,tensorflow/tensorflow/python/kernel_tests/py_func_test.py,87,class,Encapsulates tests for py_func only. 7461,PyFuncAndEagerPyFuncTest,tensorflow/tensorflow/python/kernel_tests/py_func_test.py,482,class,Encapsulates tests shared between py_func and eager_py_func. 7462,EagerPyFuncTest,tensorflow/tensorflow/python/kernel_tests/py_func_test.py,541,class,Encapsulates tests for eager_py_func only.
7463,_AddTest,tensorflow/tensorflow/python/kernel_tests/qr_op_test.py,41,function, 7464,QrOpTest,tensorflow/tensorflow/python/kernel_tests/qr_op_test.py,48,class, 7465,_GetQrOpTest,tensorflow/tensorflow/python/kernel_tests/qr_op_test.py,83,function, 7466,QrGradOpTest,tensorflow/tensorflow/python/kernel_tests/qr_op_test.py,173,class, 7467,_GetQrGradOpTest,tensorflow/tensorflow/python/kernel_tests/qr_op_test.py,193,function, 7468,QRBenchmark,tensorflow/tensorflow/python/kernel_tests/qr_op_test.py,226,class, 7469,TFCompressionTestCase,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,80,class, 7470,IdentityReaderTest,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,143,class, 7471,WholeFileReaderTest,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,288,class, 7472,TextLineReaderTest,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,346,class, 7473,FixedLengthRecordReaderTest,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,415,class, 7474,TFRecordReaderTest,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,624,class, 7475,AsyncReaderTest,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,711,class, 7476,LMDBReaderTest,tensorflow/tensorflow/python/kernel_tests/reader_ops_test.py,753,class, 7477,RecordInputOpTest,tensorflow/tensorflow/python/kernel_tests/record_input_test.py,30,class, 7478,ReduceBenchmarks,tensorflow/tensorflow/python/kernel_tests/reduce_benchmark_test.py,38,class,Benchmarks for reductions. 7479,_input_array,tensorflow/tensorflow/python/kernel_tests/reduce_join_op_test.py,34,function,"Creates an ndarray where each element is the binary of its linear index. Args: num_dims: The number of dimensions to create. Returns: An ndarray of shape [2] * num_dims." 7480,_joined_array,tensorflow/tensorflow/python/kernel_tests/reduce_join_op_test.py,48,function,"Creates an ndarray with the result from reduce_join on input_array. Args: num_dims: The number of dimensions of the original input array. reduce_dim: The dimension to reduce. Returns: An ndarray of shape [2] * (num_dims - 1)." 7481,UnicodeTestCase,tensorflow/tensorflow/python/kernel_tests/reduce_join_op_test.py,68,class,Test case with Python3-compatible string comparator. 7482,ReduceJoinTestHelperTest,tensorflow/tensorflow/python/kernel_tests/reduce_join_op_test.py,76,class,Tests for helper functions. 7483,ReduceJoinTest,tensorflow/tensorflow/python/kernel_tests/reduce_join_op_test.py,98,class, 7484,_powerset,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,42,function,"Helper for generating all possible reduction_axes arguments. Example: powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2) Args: iterable: An iterable of items to generate the powerset of. Returns: The powerset of all items in iterable." 
7485,ReducedShapeTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,59,class, 7486,ReductionUnknownShape,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,100,class, 7487,BaseReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,119,class, 7488,SumReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,180,class, 7489,MeanReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,405,class, 7490,EuclideanNormReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,527,class, 7491,ProdReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,612,class, 7492,MinReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,712,class, 7493,MaxReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,831,class, 7494,AllReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,964,class, 7495,AnyReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,1013,class, 7496,CountNonzeroReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test.py,1062,class, 7497,BaseReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test_big.py,30,class, 7498,BigReductionTest,tensorflow/tensorflow/python/kernel_tests/reduction_ops_test_big.py,36,class,Test reductions for sum and boolean all over a wide range of shapes. 7499,RegexFullMatchOpVariantsTest,tensorflow/tensorflow/python/kernel_tests/regex_full_match_op_test.py,34,class, 7500,RegexFullMatchOpTest,tensorflow/tensorflow/python/kernel_tests/regex_full_match_op_test.py,71,class, 7501,RegexReplaceOpVariantsTest,tensorflow/tensorflow/python/kernel_tests/regex_replace_op_test.py,34,class, 7502,as_string,tensorflow/tensorflow/python/kernel_tests/regex_replace_op_test.py,94,function, 7503,as_tensor,tensorflow/tensorflow/python/kernel_tests/regex_replace_op_test.py,98,function, 7504,RegexReplaceTest,tensorflow/tensorflow/python/kernel_tests/regex_replace_op_test.py,102,class, 7505,_elu_grad_grad,tensorflow/tensorflow/python/kernel_tests/relu_op_test.py,41,function, 7506,ReluTest,tensorflow/tensorflow/python/kernel_tests/relu_op_test.py,47,class, 7507,Relu6Test,tensorflow/tensorflow/python/kernel_tests/relu_op_test.py,231,class, 7508,LeakyReluTest,tensorflow/tensorflow/python/kernel_tests/relu_op_test.py,290,class, 7509,EluTest,tensorflow/tensorflow/python/kernel_tests/relu_op_test.py,411,class, 7510,SeluTest,tensorflow/tensorflow/python/kernel_tests/relu_op_test.py,512,class, 7511,CreluTest,tensorflow/tensorflow/python/kernel_tests/relu_op_test.py,596,class, 7512,ReshapeTest,tensorflow/tensorflow/python/kernel_tests/reshape_op_test.py,33,class, 7513,ResourceVariableOpsTest,tensorflow/tensorflow/python/kernel_tests/resource_variable_ops_test.py,62,class, 7514,ReverseSequenceTest,tensorflow/tensorflow/python/kernel_tests/reverse_sequence_op_test.py,33,class, 7515,Plus1RNNCell,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,54,class,"RNN Cell generating (output, new_state) = (input + 1, state + 1)." 7516,DummyMultiDimensionalLSTM,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,69,class,"LSTM Cell generating (output, new_state) = (input + 1, state + 1). The input to this cell may have an arbitrary number of dimensions that follow the preceding 'Time' and 'Batch' dimensions." 
7517,NestedRNNCell,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,104,class,"RNN Cell generating (output, new_state) = (input + 1, state + 1). The input, output and state of this cell are each a tuple of two tensors." 7518,TestStateSaver,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,124,class, 7519,TestStateSaverWithCounters,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,159,class,"Class wrapper around TestStateSaver. A dummy class used for testing of static_state_saving_rnn. It helps test whether the save_state and state functions are called the same number of times when we evaluate the output of the rnn cell and its state, together or separately. It inherits from TestStateSaver and adds counters for those function calls." 7520,RNNTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,193,class, 7521,LSTMTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,359,class, 7522,BidirectionalRNNTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,1327,class, 7523,MultiDimensionalLSTMTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,1633,class, 7524,NestedLSTMTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,1745,class, 7525,StateSaverRNNTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,1869,class, 7526,GRUTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,1965,class, 7527,RawRNNTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,2048,class, 7528,DeviceWrapperCell,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,2356,class,Class to ensure cell calculation happens on a specific device. 7529,TensorArrayOnCorrectDeviceTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,2379,class, 7530,RNNCellTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,2490,class, 7531,DropoutWrapperTest,tensorflow/tensorflow/python/kernel_tests/rnn_cell_test.py,3065,class, 7532,Plus1RNNCell,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,52,class,"RNN Cell generating (output, new_state) = (input + 1, state + 1)." 7533,ScalarStateRNNCell,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,67,class,"RNN Cell generating (output, new_state) = (input + 1, state + 1)." 7534,UnbalancedOutputRNNCell,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,85,class,"RNN Cell generating (output, new_state) = (input + 1, state + 1)." 7535,TensorArrayStateRNNCell,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,104,class,RNN Cell that keeps its state as a TensorArray.
7536,RNNTest,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,125,class, 7537,_static_vs_dynamic_rnn_benchmark_static,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,362,function, 7538,_static_vs_dynamic_rnn_benchmark_dynamic,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,384,function, 7539,graph_creation_static_vs_dynamic_rnn_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,403,function, 7540,_timer,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,441,function, 7541,static_vs_dynamic_rnn_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,455,function, 7542,_half_seq_len_vs_unroll_half_rnn_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,495,function, 7543,half_seq_len_vs_unroll_half_rnn_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,517,function, 7544,_concat_state_vs_tuple_state_rnn_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,560,function, 7545,concat_state_vs_tuple_state_rnn_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,585,function, 7546,_dynamic_rnn_swap_memory_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,628,function, 7547,dynamic_rnn_swap_memory_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,651,function, 7548,rnn_long_sequence_benchmark,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,685,function, 7549,BenchmarkRNN,tensorflow/tensorflow/python/kernel_tests/rnn_test.py,723,class, 7550,SaveTest,tensorflow/tensorflow/python/kernel_tests/save_restore_ops_test.py,32,class, 7551,ShardedFileOpsTest,tensorflow/tensorflow/python/kernel_tests/save_restore_ops_test.py,44,class, 7552,ShapeInferenceTest,tensorflow/tensorflow/python/kernel_tests/save_restore_ops_test.py,56,class, 7553,ScalarTest,tensorflow/tensorflow/python/kernel_tests/scalar_test.py,37,class, 7554,numpy_reverse,tensorflow/tensorflow/python/kernel_tests/scan_ops_test.py,33,function, 7555,handle_options,tensorflow/tensorflow/python/kernel_tests/scan_ops_test.py,44,function,Adds tf options to numpy scan ops. 
7556,CumsumTest,tensorflow/tensorflow/python/kernel_tests/scan_ops_test.py,73,class, 7557,CumprodTest,tensorflow/tensorflow/python/kernel_tests/scan_ops_test.py,197,class, 7558,_AsType,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,44,function, 7559,_FlatInnerDims,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,48,function, 7560,_FlatOuterDims,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,55,function, 7561,_NumpyScatterNd,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,62,function, 7562,_NumpyUpdate,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,79,function, 7563,_NumpyAdd,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,83,function, 7564,_NumpySub,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,87,function, 7565,_NumpyMul,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,91,function, 7566,_NumpyDiv,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,95,function, 7567,_NumpyMin,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,99,function, 7568,_NumpyMax,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,103,function, 7569,StatefulScatterNdTest,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,107,class, 7570,ScatterNdTest,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,413,class, 7571,ScatterNdNonAliasingAddTest,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,714,class, 7572,ScatterNdTensorTest,tensorflow/tensorflow/python/kernel_tests/scatter_nd_ops_test.py,727,class, 7573,_AsType,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,31,function, 7574,_NumpyAdd,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,35,function, 7575,_NumpyAddScalar,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,42,function, 7576,_NumpySub,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,47,function, 7577,_NumpySubScalar,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,52,function, 7578,_NumpyMul,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,57,function, 7579,_NumpyMulScalar,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,62,function, 7580,_NumpyDiv,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,67,function, 7581,_NumpyDivScalar,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,72,function, 7582,_NumpyMin,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,77,function, 7583,_NumpyMinScalar,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,82,function, 7584,_NumpyMax,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,87,function, 7585,_NumpyMaxScalar,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,92,function, 7586,_NumpyUpdate,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,97,function, 7587,_NumpyUpdateScalar,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,102,function, 7588,ScatterTest,tensorflow/tensorflow/python/kernel_tests/scatter_ops_test.py,128,class, 7589,SegmentReductionHelper,tensorflow/tensorflow/python/kernel_tests/segment_reduction_ops_test.py,37,class, 7590,SegmentReductionOpTest,tensorflow/tensorflow/python/kernel_tests/segment_reduction_ops_test.py,87,class, 7591,UnsortedSegmentTest,tensorflow/tensorflow/python/kernel_tests/segment_reduction_ops_test.py,259,class, 7592,SparseSegmentReductionHelper,tensorflow/tensorflow/python/kernel_tests/segment_reduction_ops_test.py,492,class, 
7593,SparseSegmentReductionOpTest,tensorflow/tensorflow/python/kernel_tests/segment_reduction_ops_test.py,511,class, 7594,SegmentReductionOpBenchmark,tensorflow/tensorflow/python/kernel_tests/segment_reduction_ops_test.py,938,class, 7595,_AddTest,tensorflow/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py,34,function, 7596,SelfAdjointEigTest,tensorflow/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py,41,class, 7597,SortEigenDecomposition,tensorflow/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py,95,function, 7598,EquilibrateEigenVectorPhases,tensorflow/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py,103,function,"Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`. Eigenvectors are only unique up to an arbitrary phase. This function rotates x such that it matches y. Precondition: The columns of x and y differ by a multiplicative complex phase factor only. Args: x: `np.ndarray` with Eigenvectors y: `np.ndarray` with Eigenvectors Returns: `np.ndarray` containing an equilibrated version of x." 7599,_GetSelfAdjointEigTest,tensorflow/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py,122,function, 7600,SelfAdjointEigGradTest,tensorflow/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py,180,class, 7601,_GetSelfAdjointEigGradTest,tensorflow/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py,184,function, 7602,SessionOpsTest,tensorflow/tensorflow/python/kernel_tests/session_ops_test.py,32,class, 7603,_values,tensorflow/tensorflow/python/kernel_tests/sets_test.py,40,function, 7604,_constant,tensorflow/tensorflow/python/kernel_tests/sets_test.py,46,function, 7605,_dense_to_sparse,tensorflow/tensorflow/python/kernel_tests/sets_test.py,50,function, 7606,SetOpsTest,tensorflow/tensorflow/python/kernel_tests/sets_test.py,71,class, 7607,_sparsify,tensorflow/tensorflow/python/kernel_tests/shape_ops_test.py,39,function, 7608,ShapeOpsTest,tensorflow/tensorflow/python/kernel_tests/shape_ops_test.py,51,class, 7609,TileTest,tensorflow/tensorflow/python/kernel_tests/shape_ops_test.py,421,class, 7610,SliceTest,tensorflow/tensorflow/python/kernel_tests/slice_op_test.py,35,class, 7611,SoftmaxTest,tensorflow/tensorflow/python/kernel_tests/softmax_op_test.py,36,class, 7612,SoftplusTest,tensorflow/tensorflow/python/kernel_tests/softplus_op_test.py,32,class, 7613,SoftsignTest,tensorflow/tensorflow/python/kernel_tests/softsign_op_test.py,31,class, 7614,space_to_batch_direct,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,34,function,"Direct Python implementation of space-to-batch conversion. This is used for tests only. Args: input_array: N-D array block_shape: 1-D array of shape [num_block_dims]. paddings: 2-D array of shape [num_block_dims, 2]. Returns: Converted tensor." 7615,PythonOpImpl,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,75,class, 7616,CppOpImpl,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,86,class, 7617,SpaceToBatchTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,97,class,"Tests input-output pairs for the SpaceToBatch and BatchToSpace ops. This uses the Python compatibility wrapper that forwards to space_to_batch_nd." 7618,SpaceToBatchCppTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,189,class,"Tests input-output pairs for the SpaceToBatch and BatchToSpace ops. This uses the C++ ops." 
7619,SpaceToBatchNDTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,197,class,Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops. 7620,SpaceToBatchSpaceToDepth,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,317,class, 7621,SpaceToBatchSpaceToDepthCpp,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,334,class, 7622,SpaceToBatchErrorHandlingTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,338,class, 7623,SpaceToBatchErrorHandlingCppTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,415,class, 7624,SpaceToBatchNDErrorHandlingTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,420,class, 7625,SpaceToBatchGradientTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,524,class, 7626,SpaceToBatchGradientCppTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,579,class, 7627,SpaceToBatchNDGradientTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,583,class, 7628,RequiredSpaceToBatchPaddingsTest,tensorflow/tensorflow/python/kernel_tests/spacetobatch_op_test.py,627,class, 7629,SpaceToDepthTest,tensorflow/tensorflow/python/kernel_tests/spacetodepth_op_test.py,35,class, 7630,SpaceToDepthGradientTest,tensorflow/tensorflow/python/kernel_tests/spacetodepth_op_test.py,344,class, 7631,_sparsify,tensorflow/tensorflow/python/kernel_tests/sparse_add_op_test.py,39,function, 7632,SparseAddTest,tensorflow/tensorflow/python/kernel_tests/sparse_add_op_test.py,51,class, 7633,_s2d_add_vs_sparse_add,tensorflow/tensorflow/python/kernel_tests/sparse_add_op_test.py,217,function, 7634,SparseAddBenchmark,tensorflow/tensorflow/python/kernel_tests/sparse_add_op_test.py,239,class, 7635,SparseConcatTest,tensorflow/tensorflow/python/kernel_tests/sparse_concat_op_test.py,32,class, 7636,_indexedslice,tensorflow/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py,35,function, 7637,IndexedSlicesConditionalAccumulatorTest,tensorflow/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py,47,class, 7638,BaseSparseCrossOpTest,tensorflow/tensorflow/python/kernel_tests/sparse_cross_op_test.py,35,class, 7639,SparseCrossOpTest,tensorflow/tensorflow/python/kernel_tests/sparse_cross_op_test.py,79,class, 7640,SparseCrossV2OpTest,tensorflow/tensorflow/python/kernel_tests/sparse_cross_op_test.py,507,class, 7641,SparseCrossHashedOpTest,tensorflow/tensorflow/python/kernel_tests/sparse_cross_op_test.py,881,class, 7642,RandMatrix,tensorflow/tensorflow/python/kernel_tests/sparse_matmul_op_test.py,31,function, 7643,SparseMatMulTest,tensorflow/tensorflow/python/kernel_tests/sparse_matmul_op_test.py,41,class, 7644,MatMulGradientTest,tensorflow/tensorflow/python/kernel_tests/sparse_matmul_op_test.py,135,class, 7645,_sparsify,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,41,function, 7646,SparseToIndicatorTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,53,class, 7647,SparseMergeTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,113,class, 7648,SparseMergeHighDimTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,227,class, 7649,SparseRetainTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,287,class, 7650,SparseResetShapeTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,332,class, 7651,SparseFillEmptyRowsTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,473,class, 
7652,SparseAddTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,606,class, 7653,SparseReduceTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,622,class, 7654,SparseMathOpsTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,790,class, 7655,SparseSoftmaxTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,891,class, 7656,SparseMinimumMaximumTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,955,class, 7657,SparseTransposeTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,1021,class, 7658,SparsePlaceholderTest,tensorflow/tensorflow/python/kernel_tests/sparse_ops_test.py,1043,class, 7659,SparseReorderTest,tensorflow/tensorflow/python/kernel_tests/sparse_reorder_op_test.py,33,class, 7660,SparseReshapeTest,tensorflow/tensorflow/python/kernel_tests/sparse_reshape_op_test.py,34,class, 7661,EmptySparseTensorReshapeTest,tensorflow/tensorflow/python/kernel_tests/sparse_reshape_op_test.py,333,class,"Tests for reshaping 0-sized SparseTensors, compared w/ dense tensors." 7662,SerializeSparseTest,tensorflow/tensorflow/python/kernel_tests/sparse_serialization_ops_test.py,31,class, 7663,SparseSliceOpTest,tensorflow/tensorflow/python/kernel_tests/sparse_slice_op_test.py,31,class, 7664,SparseSplitOpTest,tensorflow/tensorflow/python/kernel_tests/sparse_split_op_test.py,29,class, 7665,SparseTensorDenseMatMulGradientTest,tensorflow/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_grad_test.py,32,class, 7666,_maybe_complex,tensorflow/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py,42,function, 7667,SparseTensorDenseMatMulTest,tensorflow/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py,48,class, 7668,_sparse_tensor_dense_vs_dense_matmul_benchmark_dense,tensorflow/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py,254,function, 7669,_sparse_tensor_dense_vs_dense_matmul_benchmark_sparse,tensorflow/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py,283,function, 7670,sparse_tensor_dense_vs_dense_matmul_benchmark,tensorflow/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py,310,function, 7671,main,tensorflow/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py,389,function, 7672,SparseTensorsMapTest,tensorflow/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py,43,class, 7673,BenchmarkSparseTensorsMapVsSerialization,tensorflow/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py,190,class, 7674,SparseToDenseTest,tensorflow/tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py,31,class, 7675,SparseXentTest,tensorflow/tensorflow/python/kernel_tests/sparse_xent_op_test.py,45,class, 7676,_sparse_vs_dense_xent_benchmark_dense,tensorflow/tensorflow/python/kernel_tests/sparse_xent_op_test.py,281,function, 7677,_sparse_vs_dense_xent_benchmark_sparse,tensorflow/tensorflow/python/kernel_tests/sparse_xent_op_test.py,300,function, 7678,sparse_vs_dense_xent_benchmark,tensorflow/tensorflow/python/kernel_tests/sparse_xent_op_test.py,313,function, 7679,main,tensorflow/tensorflow/python/kernel_tests/sparse_xent_op_test.py,356,function, 7680,SparseMaskTest,tensorflow/tensorflow/python/kernel_tests/sparsemask_op_test.py,28,class, 7681,SplitOpTest,tensorflow/tensorflow/python/kernel_tests/split_op_test.py,37,class, 7682,np_split_squeeze,tensorflow/tensorflow/python/kernel_tests/stack_op_test.py,34,function, 7683,StackOpTest,tensorflow/tensorflow/python/kernel_tests/stack_op_test.py,43,class, 
7684,AutomaticStackingTest,tensorflow/tensorflow/python/kernel_tests/stack_op_test.py,271,class, 7685,StackOpTest,tensorflow/tensorflow/python/kernel_tests/stack_ops_test.py,34,class, 7686,StackOpRefTest,tensorflow/tensorflow/python/kernel_tests/stack_ops_test.py,175,class,Tests for deprecated non-resource variant of stack ops. 7687,StageTest,tensorflow/tensorflow/python/kernel_tests/stage_op_test.py,30,class, 7688,StringsToBytesOpTest,tensorflow/tensorflow/python/kernel_tests/string_bytes_split_op_test.py,33,class, 7689,StringFormatOpTest,tensorflow/tensorflow/python/kernel_tests/string_format_op_test.py,33,class, 7690,StringJoinOpTest,tensorflow/tensorflow/python/kernel_tests/string_join_op_test.py,25,class, 7691,StringLengthOpTest,tensorflow/tensorflow/python/kernel_tests/string_length_op_test.py,26,class, 7692,StringLowerOpTest,tensorflow/tensorflow/python/kernel_tests/string_lower_op_test.py,26,class,Test cases for tf.strings.lower. 7693,StringSplitOpTest,tensorflow/tensorflow/python/kernel_tests/string_split_op_test.py,37,class, 7694,StringSplitV2OpTest,tensorflow/tensorflow/python/kernel_tests/string_split_op_test.py,234,class, 7695,StringStripOpTest,tensorflow/tensorflow/python/kernel_tests/string_strip_op_test.py,25,class,Test cases for tf.strings.strip. 7696,StringToHashBucketOpTest,tensorflow/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py,28,class, 7697,StringToNumberOpTest,tensorflow/tensorflow/python/kernel_tests/string_to_number_op_test.py,30,class, 7698,StringUpperOpTest,tensorflow/tensorflow/python/kernel_tests/string_upper_op_test.py,26,class,Test cases for tf.strings.upper. 7699,SubstrOpTest,tensorflow/tensorflow/python/kernel_tests/substr_op_test.py,30,class, 7700,SummaryOpsCoreTest,tensorflow/tensorflow/python/kernel_tests/summary_ops_test.py,50,class, 7701,SummaryWriterTest,tensorflow/tensorflow/python/kernel_tests/summary_ops_test.py,677,class, 7702,SummaryOpsTest,tensorflow/tensorflow/python/kernel_tests/summary_ops_test.py,954,class, 7703,events_from_file,tensorflow/tensorflow/python/kernel_tests/summary_ops_test.py,1210,function,"Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.Event protos in the event file." 7704,events_from_logdir,tensorflow/tensorflow/python/kernel_tests/summary_ops_test.py,1228,function,"Returns all events in the single event file in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file."
7705,to_numpy,tensorflow/tensorflow/python/kernel_tests/summary_ops_test.py,1246,function, 7706,SummaryV1AudioOpTest,tensorflow/tensorflow/python/kernel_tests/summary_v1_audio_op_test.py,30,class, 7707,SummaryV1ImageOpTest,tensorflow/tensorflow/python/kernel_tests/summary_v1_image_op_test.py,34,class, 7708,SummaryV1OpsTest,tensorflow/tensorflow/python/kernel_tests/summary_v1_ops_test.py,35,class, 7709,SummaryV1TensorOpTest,tensorflow/tensorflow/python/kernel_tests/summary_v1_tensor_op_test.py,33,class, 7710,_AddTest,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,42,function, 7711,SvdOpTest,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,49,class, 7712,_GetSvdOpTest,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,90,function, 7713,SvdGradOpTest,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,201,class, 7714,_NormalizingSvd,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,205,function, 7715,_GetSvdGradOpTest,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,227,function, 7716,SvdGradGradOpTest,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,267,class, 7717,_GetSvdGradGradOpTest,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,271,function, 7718,SVDBenchmark,tensorflow/tensorflow/python/kernel_tests/svd_op_test.py,314,class, 7719,TemplateMirroredStrategyTest,tensorflow/tensorflow/python/kernel_tests/template_mirrored_strategy_test.py,30,class, 7720,variable_scoped_function,tensorflow/tensorflow/python/kernel_tests/template_test.py,39,function, 7721,internally_variable_scoped_function,tensorflow/tensorflow/python/kernel_tests/template_test.py,45,function, 7722,function_with_create,tensorflow/tensorflow/python/kernel_tests/template_test.py,51,function,Creates a variable as a side effect using tf.Variable. 7723,function_with_side_create,tensorflow/tensorflow/python/kernel_tests/template_test.py,58,function,Creates a variable as a side effect using tf.get_variable. 
7724,variable_scoped_function_with_local_variable,tensorflow/tensorflow/python/kernel_tests/template_test.py,65,function, 7725,TemplateTest,tensorflow/tensorflow/python/kernel_tests/template_test.py,72,class, 7726,_make_converter,tensorflow/tensorflow/python/kernel_tests/tensor_array_ops_test.py,52,function, 7727,_make_ta,tensorflow/tensorflow/python/kernel_tests/tensor_array_ops_test.py,65,function, 7728,TensorArrayTest,tensorflow/tensorflow/python/kernel_tests/tensor_array_ops_test.py,72,class, 7729,TensorArrayBenchmark,tensorflow/tensorflow/python/kernel_tests/tensor_array_ops_test.py,1798,class, 7730,TensorPriorityTest,tensorflow/tensorflow/python/kernel_tests/tensor_priority_test.py,26,class, 7731,_add_test,tensorflow/tensorflow/python/kernel_tests/tensordot_op_test.py,35,function, 7732,TensordotTest,tensorflow/tensorflow/python/kernel_tests/tensordot_op_test.py,42,class, 7733,_get_tensordot_tests,tensorflow/tensorflow/python/kernel_tests/tensordot_op_test.py,143,function, 7734,TopKTest,tensorflow/tensorflow/python/kernel_tests/topk_op_test.py,40,class, 7735,TopKBenchmark,tensorflow/tensorflow/python/kernel_tests/topk_op_test.py,224,class, 7736,TraceTest,tensorflow/tensorflow/python/kernel_tests/trace_op_test.py,27,class, 7737,TransposeTest,tensorflow/tensorflow/python/kernel_tests/transpose_op_test.py,35,class, 7738,TridiagonalMulOpTest,tensorflow/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py,39,class, 7739,flags,tensorflow/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py,53,function, 7740,_tfconst,tensorflow/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py,63,function, 7741,_tf_ones,tensorflow/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py,67,function, 7742,TridiagonalSolveOpTest,tensorflow/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py,71,class, 7743,decor,tensorflow/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py,746,function, 7744,_nested_encode,tensorflow/tensorflow/python/kernel_tests/unicode_decode_op_test.py,37,function,Encode each string in a nested list with `encoding`. 7745,_nested_codepoints,tensorflow/tensorflow/python/kernel_tests/unicode_decode_op_test.py,45,function,Replace each string in a nested list with a list of its codepoints. 7746,_nested_offsets,tensorflow/tensorflow/python/kernel_tests/unicode_decode_op_test.py,58,function,Replace each string in a nested list with a list of start offsets. 7747,_nested_splitchars,tensorflow/tensorflow/python/kernel_tests/unicode_decode_op_test.py,73,function,Replace each string in a nested list with a list of char substrings. 
7748,_make_sparse_tensor,tensorflow/tensorflow/python/kernel_tests/unicode_decode_op_test.py,86,function, 7749,UnicodeDecodeTest,tensorflow/tensorflow/python/kernel_tests/unicode_decode_op_test.py,93,class, 7750,UnicodeSplitTest,tensorflow/tensorflow/python/kernel_tests/unicode_decode_op_test.py,447,class, 7751,UnicodeEncodeOpTest,tensorflow/tensorflow/python/kernel_tests/unicode_encode_op_test.py,36,class, 7752,UnicodeScriptOpTest,tensorflow/tensorflow/python/kernel_tests/unicode_script_op_test.py,30,class, 7753,UnicodeScriptBenchmarks,tensorflow/tensorflow/python/kernel_tests/unicode_script_op_test.py,61,class, 7754,UnicodeTranscodeOpTest,tensorflow/tensorflow/python/kernel_tests/unicode_transcode_op_test.py,33,class, 7755,UniqueTest,tensorflow/tensorflow/python/kernel_tests/unique_op_test.py,29,class, 7756,UniqueWithCountsTest,tensorflow/tensorflow/python/kernel_tests/unique_op_test.py,110,class, 7757,UnicodeTestCase,tensorflow/tensorflow/python/kernel_tests/unsorted_segment_join_op_test.py,32,class,Test case with Python3-compatible string comparator. 7758,UnsortedSegmentJoinOpTest,tensorflow/tensorflow/python/kernel_tests/unsorted_segment_join_op_test.py,42,class, 7759,np_split_squeeze,tensorflow/tensorflow/python/kernel_tests/unstack_op_test.py,31,function, 7760,UnstackOpTest,tensorflow/tensorflow/python/kernel_tests/unstack_op_test.py,40,class, 7761,VariableOpTest,tensorflow/tensorflow/python/kernel_tests/variable_ops_test.py,46,class, 7762,run_inside_wrap_function_in_eager_mode,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,48,function,"Decorator to execute the same graph code in eager and graph modes. In graph mode, we just execute the graph_function passed as an argument. In eager mode, we wrap the function using wrap_function and then execute the wrapped result.
Args: graph_function: python function containing graph code to be wrapped. Returns: decorated function" 7763,VariableScopeTest,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,72,class, 7764,axis0_into1_partitioner,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,1375,function, 7765,axis0_into2_partitioner,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,1380,function, 7766,axis0_into3_partitioner,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,1386,function, 7767,VariableScopeWithPartitioningTest,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,1392,class, 7768,VariableScopeWithCustomGetterTest,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,1521,class, 7769,PartitionInfoTest,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,1738,class, 7770,VariableScopeMultithreadedTest,tensorflow/tensorflow/python/kernel_tests/variable_scope_test.py,1801,class, 7771,VariablesTestCase,tensorflow/tensorflow/python/kernel_tests/variables_test.py,47,class, 7772,IsInitializedTest,tensorflow/tensorflow/python/kernel_tests/variables_test.py,633,class, 7773,ObsoleteIsInitializedTest,tensorflow/tensorflow/python/kernel_tests/variables_test.py,683,class, 7774,PartitionedVariableTest,tensorflow/tensorflow/python/kernel_tests/variables_test.py,714,class, 7775,VariableContainerTest,tensorflow/tensorflow/python/kernel_tests/variables_test.py,872,class, 7776,AggregationModesTest,tensorflow/tensorflow/python/kernel_tests/variables_test.py,897,class, 7777,_test_values,tensorflow/tensorflow/python/kernel_tests/weights_broadcast_test.py,32,function, 7778,AssertBroadcastableTest,tensorflow/tensorflow/python/kernel_tests/weights_broadcast_test.py,36,class, 7779,BroadcastWeightsTest,tensorflow/tensorflow/python/kernel_tests/weights_broadcast_test.py,164,class, 7780,WhereOpTest,tensorflow/tensorflow/python/kernel_tests/where_op_test.py,38,class, 7781,WhereBenchmark,tensorflow/tensorflow/python/kernel_tests/where_op_test.py,271,class, 7782,random_gamma,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,56,function, 7783,random_gamma_with_alpha_beta,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,60,function, 7784,random_poisson_v2,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,65,function, 7785,random_poisson_v2_with_lam,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,69,function, 7786,fill,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,73,function, 7787,WhileV2Test,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,77,class, 7788,ScalarShape,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,1814,function, 7789,GetOptimizedGraph,tensorflow/tensorflow/python/kernel_tests/while_v2_test.py,1818,function, 7790,XentTest,tensorflow/tensorflow/python/kernel_tests/xent_op_test.py,41,class, 7791,XentBenchmark,tensorflow/tensorflow/python/kernel_tests/xent_op_test.py,329,class, 7792,ZeroDivisionTest,tensorflow/tensorflow/python/kernel_tests/zero_division_test.py,28,class, 7793,TrainingPredictionOpsTest,tensorflow/tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py,30,class,Tests prediction ops for training. 7794,PredictionOpsTest,tensorflow/tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py,2159,class,Tests prediction ops for inference. 7795,FeatureContribsOpsTest,tensorflow/tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py,2636,class,Tests feature contribs ops for model understanding.
7796,QuantileOpsTest,tensorflow/tensorflow/python/kernel_tests/boosted_trees/quantile_ops_test.py,40,class, 7797,ResourceOpsTest,tensorflow/tensorflow/python/kernel_tests/boosted_trees/resource_ops_test.py,30,class,Tests resource_ops. 7798,StatsOpsTest,tensorflow/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py,36,class,Tests stats_ops. 7799,BestMultiDimFeatureSplitMultiClassV2Op,tensorflow/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py,1673,class,Tests multi-class/multi-regression for best splits using V2 op. 7800,UpdateTreeEnsembleOpTest,tensorflow/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py,34,class,Tests for growing tree ensemble from split candidates. 7801,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/bernoulli_test.py,36,function, 7802,make_bernoulli,tensorflow/tensorflow/python/kernel_tests/distributions/bernoulli_test.py,48,function, 7803,entropy,tensorflow/tensorflow/python/kernel_tests/distributions/bernoulli_test.py,54,function, 7804,BernoulliTest,tensorflow/tensorflow/python/kernel_tests/distributions/bernoulli_test.py,59,class, 7805,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/beta_test.py,36,function, 7806,BetaTest,tensorflow/tensorflow/python/kernel_tests/distributions/beta_test.py,50,class, 7807,BaseBijectorTest,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,35,class,Tests properties of the Bijector base-class. 7808,IntentionallyMissingError,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,83,class, 7809,BrokenBijector,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,87,class,Forward and inverse are not inverses of each other. 7810,BijectorTestEventNdims,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,118,class, 7811,BijectorCachingTestBase,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,148,class, 7812,BijectorCachingTest,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,188,class,Test caching with BrokenBijector. 7813,ExpOnlyJacobian,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,196,class,Only used for jacobian calculations. 7814,ConstantJacobian,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,213,class,Only used for jacobian calculations. 7815,BijectorReduceEventDimsTest,tensorflow/tensorflow/python/kernel_tests/distributions/bijector_test.py,230,class,Test caching with BrokenBijector. 
7816,make_categorical,tensorflow/tensorflow/python/kernel_tests/distributions/categorical_test.py,40,function, 7817,CategoricalTest,tensorflow/tensorflow/python/kernel_tests/distributions/categorical_test.py,46,class, 7818,DirichletMultinomialTest,tensorflow/tensorflow/python/kernel_tests/distributions/dirichlet_multinomial_test.py,35,class, 7819,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/dirichlet_test.py,35,function, 7820,DirichletTest,tensorflow/tensorflow/python/kernel_tests/distributions/dirichlet_test.py,49,class, 7821,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/exponential_test.py,34,function, 7822,ExponentialTest,tensorflow/tensorflow/python/kernel_tests/distributions/exponential_test.py,47,class, 7823,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/gamma_test.py,36,function, 7824,GammaTest,tensorflow/tensorflow/python/kernel_tests/distributions/gamma_test.py,50,class, 7825,IdentityBijectorTest,tensorflow/tensorflow/python/kernel_tests/distributions/identity_bijector_test.py,27,class,Tests correctness of the Y = g(X) = X transformation. 7826,KLTest,tensorflow/tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py,34,class, 7827,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/laplace_test.py,35,function, 7828,LaplaceTest,tensorflow/tensorflow/python/kernel_tests/distributions/laplace_test.py,48,class, 7829,MultinomialTest,tensorflow/tensorflow/python/kernel_tests/distributions/multinomial_test.py,32,class, 7830,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/normal_test.py,42,function, 7831,NormalTest,tensorflow/tensorflow/python/kernel_tests/distributions/normal_test.py,53,class, 7832,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,39,function, 7833,_check_strictly_increasing,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,53,function, 7834,_make_grid,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,58,function,"Returns a uniform grid + noise, reshaped to shape argument." 7835,_value_and_gradient,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,70,function,Calls `fn` and computes the gradient of the result wrt `arg`. 
7836,NdtriTest,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,85,class, 7837,NdtrTest,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,151,class, 7838,LogNdtrTestLower,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,226,class, 7839,LogNdtrTestMid,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,238,class, 7840,LogNdtrTestUpper,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,249,class, 7841,NdtrGradientTest,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,263,class, 7842,LogNdtrGradientTest,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,356,class, 7843,ErfInvTest,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,360,class, 7844,LogCDFLaplaceTest,tensorflow/tensorflow/python/kernel_tests/distributions/special_math_test.py,385,class, 7845,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/student_t_test.py,37,function, 7846,StudentTTest,tensorflow/tensorflow/python/kernel_tests/distributions/student_t_test.py,50,class, 7847,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/uniform_test.py,37,function, 7848,UniformTest,tensorflow/tensorflow/python/kernel_tests/distributions/uniform_test.py,49,class, 7849,try_import,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,43,function, 7850,_logit,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,55,function, 7851,AssertCloseTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,60,class, 7852,MaybeGetStaticTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,93,class, 7853,GetLogitsAndProbsTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,123,class, 7854,EmbedCheckCategoricalEventShapeTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,271,class, 7855,EmbedCheckIntegerCastingClosedTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,312,class, 7856,LogCombinationsTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,352,class, 7857,DynamicShapeTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,380,class, 7858,RotateTransposeTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,485,class, 7859,PickVectorTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,524,class, 7860,PreferStaticRankTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,543,class, 7861,PreferStaticShapeTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,588,class, 7862,PreferStaticValueTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,633,class, 7863,FillTriangularTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,679,class, 7864,FillTriangularInverseTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,785,class, 7865,ReduceWeightedLogSumExp,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,809,class, 7866,GenNewSeedTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,906,class, 7867,SoftplusTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,916,class, 7868,ArgumentsTest,tensorflow/tensorflow/python/kernel_tests/distributions/util_test.py,1022,class, 
7869,_BadAdder,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py,35,class,Adder that will fail if used. 7870,LinearOperatorAdditionCorrectnessTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py,48,class,"Tests correctness of addition with combinations of a few Adders. Tests here are done with the _DEFAULT_ADDITION_TIERS, which means add_operators should reduce all operators resulting in one single operator. This shows that we are able to correctly combine adders using the tiered system. All Adders should be tested separately, and there is no need to test every Adder within this class." 7871,LinearOperatorOrderOfAdditionTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py,176,class,Test that the order of addition is done as specified by tiers. 7872,AddAndReturnScaledIdentityTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py,265,class, 7873,AddAndReturnDiagTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py,326,class, 7874,AddAndReturnTriLTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py,371,class, 7875,AddAndReturnMatrixTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py,395,class, 7876,LinearOperatorAdjointTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py,37,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7877,LinearOperatorAdjointNonSquareTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py,251,class,Tests done in the base class NonSquareLinearOperatorDerivedClassTest. 7878,AdjointTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py,43,class, 7879,CholeskyTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py,85,class, 7880,MatmulTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py,135,class, 7881,SolveTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py,181,class, 7882,InverseTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py,230,class, 7883,_block_diag_dense,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py,38,function,"Convert a list of blocks, into a dense block diagonal matrix." 7884,SquareLinearOperatorBlockDiagTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py,63,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7885,_block_lower_triangular_dense,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py,38,function,Convert a list of blocks into a dense blockwise lower-triangular matrix. 7886,SquareLinearOperatorBlockLowerTriangularTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py,60,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7887,LinearOperatorCirculantBaseTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,39,class,Common class for circulant tests. 7888,LinearOperatorCirculantTestSelfAdjointOperator,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,102,class,"Test of LinearOperatorCirculant when operator is self-adjoint. Real spectrum <==> Self adjoint operator. 
Note that when the spectrum is real, the operator may still be complex." 7889,LinearOperatorCirculantTestHermitianSpectrum,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,169,class,"Test of LinearOperatorCirculant when the spectrum is Hermitian. Hermitian spectrum <==> Real valued operator. We test both real and complex dtypes here though. So in some cases the matrix will be complex but with zero imaginary part." 7890,LinearOperatorCirculantTestNonHermitianSpectrum,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,245,class,"Test of LinearOperatorCirculant when the spectrum is not Hermitian. Non-Hermitian spectrum <==> Complex valued operator. We test only complex dtypes here." 7891,LinearOperatorCirculant2DBaseTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,394,class,Common class for 2D circulant tests. 7892,LinearOperatorCirculant2DTestHermitianSpectrum,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,474,class,"Test of LinearOperatorCirculant2D when the spectrum is Hermitian. Hermitian spectrum <==> Real valued operator. We test both real and complex dtypes here though. So in some cases the matrix will be complex but with zero imaginary part." 7893,LinearOperatorCirculant2DTestNonHermitianSpectrum,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,534,class,"Test of LinearOperatorCirculant when the spectrum is not Hermitian. Non-Hermitian spectrum <==> Complex valued operator. We test only complex dtypes here." 7894,LinearOperatorCirculant3DTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py,660,class,Simple test of the 3D case. See also the 1D and 2D tests. 7895,SquareLinearOperatorCompositionTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py,34,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7896,NonSquareLinearOperatorCompositionTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py,142,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7897,LinearOperatorDiagTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py,34,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7898,SquareLinearOperatorFullMatrixTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py,36,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7899,SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py,122,class,"Most tests done in the base class LinearOperatorDerivedClassTest. In this test, the operator is constructed with hints that invoke the use of a Cholesky decomposition for solves/determinant." 7900,NonSquareLinearOperatorFullMatrixTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py,221,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7901,LinearOperatorHouseholderTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py,35,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7902,LinearOperatorIdentityTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py,38,class,Most tests done in the base class LinearOperatorDerivedClassTest. 
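The circulant test docstrings above lean on the correspondence between spectrum and operator (real spectrum <==> self-adjoint operator, Hermitian spectrum <==> real operator). A small NumPy sketch of why: a circulant matrix is diagonalized by the DFT, so a dense matrix can be rebuilt from its spectrum. The normalization convention below is an assumption for illustration:

```python
# Hedged sketch: C = F^-1 diag(spectrum) F, with F the (unnormalized) DFT.
import numpy as np

def circulant_from_spectrum(spectrum):
  n = len(spectrum)
  f = np.fft.fft(np.eye(n), axis=0)        # DFT matrix
  f_inv = np.fft.ifft(np.eye(n), axis=0)   # inverse DFT matrix
  return f_inv @ np.diag(spectrum) @ f

# Real spectrum <==> self-adjoint operator:
c = circulant_from_spectrum(np.array([3.0, 1.0, 1.0]))
assert np.allclose(c, c.conj().T)
```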
7903,LinearOperatorScaledIdentityTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py,281,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7904,LinearOperatorInversionTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_inversion_test.py,35,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7905,_kronecker_dense,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py,38,function,"Convert a list of factors, into a dense Kronecker product." 7906,KroneckerDenseTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py,56,class,Test of `_kronecker_dense` function. 7907,SquareLinearOperatorKroneckerTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py,82,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7908,BaseLinearOperatorLowRankUpdatetest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py,35,class,Base test for this type of operator. 7909,LinearOperatorLowRankUpdatetestWithDiagUseCholesky,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py,176,class,"A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky." 7910,LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py,195,class,"A = L + UDU^H, D !> 0, L > 0 ==> A !> 0 and we cannot use a Cholesky." 7911,LinearOperatorLowRankUpdatetestNoDiagUseCholesky,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py,219,class,"A = L + UU^H, L > 0 ==> A > 0 and we can use a Cholesky." 7912,LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py,238,class,"A = L + UV^H, L > 0 ==> A is not symmetric and we cannot use a Cholesky." 7913,LinearOperatorLowRankUpdatetestWithDiagNotSquare,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py,263,class,"A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky." 7914,LinearOperatorLowRankUpdateBroadcastsShape,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py,273,class,Test that the operator's shape is the broadcast of arguments. 7915,LinearOperatorLowerTriangularTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py,32,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7916,LinearOperatorPermutationTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_permutation_test.py,37,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7917,LinearOperatorShape,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_test.py,37,class,LinearOperator that implements the methods ._shape and _shape_tensor. 7918,LinearOperatorMatmulSolve,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_test.py,65,class,LinearOperator that wraps a [batch] matrix and implements matmul/solve. 7919,LinearOperatorTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_test.py,100,class, 7920,LinearOperatorToeplitzTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_toeplitz_test.py,42,class,Most tests done in the base class LinearOperatorDerivedClassTest. 
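The LowRankUpdate test names above encode the structure A = L + UDU^H and state when a Cholesky-based path is valid. A hedged NumPy sketch of that positivity argument (the matrices below are illustrative, not the tests' fixtures):

```python
# When D > 0 and L > 0, A = L + U D U^H is positive definite, so a Cholesky
# factorization exists; with D not positive (or UV^H with V != U) it may not.
import numpy as np

rng = np.random.default_rng(0)
n, r = 4, 2
L = np.diag(rng.uniform(1.0, 2.0, size=n))   # L > 0 (diagonal for simplicity)
U = rng.normal(size=(n, r))
D = np.diag([0.5, 1.5])                      # D > 0

A = L + U @ D @ U.conj().T
np.linalg.cholesky(A)                        # succeeds because A > 0
```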
7921,_LinearOperatorTriDiagBase,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_tridiag_test.py,31,class, 7922,LinearOperatorTriDiagCompactTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_tridiag_test.py,105,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7923,LinearOperatorTriDiagSequenceTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_tridiag_test.py,126,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7924,LinearOperatorTriDiagMatrixTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_tridiag_test.py,160,class,Most tests done in the base class LinearOperatorDerivedClassTest. 7925,AssertZeroImagPartTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,34,class, 7926,AssertNoEntriesWithModulusZeroTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,59,class, 7927,BroadcastMatrixBatchDimsTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,94,class, 7928,MatrixSolveWithBroadcastTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,192,class, 7929,DomainDimensionStubOperator,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,277,class, 7930,AssertCompatibleMatrixDimensionsTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,286,class, 7931,DummyOperatorWithHint,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,305,class, 7932,UseOperatorOrProvidedHintUnlessContradictingTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,311,class, 7933,BlockwiseTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py,348,class, 7934,LinearOperatorZerosTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py,35,class,Most tests done in the base class LinearOperatorDerivedClassTest. 
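BroadcastMatrixBatchDimsTest above concerns broadcasting the leading (batch) dimensions of [batch] matrices while leaving the trailing two (matrix) dimensions alone. A NumPy illustration of the shape rule only, not the util function's API:

```python
# Hedged sketch of the batch-broadcast shape rule the tests check.
import numpy as np

x = np.ones((3, 1, 2, 2))   # batch shape (3, 1), matrix shape (2, 2)
y = np.ones((4, 2, 2))      # batch shape (4,),   matrix shape (2, 2)
batch_shape = np.broadcast_shapes(x.shape[:-2], y.shape[:-2])  # (3, 4)
x_bcast = np.broadcast_to(x, batch_shape + x.shape[-2:])
y_bcast = np.broadcast_to(y, batch_shape + y.shape[-2:])
assert x_bcast.shape == y_bcast.shape == (3, 4, 2, 2)
```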
7935,LinearOperatorZerosNotSquareTest,tensorflow/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py,201,class, 7936,_add_test,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/conjugate_gradient_test.py,30,function, 7937,ConjugateGradientTest,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/conjugate_gradient_test.py,37,class, 7938,_get_conjugate_gradient_test,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/conjugate_gradient_test.py,41,function, 7939,dense_to_csr_sparse_matrix,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py,36,function, 7940,_add_test,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py,42,function, 7941,CSRSparseMatrixDenseMatMulGradTest,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py,51,class, 7942,create_mat_mul_test_fn,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_dense_mat_mul_grad_test.py,114,function, 7943,dense_to_csr_sparse_matrix,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py,35,function, 7944,_add_test,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py,41,function, 7945,CSRSparseMatrixGradTest,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py,50,class, 7946,dense_to_csr_sparse_matrix,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_ops_test.py,53,function, 7947,_swap,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_ops_test.py,59,function, 7948,twist_matrix,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_ops_test.py,63,function,Permute the rows and columns of a 2D or (batched) 3D Tensor. 7949,CSRSparseMatrixOpsTest,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_ops_test.py,93,class, 7950,CSRSparseMatrixOpsBenchmark,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_ops_test.py,1336,class, 7951,dense_to_csr_sparse_matrix,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py,36,function, 7952,_add_test,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py,42,function, 7953,CSRSparseMatrixGradTest,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py,51,class, 7954,create_sparse_mat_mul_test_fn,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py,115,function, 7955,CSRSparseMatrixTest,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_test.py,33,class, 7956,SparseMatrixMatmulTest,tensorflow/tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_test.py,145,class, 7957,DecodeProtoOpTest,tensorflow/tensorflow/python/kernel_tests/proto/decode_proto_op_test.py,28,class, 7958,DecodeProtoOpTestBase,tensorflow/tensorflow/python/kernel_tests/proto/decode_proto_op_test_base.py,37,class,Base class for testing proto decoding ops. 7959,DescriptorSourceTest,tensorflow/tensorflow/python/kernel_tests/proto/descriptor_source_test.py,27,class, 7960,DescriptorSourceTestBase,tensorflow/tensorflow/python/kernel_tests/proto/descriptor_source_test_base.py,33,class,Base class for testing descriptor sources. 
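ConjugateGradientTest above covers an iterative solver for symmetric positive-definite systems. A self-contained NumPy sketch of the classic algorithm for reference; the TensorFlow implementation works on LinearOperators and CSR sparse matrices, and the tolerance and iteration cap below are assumptions:

```python
# Minimal conjugate gradient for a dense SPD system (illustration only).
import numpy as np

def conjugate_gradient(a, b, tol=1e-10, max_iter=100):
  x = np.zeros_like(b)
  r = b - a @ x                 # residual
  p = r.copy()                  # search direction
  rs = r @ r
  for _ in range(max_iter):
    ap = a @ p
    alpha = rs / (p @ ap)
    x += alpha * p
    r -= alpha * ap
    rs_new = r @ r
    if np.sqrt(rs_new) < tol:
      break
    p = r + (rs_new / rs) * p
    rs = rs_new
  return x

a = np.array([[4.0, 1.0], [1.0, 3.0]])   # symmetric positive definite
b = np.array([1.0, 2.0])
assert np.allclose(a @ conjugate_gradient(a, b), b)
```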
7961,EncodeProtoOpTest,tensorflow/tensorflow/python/kernel_tests/proto/encode_proto_op_test.py,28,class, 7962,EncodeProtoOpTestBase,tensorflow/tensorflow/python/kernel_tests/proto/encode_proto_op_test_base.py,39,class,Base class for testing proto encoding ops. 7963,ProtoOpTestBase,tensorflow/tensorflow/python/kernel_tests/proto/proto_op_test_base.py,31,class,Base class for testing proto decoding and encoding ops. 7964,MultinomialTest,tensorflow/tensorflow/python/kernel_tests/random/multinomial_op_big_test.py,31,class, 7965,composed_sampler,tensorflow/tensorflow/python/kernel_tests/random/multinomial_op_test.py,40,function, 7966,MultinomialTest,tensorflow/tensorflow/python/kernel_tests/random/multinomial_op_test.py,55,class, 7967,native_op_vs_composed_ops,tensorflow/tensorflow/python/kernel_tests/random/multinomial_op_test.py,216,function, 7968,MultinomialBenchmark,tensorflow/tensorflow/python/kernel_tests/random/multinomial_op_test.py,237,class, 7969,_get_stddev_inside_bounds_before_using_randn,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,43,function, 7970,TruncatedNormalMoments,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,51,class, 7971,calculate_moments,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,96,function, 7972,z_test,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,103,function, 7973,ParameterizedTruncatedNormalTest,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,114,class, 7974,parameterized_vs_naive,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,394,function, 7975,randn_sampler_switchover,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,419,function, 7976,TruncatedNormalBenchmark,tensorflow/tensorflow/python/kernel_tests/random/parameterized_truncated_normal_op_test.py,471,class, 7977,RandomBinomialTest,tensorflow/tensorflow/python/kernel_tests/random/random_binomial_test.py,36,class,This is a large test due to the moments computation taking some time. 7978,RandomCropTest,tensorflow/tensorflow/python/kernel_tests/random/random_crop_test.py,28,class, 7979,RandomGammaTest,tensorflow/tensorflow/python/kernel_tests/random/random_gamma_test.py,36,class,This is a medium test due to the moments computation taking some time. 7980,AddLeadingUnitDimensionsTest,tensorflow/tensorflow/python/kernel_tests/random/random_grad_test.py,35,class, 7981,RandomGammaGradTest,tensorflow/tensorflow/python/kernel_tests/random/random_grad_test.py,59,class,"Tests for the derivative of a sample ~ Gamma(alpha, beta) wrt alpha and beta. The sample is an ""implicit"" function of alpha, beta and the independent random noise u. The derivatives we are looking for are d sample(alpha, beta, u) / dalpha (and dbeta). The derivative w.r.t. beta is computed by the standard automatic differentiation, so we trust that it is computed correctly. The derivative w.r.t. alpha is computed by an Eigen function, so we test it in several ways. Unfortunately, the standard derivative checking by perturbing the parameter is impossible here, because we cannot fix the value of u in the random sampler. Instead, we compare the derivative for the given pair of (sample, alpha) to the values computed in various ways, and also check some statistical properties of the derivative."
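`composed_sampler` and `native_op_vs_composed_ops` above compare the native multinomial op against a sampler composed from simpler ops. One standard way to compose such a sampler is inverse-CDF sampling; the sketch below illustrates the idea in NumPy and is not necessarily how the test's composed_sampler is written:

```python
# Hedged sketch of a "composed" categorical sampler: softmax -> CDF -> invert.
import numpy as np

def composed_sampler(logits, num_samples, rng):
  probs = np.exp(logits - logits.max())
  probs /= probs.sum()
  cdf = np.cumsum(probs)
  u = rng.random(num_samples)
  return np.searchsorted(cdf, u)     # index of first CDF entry >= u

rng = np.random.default_rng(0)
samples = composed_sampler(np.log([0.2, 0.3, 0.5]), 10000, rng)
# Empirical class frequencies should be close to [0.2, 0.3, 0.5].
```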
7982,RandomOpTestCommon,tensorflow/tensorflow/python/kernel_tests/random/random_ops_test.py,35,class, 7983,RandomNormalTest,tensorflow/tensorflow/python/kernel_tests/random/random_ops_test.py,63,class, 7984,TruncatedNormalTest,tensorflow/tensorflow/python/kernel_tests/random/random_ops_test.py,160,class, 7985,RandomUniformTest,tensorflow/tensorflow/python/kernel_tests/random/random_ops_test.py,262,class, 7986,RandomShapeTest,tensorflow/tensorflow/python/kernel_tests/random/random_ops_test.py,419,class, 7987,RandomPoissonTest,tensorflow/tensorflow/python/kernel_tests/random/random_poisson_test.py,38,class,This is a large test due to the moments computation taking some time. 7988,RandomShuffleQueueTest,tensorflow/tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py,40,class, 7989,invert_philox,tensorflow/tensorflow/python/kernel_tests/random/stateless_random_ops_test.py,38,function,Invert the Philox bijection. 7990,StatelessOpsTest,tensorflow/tensorflow/python/kernel_tests/random/stateless_random_ops_test.py,55,class, 7991,test_moment_matching,tensorflow/tensorflow/python/kernel_tests/random/util.py,28,function,"Return z-test scores for sample moments to match analytic moments. Given `samples`, check that the first `number_moments` sample moments match the given `dist` moments by doing a z-test. Args: samples: Samples from target distribution. number_moments: Python `int` describing how many sample moments to check. dist: SciPy distribution object that provides analytic moments. stride: Distance between samples to check for statistical properties. A stride of 0 means to use all samples, while other strides test for spatial correlation. Returns: Array of z_test scores." 7992,chi_squared,tensorflow/tensorflow/python/kernel_tests/random/util.py,79,function,Pearson's Chi-squared test. 7993,normal_cdf,tensorflow/tensorflow/python/kernel_tests/random/util.py,88,function,Cumulative distribution function for a standard normal distribution. 7994,anderson_darling,tensorflow/tensorflow/python/kernel_tests/random/util.py,93,function,Anderson-Darling test for a standard normal distribution. 7995,test_truncated_normal,tensorflow/tensorflow/python/kernel_tests/random/util.py,103,function,Tests truncated normal distribution's statistics. 7996,try_import,tensorflow/tensorflow/python/kernel_tests/signal/dct_ops_test.py,33,function, 7997,_modify_input_for_dct,tensorflow/tensorflow/python/kernel_tests/signal/dct_ops_test.py,45,function,Pad or trim the provided NumPy array's innermost axis to length n. 7998,_np_dct1,tensorflow/tensorflow/python/kernel_tests/signal/dct_ops_test.py,64,function,Computes the DCT-I manually with NumPy. 7999,_np_dct2,tensorflow/tensorflow/python/kernel_tests/signal/dct_ops_test.py,80,function,Computes the DCT-II manually with NumPy. 8000,_np_dct3,tensorflow/tensorflow/python/kernel_tests/signal/dct_ops_test.py,101,function,Computes the DCT-III manually with NumPy. 8001,_np_dct4,tensorflow/tensorflow/python/kernel_tests/signal/dct_ops_test.py,123,function,Computes the DCT-IV manually with NumPy.
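The `_np_dct*` helpers above compute reference DCTs directly from the definition. For example, DCT-II as a dense cosine-basis matrix; the exact normalization used by the helpers is an assumption here:

```python
# Hedged sketch in the spirit of _np_dct2: the unnormalized DCT-II,
# y[k] = 2 * sum_n x[n] * cos(pi * k * (2n + 1) / (2N)).
import numpy as np

def np_dct2(x):
  n = len(x)
  k = np.arange(n)[:, None]
  m = np.arange(n)[None, :]
  basis = np.cos(np.pi * k * (2 * m + 1) / (2 * n))
  return 2.0 * basis @ x

x = np.random.RandomState(0).randn(8)
y = np_dct2(x)  # matches scipy.fftpack.dct(x, type=2) under the default scale
```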
8002,DCTOpsTest,tensorflow/tensorflow/python/kernel_tests/signal/dct_ops_test.py,150,class, 8003,BaseFFTOpsTest,tensorflow/tensorflow/python/kernel_tests/signal/fft_ops_test.py,44,class, 8004,FFTOpsTest,tensorflow/tensorflow/python/kernel_tests/signal/fft_ops_test.py,122,class, 8005,RFFTOpsTest,tensorflow/tensorflow/python/kernel_tests/signal/fft_ops_test.py,299,class, 8006,FFTShiftTest,tensorflow/tensorflow/python/kernel_tests/signal/fft_ops_test.py,603,class, 8007,hertz_to_mel,tensorflow/tensorflow/python/kernel_tests/signal/mel_ops_test.py,40,function,"Convert frequencies to mel scale using HTK formula. Copied from https://github.com/tensorflow/models/blob/master/research/audioset/mel_features.py. Args: frequencies_hertz: Scalar or np.array of frequencies in hertz. Returns: Object of same size as frequencies_hertz containing corresponding values on the mel scale." 8008,spectrogram_to_mel_matrix,tensorflow/tensorflow/python/kernel_tests/signal/mel_ops_test.py,57,function,"Return a matrix that can post-multiply spectrogram rows to make mel. Copied from https://github.com/tensorflow/models/blob/master/research/audioset/mel_features.py. Returns a np.array matrix A that can be used to post-multiply a matrix S of spectrogram values (STFT magnitudes) arranged as frames x bins to generate a ""mel spectrogram"" M of frames x num_mel_bins. M = S A. The classic HTK algorithm exploits the complementarity of adjacent mel bands to multiply each FFT bin by only one mel weight, then add it, with positive and negative signs, to the two adjacent mel bands to which that bin contributes. Here, by expressing this operation as a matrix multiply, we go from num_fft multiplies per frame (plus around 2*num_fft adds) to around num_fft^2 multiplies and adds. However, because these are all presumably accomplished in a single call to np.dot(), it's not clear which approach is faster in Python. The matrix multiplication has the attraction of being more general and flexible, and much easier to read. Args: num_mel_bins: How many bands in the resulting mel spectrum. This is the number of columns in the output matrix. num_spectrogram_bins: How many bins there are in the source spectrogram data, which is understood to be fft_size/2 + 1, i.e. the spectrogram only contains the nonredundant FFT bins. audio_sample_rate: Samples per second of the audio at the input to the spectrogram. We need this to figure out the actual frequencies for each spectrogram bin, which dictates how they are mapped into mel. lower_edge_hertz: Lower bound on the frequencies to be included in the mel spectrum. This corresponds to the lower edge of the lowest triangular band. upper_edge_hertz: The desired top edge of the highest frequency band. Returns: An np.array with shape (num_spectrogram_bins, num_mel_bins). Raises: ValueError: if frequency edges are incorrectly ordered." 8009,LinearToMelTest,tensorflow/tensorflow/python/kernel_tests/signal/mel_ops_test.py,136,class, 8010,MFCCTest,tensorflow/tensorflow/python/kernel_tests/signal/mfcc_ops_test.py,37,class, 8011,ReconstructionOpsTest,tensorflow/tensorflow/python/kernel_tests/signal/reconstruction_ops_test.py,37,class, 8012,FrameTest,tensorflow/tensorflow/python/kernel_tests/signal/shape_ops_test.py,37,class, 8013,SpectralOpsTest,tensorflow/tensorflow/python/kernel_tests/signal/spectral_ops_test.py,39,class, 8014,grappler_optimize,tensorflow/tensorflow/python/kernel_tests/signal/test_util.py,29,function,"Tries to optimize the provided graph using grappler. 
Args: graph: A `tf.Graph` instance containing the graph to optimize. fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away). Grappler uses the 'train_op' collection to look for fetches, so if not provided this collection should be non-empty. config_proto: An optional `tf.compat.v1.ConfigProto` to use when rewriting the graph. Returns: A `tf.compat.v1.GraphDef` containing the rewritten graph." 8015,tflite_convert,tensorflow/tensorflow/python/kernel_tests/signal/test_util.py,53,function,"Converts the provided fn to a tf.lite model. Args: fn: A callable that expects a list of inputs like input_templates that returns a tensor or structure of tensors. input_templates: A list of Tensors, ndarrays or TensorSpecs describing the inputs that fn expects. The actual values of the Tensors or ndarrays are unused. Returns: The serialized tf.lite model." 8016,evaluate_tflite_model,tensorflow/tensorflow/python/kernel_tests/signal/test_util.py,72,function,"Evaluates the provided tf.lite model with the given input ndarrays. Args: tflite_model: bytes. The serialized tf.lite model. input_ndarrays: A list of NumPy arrays to feed as input to the model. Returns: A list of ndarrays produced by the model. Raises: ValueError: If the number of input arrays does not match the number of inputs the model expects." 8017,_scipy_raised_cosine,tensorflow/tensorflow/python/kernel_tests/signal/window_ops_test.py,44,function,"A simple implementation of a raised cosine window that matches SciPy. https://en.wikipedia.org/wiki/Window_function#Hann_window https://github.com/scipy/scipy/blob/v0.14.0/scipy/signal/windows.py#L615 Args: length: The window length. symmetric: Whether to create a symmetric window. a: The alpha parameter of the raised cosine window. b: The beta parameter of the raised cosine window. Returns: A raised cosine window of length `length`." 8018,WindowOpsTest,tensorflow/tensorflow/python/kernel_tests/signal/window_ops_test.py,71,class, 8019,convert_data_format,tensorflow/tensorflow/python/layers/utils.py,26,function, 8020,normalize_tuple,tensorflow/tensorflow/python/layers/utils.py,49,function,"Transforms a single integer or iterable of integers into an integer tuple. Arguments: value: The value to validate and convert. Could be an int, or any iterable of ints. n: The size of the tuple to be returned. name: The name of the argument being validated, e.g. ""strides"" or ""kernel_size"". This is only used to format error messages. Returns: A tuple of n integers. Raises: ValueError: If something other than an int/long or an iterable thereof was passed." 8021,normalize_data_format,tensorflow/tensorflow/python/layers/utils.py,88,function, 8022,normalize_padding,tensorflow/tensorflow/python/layers/utils.py,97,function, 8023,conv_output_length,tensorflow/tensorflow/python/layers/utils.py,105,function,"Determines output length of a convolution given input length. Arguments: input_length: integer. filter_size: integer. padding: one of ""same"", ""valid"", ""full"". stride: integer. dilation: dilation rate, integer. Returns: The output length (integer)." 8024,conv_input_length,tensorflow/tensorflow/python/layers/utils.py,131,function,"Determines input length of a convolution given output length. Arguments: output_length: integer. filter_size: integer. padding: one of ""same"", ""valid"", ""full"". stride: integer. Returns: The input length (integer)." 8025,deconv_output_length,tensorflow/tensorflow/python/layers/utils.py,155,function,"Determines output length of a transposed convolution given input length.
Arguments: input_length: integer. filter_size: integer. padding: one of ""same"", ""valid"", ""full"". stride: integer. Returns: The output length (integer)." 8026,smart_cond,tensorflow/tensorflow/python/layers/utils.py,177,function,"Return either `true_fn()` if predicate `pred` is true else `false_fn()`. If `pred` is a bool or has a constant value, we return either `true_fn()` or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both. Arguments: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using `tf.cond`. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. Raises: TypeError: If `true_fn` or `false_fn` is not callable." 8027,constant_value,tensorflow/tensorflow/python/layers/utils.py,203,function,"Return the bool value for `pred`, or None if `pred` had a dynamic value. Arguments: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. Returns: True or False if `pred` has a constant boolean value, None otherwise. Raises: TypeError: If `pred` is not a Variable, Tensor or bool, or Python integer 1 or 0." 8028,ConvUtilsTest,tensorflow/tensorflow/python/layers/utils_test.py,28,class, 8029,ConstantValueTest,tensorflow/tensorflow/python/layers/utils_test.py,94,class, 8030,float_values,tensorflow/tensorflow/python/lib/core/bfloat16_test.py,35,function,Returns values that should round trip exactly to float and back. 8031,Bfloat16Test,tensorflow/tensorflow/python/lib/core/bfloat16_test.py,47,class, 8032,Bfloat16NumPyTest,tensorflow/tensorflow/python/lib/core/bfloat16_test.py,187,class, 8033,FileIO,tensorflow/tensorflow/python/lib/io/file_io.py,37,class,"FileIO class that exposes methods to read / write to / from files. The constructor takes the following arguments: name: [path-like object](https://docs.python.org/3/glossary.html#term-path-like-object) giving the pathname of the file to be opened. mode: one of `r`, `w`, `a`, `r+`, `w+`, `a+`. Append `b` for bytes mode. Can be used as an iterator to iterate over lines in the file. The default buffer size used for the BufferedInputStream used for reading the file line by line is 1024 * 512 bytes." 8034,file_exists,tensorflow/tensorflow/python/lib/io/file_io.py,237,function,"Determines whether a path exists or not. Args: filename: string, a path Returns: True if the path exists, whether it's a file or a directory. False if the path does not exist and there are no filesystem errors. Raises: errors.OpError: Propagates any errors reported by the FileSystem API." 8035,file_exists_v2,tensorflow/tensorflow/python/lib/io/file_io.py,254,function,"Determines whether a path exists or not. Args: path: string, a path Returns: True if the path exists, whether it's a file or a directory. False if the path does not exist and there are no filesystem errors. Raises: errors.OpError: Propagates any errors reported by the FileSystem API." 8036,delete_file,tensorflow/tensorflow/python/lib/io/file_io.py,275,function,"Deletes the file located at 'filename'. Args: filename: string, a filename Raises: errors.OpError: Propagates any errors reported by the FileSystem API. E.g., `NotFoundError` if the file does not exist." 8037,delete_file_v2,tensorflow/tensorflow/python/lib/io/file_io.py,289,function,"Deletes the path located at 'path'. 
Args: path: string, a path Raises: errors.OpError: Propagates any errors reported by the FileSystem API. E.g., `NotFoundError` if the path does not exist." 8038,read_file_to_string,tensorflow/tensorflow/python/lib/io/file_io.py,302,function,"Reads the entire contents of a file to a string. Args: filename: string, path to a file binary_mode: whether to open the file in binary mode or not. This changes the type of the object returned. Returns: contents of the file as a string or bytes. Raises: errors.OpError: Raises a variety of error subtypes, e.g. `NotFoundError`." 8039,write_string_to_file,tensorflow/tensorflow/python/lib/io/file_io.py,324,function,"Writes a string to a given file. Args: filename: string, path to a file file_content: string, contents that need to be written to the file Raises: errors.OpError: If there are errors during the operation." 8040,get_matching_files,tensorflow/tensorflow/python/lib/io/file_io.py,339,function,"Returns a list of files that match the given pattern(s). Args: filename: string or iterable of strings. The glob pattern(s). Returns: A list of strings containing filenames that match the given pattern(s). Raises: * errors.OpError: If there are filesystem / directory listing errors." 8041,get_matching_files_v2,tensorflow/tensorflow/python/lib/io/file_io.py,355,function,"Returns a list of files that match the given pattern(s). The patterns are defined as strings. Supported patterns are defined below. Note that the pattern can be a Python iterable of string patterns. The format definition of the pattern is: **pattern**: `{ term }` **term**: * `'*'`: matches any sequence of non-'/' characters * `'?'`: matches a single non-'/' character * `'[' [ '^' ] { match-list } ']'`: matches any single character (not) on the list * `c`: matches character `c` where `c != '*', '?', '\\', '['` * `'\\' c`: matches character `c` **character range**: * `c`: matches character `c` while `c != '\\', '-', ']'` * `'\\' c`: matches character `c` * `lo '-' hi`: matches character `c` for `lo <= c <= hi` Examples: >>> tf.io.gfile.glob(""*.py"") ... # For example, ['__init__.py'] >>> tf.io.gfile.glob(""__init__.??"") ... # As above >>> files = {""*.py""} >>> the_iterator = iter(files) >>> tf.io.gfile.glob(the_iterator) ... # As above See the C++ function `GetMatchingPaths` in [`core/platform/file_system.h`] (../../../core/platform/file_system.h) for implementation details. Args: pattern: string or iterable of strings. The glob pattern(s). Returns: A list of strings containing filenames that match the given pattern(s). Raises: errors.OpError: If there are filesystem / directory listing errors." 8042,create_dir,tensorflow/tensorflow/python/lib/io/file_io.py,423,function,"Creates a directory with the name `dirname`. Args: dirname: string, name of the directory to be created Notes: The parent directories need to exist. Use `tf.io.gfile.makedirs` instead if there is the possibility that the parent dirs don't exist. Raises: errors.OpError: If the operation fails." 8043,create_dir_v2,tensorflow/tensorflow/python/lib/io/file_io.py,439,function,"Creates a directory with the name given by `path`. Args: path: string, name of the directory to be created Notes: The parent directories need to exist. Use `tf.io.gfile.makedirs` instead if there is the possibility that the parent dirs don't exist. Raises: errors.OpError: If the operation fails." 8044,recursive_create_dir,tensorflow/tensorflow/python/lib/io/file_io.py,455,function,"Creates a directory and all parent/intermediate directories.
It succeeds if dirname already exists and is writable. Args: dirname: string, name of the directory to be created Raises: errors.OpError: If the operation fails." 8045,recursive_create_dir_v2,tensorflow/tensorflow/python/lib/io/file_io.py,470,function,"Creates a directory and all parent/intermediate directories. It succeeds if path already exists and is writable. Args: path: string, name of the directory to be created Raises: errors.OpError: If the operation fails." 8046,copy,tensorflow/tensorflow/python/lib/io/file_io.py,485,function,"Copies data from `oldpath` to `newpath`. Args: oldpath: string, name of the file whose contents need to be copied newpath: string, name of the file to which to copy overwrite: boolean, if false it's an error for `newpath` to be occupied by an existing file. Raises: errors.OpError: If the operation fails." 8047,copy_v2,tensorflow/tensorflow/python/lib/io/file_io.py,501,function,"Copies data from `src` to `dst`. Args: src: string, name of the file whose contents need to be copied dst: string, name of the file to which to copy overwrite: boolean, if false it's an error for `dst` to be occupied by an existing file. Raises: errors.OpError: If the operation fails." 8048,rename,tensorflow/tensorflow/python/lib/io/file_io.py,518,function,"Rename or move a file / directory. Args: oldname: string, pathname for a file newname: string, pathname to which the file needs to be moved overwrite: boolean, if false it's an error for `newname` to be occupied by an existing file. Raises: errors.OpError: If the operation fails." 8049,rename_v2,tensorflow/tensorflow/python/lib/io/file_io.py,534,function,"Rename or move a file / directory. Args: src: string, pathname for a file dst: string, pathname to which the file needs to be moved overwrite: boolean, if false it's an error for `dst` to be occupied by an existing file. Raises: errors.OpError: If the operation fails." 8050,atomic_write_string_to_file,tensorflow/tensorflow/python/lib/io/file_io.py,550,function,"Writes to `filename` atomically. This means that when `filename` appears in the filesystem, it will contain all of `contents`. With write_string_to_file, it is possible for the file to appear in the filesystem with `contents` only partially written. Accomplished by writing to a temp file and then renaming it. Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for `filename` to be occupied by an existing file." 8051,delete_recursively,tensorflow/tensorflow/python/lib/io/file_io.py,578,function,"Deletes everything under dirname recursively. Args: dirname: string, a path to a directory Raises: errors.OpError: If the operation fails." 8052,delete_recursively_v2,tensorflow/tensorflow/python/lib/io/file_io.py,591,function,"Deletes everything under path recursively. Args: path: string, a path Raises: errors.OpError: If the operation fails." 8053,is_directory,tensorflow/tensorflow/python/lib/io/file_io.py,604,function,"Returns whether the path is a directory or not. Args: dirname: string, path to a potential directory Returns: True, if the path is a directory; False otherwise" 8054,is_directory_v2,tensorflow/tensorflow/python/lib/io/file_io.py,617,function,"Returns whether the path is a directory or not.
Args: path: string, path to a potential directory Returns: True, if the path is a directory; False otherwise" 8055,has_atomic_move,tensorflow/tensorflow/python/lib/io/file_io.py,632,function,"Checks whether the file system supports atomic moves. Returns whether or not the file system of the given path supports the atomic move operation for a file or folder. If atomic move is supported, it is recommended to use a temp location for writing and then move to the final location. Args: path: string, path to a file Returns: True, if the path is on a file system that supports atomic move. False, if the file system does not support atomic move; in such cases we need to be careful about using moves, and it may be safer not to stage writes in a temporary location." 8056,list_directory,tensorflow/tensorflow/python/lib/io/file_io.py,657,function,"Returns a list of entries contained within a directory. The list is in arbitrary order. It does not contain the special entries ""."" and "".."". Args: dirname: string, path to a directory Returns: [filename1, filename2, ... filenameN] as strings Raises: errors.NotFoundError if directory doesn't exist" 8057,list_directory_v2,tensorflow/tensorflow/python/lib/io/file_io.py,676,function,"Returns a list of entries contained within a directory. The list is in arbitrary order. It does not contain the special entries ""."" and "".."". Args: path: string, path to a directory Returns: [filename1, filename2, ... filenameN] as strings Raises: errors.NotFoundError if directory doesn't exist" 8058,walk,tensorflow/tensorflow/python/lib/io/file_io.py,706,function,"Recursive directory tree generator for directories. Args: top: string, a Directory name in_order: bool, Traverse in order if True, post order if False. Errors that happen while listing directories are ignored. Yields: Each yield is a 3-tuple: the pathname of a directory, followed by lists of all its subdirectories and leaf files. That is, each yield looks like: `(dirname, [subdirname, subdirname, ...], [filename, filename, ...])`. Each item is a string." 8059,walk_v2,tensorflow/tensorflow/python/lib/io/file_io.py,724,function,"Recursive directory tree generator for directories. Args: top: string, a Directory name topdown: bool, Traverse pre order if True, post order if False. onerror: optional handler for errors. Should be a function, it will be called with the error as argument. Rethrowing the error aborts the walk. Errors that happen while listing directories are ignored. Yields: Each yield is a 3-tuple: the pathname of a directory, followed by lists of all its subdirectories and leaf files. That is, each yield looks like: `(dirname, [subdirname, subdirname, ...], [filename, filename, ...])`. Each item is a string." 8060,stat,tensorflow/tensorflow/python/lib/io/file_io.py,782,function,"Returns file statistics for a given path. Args: filename: string, path to a file Returns: FileStatistics struct that contains information about the path Raises: errors.OpError: If the operation fails." 8061,stat_v2,tensorflow/tensorflow/python/lib/io/file_io.py,798,function,"Returns file statistics for a given path. Args: path: string, path to a file Returns: FileStatistics struct that contains information about the path Raises: errors.OpError: If the operation fails." 8062,filecmp,tensorflow/tensorflow/python/lib/io/file_io.py,813,function,"Compare two files, returning True if they are the same, False otherwise. We check size first and return False quickly if the files are different sizes.
If they are the same size, we continue by generating a crc for the whole file. You might wonder: why not use Python's `filecmp.cmp()` instead? The answer is that the builtin library is not robust to the many different filesystems TensorFlow runs on, and so we perform a similar comparison with the more robust FileIO. Args: filename_a: string path to the first file. filename_b: string path to the second file. Returns: True if the files are the same, False otherwise." 8063,file_crc32,tensorflow/tensorflow/python/lib/io/file_io.py,842,function,"Get the crc32 of the passed file. The crc32 of a file can be used for error checking; two files with the same crc32 are considered equivalent. Note that the entire file must be read to produce the crc32. Args: filename: string, path to a file block_size: Integer, process the files by reading blocks of `block_size` bytes. Use -1 to read the file at once. Returns: hexadecimal as string, the crc32 of the passed file." 8064,PathLike,tensorflow/tensorflow/python/lib/io/file_io_test.py,33,class,Backport of pathlib.Path for Python < 3.6 8065,FileIoTest,tensorflow/tensorflow/python/lib/io/file_io_test.py,51,class, 8066,TFRecordCompressionType,tensorflow/tensorflow/python/lib/io/tf_record.py,32,class,The type of compression for the record. 8067,TFRecordOptions,tensorflow/tensorflow/python/lib/io/tf_record.py,43,class,Options used for manipulating TFRecord files. 8068,tf_record_iterator,tensorflow/tensorflow/python/lib/io/tf_record.py,157,function,"An iterator that reads the records from a TFRecords file. Args: path: The path to the TFRecords file. options: (optional) A TFRecordOptions object. Returns: An iterator of serialized TFRecords. Raises: IOError: If `path` cannot be opened for reading." 8069,tf_record_random_reader,tensorflow/tensorflow/python/lib/io/tf_record.py,174,function,"Creates a reader that allows random-access reads from a TFRecords file. The created reader object has the following method: - `read(offset)`, which returns a tuple of `(record, ending_offset)`, where `record` is the TFRecord read at the offset, and `ending_offset` is the ending offset of the read record. The method throws a `tf.errors.DataLossError` if data is corrupted at the given offset. The method throws `IndexError` if the offset is out of range for the TFRecords file. Usage example: ```py reader = tf_record_random_reader(file_path) record_1, offset_1 = reader.read(0) # 0 is the initial offset. # offset_1 is the ending offset of the 1st record and the starting offset of # the next. record_2, offset_2 = reader.read(offset_1) # offset_2 is the ending offset of the 2nd record and the starting offset of # the next. # We can jump back and read the first record again if so desired. reader.read(0) ``` Args: path: The path to the TFRecords file. Returns: An object that supports random-access reading of the serialized TFRecords. Raises: IOError: If `path` cannot be opened for reading." 8070,TFRecordWriter,tensorflow/tensorflow/python/lib/io/tf_record.py,218,class,"A class to write records to a TFRecords file. [TFRecords tutorial](https://www.tensorflow.org/tutorials/load_data/tfrecord) TFRecords is a binary format which is optimized for high throughput data retrieval, generally in conjunction with `tf.data`. `TFRecordWriter` is used to write serialized examples to a file for later consumption.
The key steps are: Ahead of time: - [Convert data into a serialized format]( https://www.tensorflow.org/tutorials/load_data/tfrecord#tfexample) - [Write the serialized data to one or more files]( https://www.tensorflow.org/tutorials/load_data/tfrecord#tfrecord_files_in_python) During training or evaluation: - [Read serialized examples into memory]( https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file) - [Parse (deserialize) examples]( https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file) A minimal example is given below: >>> import tempfile >>> example_path = os.path.join(tempfile.gettempdir(), ""example.tfrecords"") >>> np.random.seed(0) >>> # Write the records to a file. ... with tf.io.TFRecordWriter(example_path) as file_writer: ... for _ in range(4): ... x, y = np.random.random(), np.random.random() ... ... record_bytes = tf.train.Example(features=tf.train.Features(feature={ ... ""x"": tf.train.Feature(float_list=tf.train.FloatList(value=[x])), ... ""y"": tf.train.Feature(float_list=tf.train.FloatList(value=[y])), ... })).SerializeToString() ... file_writer.write(record_bytes) >>> # Read the data back out. >>> def decode_fn(record_bytes): ... return tf.io.parse_single_example( ... # Data ... record_bytes, ... ... # Schema ... {""x"": tf.io.FixedLenFeature([], dtype=tf.float32), ... ""y"": tf.io.FixedLenFeature([], dtype=tf.float32)} ... ) >>> for batch in tf.data.TFRecordDataset([example_path]).map(decode_fn): ... print(""x = {x:.4f}, y = {y:.4f}"".format(**batch)) x = 0.5488, y = 0.7152 x = 0.6028, y = 0.5449 x = 0.4237, y = 0.6459 x = 0.4376, y = 0.8918 This class implements `__enter__` and `__exit__`, and can be used in `with` blocks like a normal file. (See the usage example above.)" 8071,TFCompressionTestCase,tensorflow/tensorflow/python/lib/io/tf_record_test.py,67,class,TFCompression Test 8072,TFRecordWriterTest,tensorflow/tensorflow/python/lib/io/tf_record_test.py,131,class,TFRecordWriter Test 8073,TFRecordWriterZlibTest,tensorflow/tensorflow/python/lib/io/tf_record_test.py,294,class,TFRecordWriter Zlib test 8074,TFRecordIteratorTest,tensorflow/tensorflow/python/lib/io/tf_record_test.py,360,class,TFRecordIterator test 8075,TFRecordRandomReaderTest,tensorflow/tensorflow/python/lib/io/tf_record_test.py,478,class, 8076,TFRecordWriterCloseAndFlushTests,tensorflow/tensorflow/python/lib/io/tf_record_test.py,518,class,TFRecordWriter close and flush tests 8077,TFRecordWriterCloseAndFlushGzipTests,tensorflow/tensorflow/python/lib/io/tf_record_test.py,577,class, 8078,TFRecordWriterCloseAndFlushZlibTests,tensorflow/tensorflow/python/lib/io/tf_record_test.py,584,class, 8079,Module,tensorflow/tensorflow/python/module/module.py,35,class,"Base neural network module class. A module is a named container for `tf.Variable`s, other `tf.Module`s and functions which apply to user input. For example a dense layer in a neural network might be implemented as a `tf.Module`: >>> class Dense(tf.Module): ... def __init__(self, input_dim, output_size, name=None): ... super(Dense, self).__init__(name=name) ... self.w = tf.Variable( ... tf.random.normal([input_dim, output_size]), name='w') ... self.b = tf.Variable(tf.zeros([output_size]), name='b') ... def __call__(self, x): ... y = tf.matmul(x, self.w) + self.b ... 
return tf.nn.relu(y) You can use the Dense layer as you would expect: >>> d = Dense(input_dim=3, output_size=2) >>> d(tf.ones([1, 3])) <tf.Tensor: shape=(1, 2), dtype=float32, numpy=...> By subclassing `tf.Module` instead of `object`, any `tf.Variable` or `tf.Module` instances assigned to object properties can be collected using the `variables`, `trainable_variables` or `submodules` property: >>> d.variables (<tf.Variable 'b:0' shape=(2,) dtype=float32, numpy=...>, <tf.Variable 'w:0' shape=(3, 2) dtype=float32, numpy=...>) Subclasses of `tf.Module` can also take advantage of the `_flatten` method which can be used to implement tracking of any other types. All `tf.Module` classes have an associated `tf.name_scope` which can be used to group operations in TensorBoard and create hierarchies for variable names which can help with debugging. We suggest using the name scope when creating nested submodules/parameters or for forward methods whose graph you might want to inspect in TensorBoard. You can enter the name scope explicitly using `with self.name_scope:` or you can annotate methods (apart from `__init__`) with `@tf.Module.with_name_scope`. >>> class MLP(tf.Module): ... def __init__(self, input_size, sizes, name=None): ... super(MLP, self).__init__(name=name) ... self.layers = [] ... with self.name_scope: ... for size in sizes: ... self.layers.append(Dense(input_dim=input_size, output_size=size)) ... input_size = size ... @tf.Module.with_name_scope ... def __call__(self, x): ... for layer in self.layers: ... x = layer(x) ... return x >>> module = MLP(input_size=5, sizes=[5, 5]) >>> module.variables (<tf.Variable 'mlp/b:0' shape=(5,) dtype=float32, numpy=...>, <tf.Variable 'mlp/b:0' shape=(5,) dtype=float32, numpy=...>, <tf.Variable 'mlp/w:0' shape=(5, 5) dtype=float32, numpy=...>, <tf.Variable 'mlp/w:0' shape=(5, 5) dtype=float32, numpy=...>)" 8080,_is_variable,tensorflow/tensorflow/python/module/module.py,300,function, 8081,_is_trainable_variable,tensorflow/tensorflow/python/module/module.py,304,function, 8082,_is_module,tensorflow/tensorflow/python/module/module.py,308,function, 8083,valid_identifier,tensorflow/tensorflow/python/module/module.py,315,function, 8084,camel_to_snake,tensorflow/tensorflow/python/module/module.py,319,function, 8085,_flatten_module,tensorflow/tensorflow/python/module/module.py,323,function,Implementation of `flatten`.
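`camel_to_snake` above derives default `tf.Module` names from class names. A small sketch of such a conversion; the regex is illustrative and may not be byte-for-byte what module.py uses:

```python
# Hedged sketch of a CamelCase -> snake_case conversion for module names.
import re

def camel_to_snake(value):
  # Insert "_" before an uppercase letter that follows a lowercase/digit,
  # or before an uppercase letter that starts a new word (e.g. "MLPLayer").
  return re.sub(r"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))", r"_\1",
                value).lower()

assert camel_to_snake("MLP") == "mlp"
assert camel_to_snake("DenseLayer") == "dense_layer"
```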
8086,TestModuleNaming,tensorflow/tensorflow/python/module/module_test.py,41,class, 8087,VariableNamingTest,tensorflow/tensorflow/python/module/module_test.py,195,class, 8088,NameScopeTest,tensorflow/tensorflow/python/module/module_test.py,204,class, 8089,VariableTrackingTest,tensorflow/tensorflow/python/module/module_test.py,227,class, 8090,ModuleTrackingTest,tensorflow/tensorflow/python/module/module_test.py,264,class, 8091,ForwardMethodsTest,tensorflow/tensorflow/python/module/module_test.py,280,class, 8092,AbcTest,tensorflow/tensorflow/python/module/module_test.py,302,class, 8093,get_name_scope,tensorflow/tensorflow/python/module/module_test.py,317,function, 8094,ErrorModuleError,tensorflow/tensorflow/python/module/module_test.py,323,class, 8095,ErrorModule,tensorflow/tensorflow/python/module/module_test.py,327,class, 8096,RecursiveModule,tensorflow/tensorflow/python/module/module_test.py,339,class, 8097,AbstractModule,tensorflow/tensorflow/python/module/module_test.py,351,class, 8098,ConcreteModule,tensorflow/tensorflow/python/module/module_test.py,358,class, 8099,TreeModule,tensorflow/tensorflow/python/module/module_test.py,365,class, 8100,ReturnsNameScopeModule,tensorflow/tensorflow/python/module/module_test.py,378,class, 8101,SubclassedReturnsNameScopeModule,tensorflow/tensorflow/python/module/module_test.py,389,class, 8102,PropertyThrowsWhenCalledModule,tensorflow/tensorflow/python/module/module_test.py,396,class, 8103,ModuleOverridingNameScope,tensorflow/tensorflow/python/module/module_test.py,403,class, 8104,ModuleWithFunctionAnnotatedCall,tensorflow/tensorflow/python/module/module_test.py,410,class, 8105,PropertyModule,tensorflow/tensorflow/python/module/module_test.py,423,class, 8106,FlattenTest,tensorflow/tensorflow/python/module/module_test.py,453,class, 8107,LayerModule,tensorflow/tensorflow/python/module/module_test.py,523,class, 8108,MemberType,tensorflow/tensorflow/python/module/module_test.py,549,class,A simple type to search for. 8109,SimpleModule,tensorflow/tensorflow/python/module/module_test.py,554,class, 8110,AccumulateNBenchmark,tensorflow/tensorflow/python/ops/accumulate_n_benchmark.py,39,class, 8111,_PackGrad,tensorflow/tensorflow/python/ops/array_grad.py,42,function,Gradient for pack op. 8112,_UnpackGrad,tensorflow/tensorflow/python/ops/array_grad.py,48,function,Gradient for unpack op. 8113,_ConcatGradHelper,tensorflow/tensorflow/python/ops/array_grad.py,53,function,"Gradient for concat op. Args: op: An operation. grad: `Tensor` or `IndexedSlices` representing the gradients with respect to each output of the op. start_value_index: An integer index of the first value in the op.inputs. end_value_index: An integer index of the last value in the op.inputs. dim_index: An integer index of concat_dim or axis parameter in op.inputs. Returns: Tensors representing the partial gradients with respect to each input of the op. Raises: ValueError: if concat_dim/axis is not statically known." 8114,_ConcatGrad,tensorflow/tensorflow/python/ops/array_grad.py,217,function, 8115,_ConcatGradV2,tensorflow/tensorflow/python/ops/array_grad.py,227,function, 8116,_SliceGrad,tensorflow/tensorflow/python/ops/array_grad.py,236,function,Gradient for Slice op. 8117,_StridedSliceGrad,tensorflow/tensorflow/python/ops/array_grad.py,264,function,Gradient for StridedSlice op. 8118,_StridedSliceGradGrad,tensorflow/tensorflow/python/ops/array_grad.py,299,function,Gradient for StridedSliceGrad op. 
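`_ConcatGradHelper` above implements the rule that the gradient of a concat is the incoming gradient sliced back along the concat axis, one piece per input. A quick eager-mode check of that behavior (the weighting tensor below is just for illustration):

```python
# Gradient of tf.concat routes slices of the output gradient to each input.
import tensorflow as tf

x = tf.ones([2, 3])
y = tf.ones([2, 5])
with tf.GradientTape() as tape:
  tape.watch([x, y])
  z = tf.concat([x, y], axis=1)             # shape [2, 8]
  loss = tf.reduce_sum(z * tf.range(8.0))   # weight each concatenated column
dx, dy = tape.gradient(loss, [x, y])
# dx has shape [2, 3] (columns 0..2 of dz); dy has shape [2, 5] (columns 3..7).
```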
8119,_SplitGrad,tensorflow/tensorflow/python/ops/array_grad.py,318,function, 8120,_SplitVGrad,tensorflow/tensorflow/python/ops/array_grad.py,323,function, 8121,_DiagGrad,tensorflow/tensorflow/python/ops/array_grad.py,336,function, 8122,_DiagPartGrad,tensorflow/tensorflow/python/ops/array_grad.py,341,function, 8123,_MatrixDiagGrad,tensorflow/tensorflow/python/ops/array_grad.py,346,function, 8124,_MatrixDiagV2Grad,tensorflow/tensorflow/python/ops/array_grad.py,351,function, 8125,_MatrixDiagV3Grad,tensorflow/tensorflow/python/ops/array_grad.py,357,function, 8126,_MatrixDiagPartGrad,tensorflow/tensorflow/python/ops/array_grad.py,363,function, 8127,_MatrixDiagPartV2Grad,tensorflow/tensorflow/python/ops/array_grad.py,372,function,Gradient for MatrixDiagPartV2. 8128,_MatrixDiagPartV3Grad,tensorflow/tensorflow/python/ops/array_grad.py,387,function,Gradient for MatrixDiagPartV3. 8129,_MatrixSetDiagGrad,tensorflow/tensorflow/python/ops/array_grad.py,405,function,Gradient for MatrixSetDiag. 8130,_MatrixSetDiagGradV2,tensorflow/tensorflow/python/ops/array_grad.py,428,function,Gradient for MatrixSetDiagV2. 8131,_MatrixSetDiagGradV3,tensorflow/tensorflow/python/ops/array_grad.py,464,function,Gradient for MatrixSetDiagV3. 8132,_MatrixBandPartGrad,tensorflow/tensorflow/python/ops/array_grad.py,504,function, 8133,_FillGrad,tensorflow/tensorflow/python/ops/array_grad.py,515,function, 8134,_PreventGradientGrad,tensorflow/tensorflow/python/ops/array_grad.py,524,function, 8135,_IndexedSlicesToTensorNoWarning,tensorflow/tensorflow/python/ops/array_grad.py,529,function,Converts an IndexedSlices to a Tensor without sparse->dense warnings. 8136,_GatherGrad,tensorflow/tensorflow/python/ops/array_grad.py,544,function,Gradient for Gather op. 8137,_GetBatchIndices,tensorflow/tensorflow/python/ops/array_grad.py,567,function,Adds the batch offsets to the given indices and returns the results. 8138,_BatchGatherGrad,tensorflow/tensorflow/python/ops/array_grad.py,588,function,Returns the gradient of GatherV2 with batch dimensions. 8139,_GatherV2Grad,tensorflow/tensorflow/python/ops/array_grad.py,619,function,Gradient for GatherV2 op. 8140,_GatherNdGrad,tensorflow/tensorflow/python/ops/array_grad.py,692,function, 8141,_ResourceGatherNdGrad,tensorflow/tensorflow/python/ops/array_grad.py,705,function, 8142,_CheckNumericsGrad,tensorflow/tensorflow/python/ops/array_grad.py,718,function,Gradient for check_numerics op. 8143,_CheckNumericsV2Grad,tensorflow/tensorflow/python/ops/array_grad.py,727,function,Gradient for check_numerics op. 8144,_IdGrad,tensorflow/tensorflow/python/ops/array_grad.py,737,function, 8145,_RefIdGrad,tensorflow/tensorflow/python/ops/array_grad.py,742,function, 8146,_IdNGrad,tensorflow/tensorflow/python/ops/array_grad.py,747,function, 8147,_ReshapeGrad,tensorflow/tensorflow/python/ops/array_grad.py,755,function, 8148,_ReshapeToInput,tensorflow/tensorflow/python/ops/array_grad.py,766,function,Reshapes the gradient to the shape of the original input. 8149,_ExpandDimsGrad,tensorflow/tensorflow/python/ops/array_grad.py,773,function, 8150,_SqueezeGrad,tensorflow/tensorflow/python/ops/array_grad.py,778,function, 8151,_TransposeGrad,tensorflow/tensorflow/python/ops/array_grad.py,783,function,Returns unshuffle(grad). 8152,_ConjugateTransposeGrad,tensorflow/tensorflow/python/ops/array_grad.py,790,function,Returns conj(unshuffle(grad)). 8153,_TileGrad,tensorflow/tensorflow/python/ops/array_grad.py,809,function,Sum reduces grad along the tiled dimensions.
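`_TileGrad`'s comment says the gradient is sum-reduced along the tiled dimensions; a minimal eager-mode check of that rule:

```python
# The gradient of tf.tile sums the incoming gradient over the tiled copies.
import tensorflow as tf

x = tf.constant([1.0, 2.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.tile(x, [3])            # [1, 2, 1, 2, 1, 2]
  loss = tf.reduce_sum(y)
print(tape.gradient(loss, x))    # [3., 3.]: each element was copied 3 times
```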
8154,_PadGrad,tensorflow/tensorflow/python/ops/array_grad.py,839,function,Gradient for Pad. 8155,_ReverseSequenceGrad,tensorflow/tensorflow/python/ops/array_grad.py,864,function, 8156,_ReverseGrad,tensorflow/tensorflow/python/ops/array_grad.py,876,function, 8157,_ReverseV2Grad,tensorflow/tensorflow/python/ops/array_grad.py,882,function, 8158,_SpaceToBatchGrad,tensorflow/tensorflow/python/ops/array_grad.py,888,function, 8159,_SpaceToBatchNDGrad,tensorflow/tensorflow/python/ops/array_grad.py,897,function, 8160,_BatchToSpaceGrad,tensorflow/tensorflow/python/ops/array_grad.py,905,function, 8161,_BatchToSpaceNDGrad,tensorflow/tensorflow/python/ops/array_grad.py,914,function, 8162,_SpaceToDepthGrad,tensorflow/tensorflow/python/ops/array_grad.py,922,function, 8163,_DepthToSpaceGrad,tensorflow/tensorflow/python/ops/array_grad.py,933,function, 8164,_MirrorPadGrad,tensorflow/tensorflow/python/ops/array_grad.py,947,function, 8165,_MirrorPadGradGrad,tensorflow/tensorflow/python/ops/array_grad.py,953,function, 8166,_QuantizeAndDequantizeGrad,tensorflow/tensorflow/python/ops/array_grad.py,959,function, 8167,_QuantizeAndDequantizeV2Grad,tensorflow/tensorflow/python/ops/array_grad.py,964,function, 8168,_QuantizeAndDequantizeV3Grad,tensorflow/tensorflow/python/ops/array_grad.py,969,function, 8169,_ExtractImagePatchesGrad,tensorflow/tensorflow/python/ops/array_grad.py,975,function, 8170,_ExtractVolumePatchesGrad,tensorflow/tensorflow/python/ops/array_grad.py,1032,function, 8171,_ScatterNdGrad,tensorflow/tensorflow/python/ops/array_grad.py,1095,function, 8172,_TensorScatterUpdateGrad,tensorflow/tensorflow/python/ops/array_grad.py,1102,function, 8173,_TensorScatterAddGrad,tensorflow/tensorflow/python/ops/array_grad.py,1112,function, 8174,_TensorScatterSubGrad,tensorflow/tensorflow/python/ops/array_grad.py,1120,function, 8175,_ScatterNdNonAliasingAddGrad,tensorflow/tensorflow/python/ops/array_grad.py,1128,function, 8176,_BroadcastToGrad,tensorflow/tensorflow/python/ops/array_grad.py,1135,function, 8177,reshape,tensorflow/tensorflow/python/ops/array_ops.py,61,function,"Reshapes a tensor. Given `tensor`, this operation returns a new `tf.Tensor` that has the same values as `tensor` in the same order, except with a new shape given by `shape`. >>> t1 = [[1, 2, 3], ... [4, 5, 6]] >>> print(tf.shape(t1).numpy()) [2 3] >>> t2 = tf.reshape(t1, [6]) >>> t2 >>> tf.reshape(t2, [3, 2]) The `tf.reshape` does not change the order of or the total number of elements in the tensor, and so it can reuse the underlying data buffer. This makes it a fast operation independent of how big of a tensor it is operating on. >>> tf.reshape([1, 2, 3], [2, 2]) Traceback (most recent call last): ... InvalidArgumentError: Input to reshape is a tensor with 3 values, but the requested shape has 4 To instead reorder the data to rearrange the dimensions of a tensor, see `tf.transpose`. >>> t = [[1, 2, 3], ... [4, 5, 6]] >>> tf.reshape(t, [3, 2]).numpy() array([[1, 2], [3, 4], [5, 6]], dtype=int32) >>> tf.transpose(t, perm=[1, 0]).numpy() array([[1, 4], [2, 5], [3, 6]], dtype=int32) If one component of `shape` is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can be -1. >>> t = [[1, 2, 3], ... [4, 5, 6]] >>> tf.reshape(t, [-1]) >>> tf.reshape(t, [3, -1]) >>> tf.reshape(t, [-1, 2]) `tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar. 
>>> tf.reshape([7], []).numpy() 7 More examples: >>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> print(tf.shape(t).numpy()) [9] >>> tf.reshape(t, [3, 3]) >>> t = [[[1, 1], [2, 2]], ... [[3, 3], [4, 4]]] >>> print(tf.shape(t).numpy()) [2 2 2] >>> tf.reshape(t, [2, 4]) >>> t = [[[1, 1, 1], ... [2, 2, 2]], ... [[3, 3, 3], ... [4, 4, 4]], ... [[5, 5, 5], ... [6, 6, 6]]] >>> print(tf.shape(t).numpy()) [3 2 3] >>> # Pass '[-1]' to flatten 't'. >>> tf.reshape(t, [-1]) >>> # -- Using -1 to infer the shape -- >>> # Here -1 is inferred to be 9: >>> tf.reshape(t, [2, -1]) >>> # -1 is inferred to be 2: >>> tf.reshape(t, [-1, 9]) >>> # -1 is inferred to be 3: >>> tf.reshape(t, [ 2, -1, 3]) Args: tensor: A `Tensor`. shape: A `Tensor`. Must be one of the following types: `int32`, `int64`. Defines the shape of the output tensor. name: Optional string. A name for the operation. Returns: A `Tensor`. Has the same type as `tensor`." 8178,fill,tensorflow/tensorflow/python/ops/array_ops.py,202,function,"Creates a tensor filled with a scalar value. See also `tf.ones`, `tf.zeros`, `tf.one_hot`, `tf.eye`. This operation creates a tensor of shape `dims` and fills it with `value`. For example: >>> tf.fill([2, 3], 9) `tf.fill` evaluates at graph runtime and supports dynamic shapes based on other runtime `tf.Tensors`, unlike `tf.constant(value, shape=dims)`, which embeds the value as a `Const` node. Args: dims: A 1-D sequence of non-negative numbers. Represents the shape of the output `tf.Tensor`. Entries should be of type: `int32`, `int64`. value: A value to fill the returned `tf.Tensor`. name: Optional string. The name of the output `tf.Tensor`. Returns: A `tf.Tensor` with shape `dims` and the same dtype as `value`. Raises: InvalidArgumentError: `dims` contains negative entries. NotFoundError: `dims` contains non-integer entries. @compatibility(numpy) Similar to `np.full`. In `numpy`, more parameters are supported. Passing a number argument as the shape (`np.full(5, value)`) is valid in `numpy` for specifying a 1-D shaped result, while TensorFlow does not support this syntax. @end_compatibility" 8179,identity,tensorflow/tensorflow/python/ops/array_ops.py,246,function,"Return a Tensor with the same shape and contents as input. The return value is not the same Tensor as the original, but contains the same values. This operation is fast when used on the same device. For example: >>> a = tf.constant([0.78]) >>> a_identity = tf.identity(a) >>> a.numpy() array([0.78], dtype=float32) >>> a_identity.numpy() array([0.78], dtype=float32) Calling `tf.identity` on a variable will make a Tensor that represents the value of that variable at the time it is called. This is equivalent to calling `.read_value()`. >>> a = tf.Variable(5) >>> a_identity = tf.identity(a) >>> a.assign_add(1) >>> a.numpy() 6 >>> a_identity.numpy() 5 Args: input: A `Tensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`." 8180,expand_dims,tensorflow/tensorflow/python/ops/array_ops.py,298,function,"Returns a tensor with a length 1 axis inserted at index `axis`. Given a tensor `input`, this operation inserts a dimension of length 1 at the dimension index `axis` of `input`'s shape. The dimension index follows Python indexing rules: It's zero-based, and a negative index is counted backward from the end. This operation is useful to: * Add an outer ""batch"" dimension to a single element. * Align axes for broadcasting. * Add an inner vector length axis to a tensor of scalars.
For example: If you have a single image of shape `[height, width, channels]`: >>> image = tf.zeros([10,10,3]) You can add an outer `batch` axis by passing `axis=0`: >>> tf.expand_dims(image, axis=0).shape.as_list() [1, 10, 10, 3] The new axis location matches Python `list.insert(axis, 1)`: >>> tf.expand_dims(image, axis=1).shape.as_list() [10, 1, 10, 3] Following standard Python indexing rules, a negative `axis` counts from the end so `axis=-1` adds an innermost dimension: >>> tf.expand_dims(image, -1).shape.as_list() [10, 10, 3, 1] This operation requires that `axis` is a valid index for `input.shape`, following Python indexing rules: ``` -1-tf.rank(input) <= axis <= tf.rank(input) ``` This operation is related to: * `tf.squeeze`, which removes dimensions of size 1. * `tf.reshape`, which provides more flexible reshaping capability. * `tf.sparse.expand_dims`, which provides this functionality for `tf.SparseTensor`. Args: input: A `Tensor`. axis: 0-D (scalar). Specifies the dimension index at which to expand the shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`. name: The name of the output `Tensor` (optional). dim: 0-D (scalar). Equivalent to `axis`, to be deprecated. Returns: A `Tensor` with the same data as `input`, but its shape has an additional dimension of size 1 added. Raises: ValueError: if either both or neither of `dim` and `axis` are specified." 8181,expand_dims_v2,tensorflow/tensorflow/python/ops/array_ops.py,370,function,"Returns a tensor with a length 1 axis inserted at index `axis`. Given a tensor `input`, this operation inserts a dimension of length 1 at the dimension index `axis` of `input`'s shape. The dimension index follows Python indexing rules: It's zero-based, and a negative index is counted backward from the end. This operation is useful to: * Add an outer ""batch"" dimension to a single element. * Align axes for broadcasting. * Add an inner vector length axis to a tensor of scalars. For example: If you have a single image of shape `[height, width, channels]`: >>> image = tf.zeros([10,10,3]) You can add an outer `batch` axis by passing `axis=0`: >>> tf.expand_dims(image, axis=0).shape.as_list() [1, 10, 10, 3] The new axis location matches Python `list.insert(axis, 1)`: >>> tf.expand_dims(image, axis=1).shape.as_list() [10, 1, 10, 3] Following standard Python indexing rules, a negative `axis` counts from the end so `axis=-1` adds an innermost dimension: >>> tf.expand_dims(image, -1).shape.as_list() [10, 10, 3, 1] This operation requires that `axis` is a valid index for `input.shape`, following Python indexing rules: ``` -1-tf.rank(input) <= axis <= tf.rank(input) ``` This operation is related to: * `tf.squeeze`, which removes dimensions of size 1. * `tf.reshape`, which provides more flexible reshaping capability. * `tf.sparse.expand_dims`, which provides this functionality for `tf.SparseTensor`. Args: input: A `Tensor`. axis: Integer specifying the dimension index at which to expand the shape of `input`. Given an input of D dimensions, `axis` must be in range `[-(D+1), D]` (inclusive). name: Optional string. The name of the output `Tensor`. Returns: A tensor with the same data as `input`, with an additional dimension inserted at the index specified by `axis`. Raises: ValueError: If `axis` is not specified. InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`."
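A short runnable recap of the `reshape` (8177) and `expand_dims` (8180/8181) docstrings above; the shapes in the comments follow the documented examples:

```python
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])   # shape [2, 3]
flat = tf.reshape(t, [-1])                # -1 is inferred to be 6
image = tf.zeros([10, 10, 3])
batched = tf.expand_dims(image, axis=0)   # shape [1, 10, 10, 3]
inner = tf.expand_dims(image, axis=-1)    # shape [10, 10, 3, 1]
print(flat.shape, batched.shape, inner.shape)
# (6,) (1, 10, 10, 3) (10, 10, 3, 1)
```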
8182,listdiff,tensorflow/tensorflow/python/ops/array_ops.py,446,function, 8183,setdiff1d,tensorflow/tensorflow/python/ops/array_ops.py,461,function,"Computes the difference between two lists of numbers or strings. Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). This operation also returns a list idx that represents the position of each out element in x. In other words: ```python out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1] ``` Example usage: >>> x = [1, 2, 3, 4, 5, 6] >>> y = [1, 3, 5] >>> setdiff1d(x,y) ListDiff(out=, idx=) Args: x: A Tensor. 1-D. Values to keep. y: A Tensor. Must have the same type as x. 1-D. Values to remove. out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to tf.int32. name: A name for the operation (optional). Returns: A tuple of Tensor objects (out, idx). out: A Tensor. Has the same type as x. idx: A Tensor of type out_idx." 8184,broadcast_dynamic_shape,tensorflow/tensorflow/python/ops/array_ops.py,505,function,"Computes the shape of a broadcast given symbolic shapes. When `shape_x` and `shape_y` are Tensors representing shapes (i.e. the result of calling tf.shape on another Tensor) this computes a Tensor which is the shape of the result of a broadcasting op applied in tensors of shapes `shape_x` and `shape_y`. This is useful when validating the result of a broadcasting operation when the tensors do not have statically known shapes. Example: >>> shape_x = (1, 2, 3) >>> shape_y = (5, 1, 3) >>> tf.broadcast_dynamic_shape(shape_x, shape_y) Args: shape_x: A rank 1 integer `Tensor`, representing the shape of x. shape_y: A rank 1 integer `Tensor`, representing the shape of y. Returns: A rank 1 integer `Tensor` representing the broadcasted shape. Raises: InvalidArgumentError: If the two shapes are incompatible for broadcasting." 8185,broadcast_static_shape,tensorflow/tensorflow/python/ops/array_ops.py,539,function,"Computes the shape of a broadcast given known shapes. When `shape_x` and `shape_y` are fully known `TensorShape`s this computes a `TensorShape` which is the shape of the result of a broadcasting op applied in tensors of shapes `shape_x` and `shape_y`. For example, if shape_x is `TensorShape([1, 2, 3])` and shape_y is `TensorShape([5, 1, 3])`, the result is a TensorShape whose value is `TensorShape([5, 2, 3])`. This is useful when validating the result of a broadcasting operation when the tensors have statically known shapes. Example: >>> shape_x = tf.TensorShape([1, 2, 3]) >>> shape_y = tf.TensorShape([5, 1 ,3]) >>> tf.broadcast_static_shape(shape_x, shape_y) TensorShape([5, 2, 3]) Args: shape_x: A `TensorShape` shape_y: A `TensorShape` Returns: A `TensorShape` representing the broadcasted shape. Raises: ValueError: If the two shapes can not be broadcasted." 8186,shape_v2,tensorflow/tensorflow/python/ops/array_ops.py,575,function,"Returns the shape of a tensor. See also `tf.size`, `tf.rank`. `tf.shape` returns a 1-D integer tensor representing the shape of `input`. For example: >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]) >>> tf.shape(t) Note: When using symbolic tensors, such as when using the Keras API, tf.shape() will return the shape of the symbolic tensor. >>> a = tf.keras.layers.Input((None, 10)) >>> tf.shape(a) <... shape=(3,) dtype=int32...> In these cases, using `tf.Tensor.shape` will return more informative results. 
>>> a.shape TensorShape([None, None, 10]) (The first `None` represents the as yet unknown batch size.) `tf.shape` and `Tensor.shape` should be identical in eager mode. Within `tf.function` or within a `compat.v1` context, not all dimensions may be known until execution time. Hence when defining custom layers and models for graph mode, prefer the dynamic `tf.shape(x)` over the static `x.shape`. Args: input: A `Tensor` or `SparseTensor`. out_type: (Optional) The specified output type of the operation (`int32` or `int64`). Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A `Tensor` of type `out_type`." 8187,shape,tensorflow/tensorflow/python/ops/array_ops.py,622,function,"Returns the shape of a tensor. This operation returns a 1-D integer tensor representing the shape of `input`. For example: ```python t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]) tf.shape(t) # [2, 2, 3] ``` Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). out_type: (Optional) The specified output type of the operation (`int32` or `int64`). Defaults to `tf.int32`. Returns: A `Tensor` of type `out_type`." 8188,shape_internal,tensorflow/tensorflow/python/ops/array_ops.py,647,function,"Returns the shape of a tensor. Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). optimize: if true, encode the shape as a constant when possible. out_type: (Optional) The specified output type of the operation (`int32` or `int64`). Defaults to tf.int32. Returns: A `Tensor` of type `out_type`." 8189,shape_n,tensorflow/tensorflow/python/ops/array_ops.py,677,function,"Returns shape of tensors. Args: input: A list of at least 1 `Tensor` object with the same type. out_type: The specified output type of the operation (`int32` or `int64`). Defaults to `tf.int32`(optional). name: A name for the operation (optional). Returns: A list with the same length as `input` of `Tensor` objects with type `out_type`." 8190,size_v2,tensorflow/tensorflow/python/ops/array_ops.py,697,function,"Returns the size of a tensor. See also `tf.shape`. Returns a 0-D `Tensor` representing the number of elements in `input` of type `out_type`. Defaults to tf.int32. For example: >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]) >>> tf.size(t) Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to `tf.int32`. Returns: A `Tensor` of type `out_type`. Defaults to `tf.int32`. @compatibility(numpy) Equivalent to np.size() @end_compatibility" 8191,size,tensorflow/tensorflow/python/ops/array_ops.py,731,function,"Returns the size of a tensor. Returns a 0-D `Tensor` representing the number of elements in `input` of type `out_type`. Defaults to tf.int32. For example: ```python t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]) tf.size(t) # 12 ``` Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to `tf.int32`. Returns: A `Tensor` of type `out_type`. Defaults to `tf.int32`. @compatibility(numpy) Equivalent to np.size() @end_compatibility" 8192,size_internal,tensorflow/tensorflow/python/ops/array_ops.py,761,function,"Returns the size of a tensor. Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). 
optimize: if true, encode the size as a constant when possible. out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to `tf.int32`. Returns: A `Tensor` of type `out_type`. Defaults to `tf.int32`." 8193,rank,tensorflow/tensorflow/python/ops/array_ops.py,801,function,"Returns the rank of a tensor. See also `tf.shape`. Returns a 0-D `int32` `Tensor` representing the rank of `input`. For example: ```python # shape of tensor 't' is [2, 2, 3] t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]) tf.rank(t) # 3 ``` **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as ""order"", ""degree"", or ""ndims."" Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` of type `int32`. @compatibility(numpy) Equivalent to np.ndim @end_compatibility" 8194,rank_internal,tensorflow/tensorflow/python/ops/array_ops.py,835,function,"Returns the rank of a tensor. Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). optimize: if true, encode the rank as a constant when possible. Returns: A `Tensor` of type `int32`." 8195,_check_index,tensorflow/tensorflow/python/ops/array_ops.py,868,function,Check if a given value is a valid index into a tensor. 8196,_is_undefined_dimension,tensorflow/tensorflow/python/ops/array_ops.py,884,function, 8197,_slice_helper,tensorflow/tensorflow/python/ops/array_ops.py,890,function,"Overload for Tensor.__getitem__. This operation extracts the specified region from the tensor. The notation is similar to NumPy with the restriction that it currently supports only basic indexing. That means that using a non-scalar tensor as input is not currently allowed. Some useful examples: ```python # Strip leading and trailing 2 elements foo = tf.constant([1,2,3,4,5,6]) print(foo[2:-2].eval()) # => [3,4] # Skip every other row and reverse the order of the columns foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]]) print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]] # Use scalar tensors as indices on both dimensions print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3 # Insert another dimension foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]]) print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]] print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]], [[7],[8],[9]]] # Ellipses (3 equivalent operations) foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]]) print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]] # Masks foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]]) print(foo[foo > 2].eval()) # => [3, 4, 5, 6, 7, 8, 9] ``` Notes: - `tf.newaxis` is `None` as in NumPy. - An implicit ellipsis is placed at the end of the `slice_spec`. - NumPy advanced indexing is currently not supported. Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for `Tensor.__getitem__` to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and appears in TensorFlow's generated documentation only because of this purpose. Args: tensor: An ops.Tensor object. slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable object to slice (i.e. tensor is the read-only view of this variable). Returns: The appropriate slice of ""tensor"", based on ""slice_spec"". Raises: ValueError: If a slice range is negative size. TypeError: If the slice indices aren't int, slice, ellipsis, tf.newaxis or scalar int32/int64 tensors." 8198,slice,tensorflow/tensorflow/python/ops/array_ops.py,1048,function,"Extracts a slice from a tensor. See also `tf.strided_slice`. This operation extracts a slice of size `size` from a tensor `input_` starting at the location specified by `begin`. The slice `size` is represented as a tensor shape, where `size[i]` is the number of elements of the 'i'th dimension of `input_` that you want to slice. The starting location (`begin`) for the slice is represented as an offset in each dimension of `input_`. In other words, `begin[i]` is the offset into the i'th dimension of `input_` that you want to slice from. Note that `tf.Tensor.__getitem__` is typically a more pythonic way to perform slices, as it allows you to write `foo[3:7, :-2]` instead of `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`. `begin` is zero-based; `size` is one-based. If `size[i]` is -1, all remaining elements in dimension i are included in the slice. In other words, this is equivalent to setting: `size[i] = input_.dim_size(i) - begin[i]` This operation requires that: `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]` For example: ```python t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]]) tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]] tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3], # [4, 4, 4]]] tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]], # [[5, 5, 5]]] ``` Args: input_: A `Tensor`. begin: An `int32` or `int64` `Tensor`. size: An `int32` or `int64` `Tensor`. name: A name for the operation (optional). Returns: A `Tensor` the same type as `input_`." 8199,strided_slice,tensorflow/tensorflow/python/ops/array_ops.py,1104,function,"Extracts a strided slice of a tensor (generalized Python array indexing). See also `tf.slice`. **Instead of calling this op directly most users will want to use the NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.** The interface of this op is a low-level encoding of the slicing syntax. Roughly speaking, this op extracts a slice of size `(end-begin)/stride` from the given `input_` tensor. Starting at the location specified by `begin` the slice continues by adding `stride` to the index until all dimensions are not less than `end`. Note that a stride can be negative, which causes a reverse slice. Given a Python slice `input[spec0, spec1, ..., specn]`, this function will be called as follows. `begin`, `end`, and `strides` will be vectors of length n. n in general is not equal to the rank of the `input_` tensor. In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to the ith spec. If the ith bit of `begin_mask` is set, `begin[i]` is ignored and the fullest possible range in that dimension is used instead. `end_mask` works analogously, except with the end range. `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`. `foo[::-1]` reverses a tensor with shape 8. If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions as needed will be inserted between other dimensions. 
Only one non-zero bit is allowed in `ellipsis_mask`. For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is equivalent to `foo[3:5,:,:,4:5]` and `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`. If the ith bit of `new_axis_mask` is set, then `begin`, `end`, and `stride` are ignored and a new length 1 dimension is added at this point in the output tensor. For example, `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. If the ith bit of `shrink_axis_mask` is set, it implies that the ith specification shrinks the dimensionality by 1, taking on the value at index `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask` equal to 2. NOTE: `begin` and `end` are zero-indexed. `strides` entries must be non-zero. ```python t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]]) tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]] tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3], # [4, 4, 4]]] tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4], # [3, 3, 3]]] ``` Args: input_: A `Tensor`. begin: An `int32` or `int64` `Tensor`. end: An `int32` or `int64` `Tensor`. strides: An `int32` or `int64` `Tensor`. begin_mask: An `int32` mask. end_mask: An `int32` mask. ellipsis_mask: An `int32` mask. new_axis_mask: An `int32` mask. shrink_axis_mask: An `int32` mask. var: The variable corresponding to `input_` or `None`. name: A name for the operation (optional). Returns: A `Tensor` the same type as `input`." 8200,_SliceHelperVar,tensorflow/tensorflow/python/ops/array_ops.py,1245,function,"Creates a slice helper object given a variable. This allows creating a sub-tensor from part of the current contents of a variable. See `tf.Tensor.__getitem__` for detailed examples of slicing. This function additionally allows assignment to a sliced range. This is similar to `__setitem__` functionality in Python. However, the syntax is different so that the user can capture the assignment operation for grouping or passing to `sess.run()`. For example, ```python import tensorflow as tf A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32) with tf.compat.v1.Session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) print(sess.run(A[:2, :2])) # => [[1,2], [4,5]] op = A[:2,:2].assign(22. * tf.ones((2, 2))) print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]] ``` Note that assignments currently do not support NumPy broadcasting semantics. Args: var: An `ops.Variable` object. slice_spec: The arguments to `Tensor.__getitem__`. Returns: The appropriate slice of ""tensor"", based on ""slice_spec"". As an operator. The operator also has an `assign()` method that can be used to generate an assignment operator. Raises: ValueError: If a slice range has a negative size. TypeError: If the slice indices aren't int, slice, ellipsis, tf.newaxis or int32/int64 tensors." 8201,parallel_stack,tensorflow/tensorflow/python/ops/array_ops.py,1296,function,"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel. Requires that the shape of inputs be known at graph construction time. Packs the list of tensors in `values` into a tensor with rank one higher than each tensor in `values`, by packing them along the first dimension. Given a list of length `N` of tensors of shape `(A, B, C)`; the `output` tensor will have the shape `(N, A, B, C)`.
For example: ```python x = tf.constant([1, 4]) y = tf.constant([2, 5]) z = tf.constant([3, 6]) tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] ``` The difference between `stack` and `parallel_stack` is that `stack` requires that all the inputs be computed before the operation will begin but doesn't require that the input shapes be known during graph construction. `parallel_stack` will copy pieces of the input into the output as they become available; in some situations this can provide a performance benefit. Unlike `stack`, `parallel_stack` does NOT support backpropagation. This is the opposite of unstack. The numpy equivalent is tf.parallel_stack([x, y, z]) = np.asarray([x, y, z]) Args: values: A list of `Tensor` objects with the same shape and type. name: A name for this operation (optional). Returns: output: A stacked `Tensor` with the same type as `values`." 8202,stack,tensorflow/tensorflow/python/ops/array_ops.py,1348,function,"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor. See also `tf.concat`, `tf.tile`, `tf.repeat`. Packs the list of tensors in `values` into a tensor with rank one higher than each tensor in `values`, by packing them along the `axis` dimension. Given a list of length `N` of tensors of shape `(A, B, C)`; if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. If `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. Etc. For example: >>> x = tf.constant([1, 4]) >>> y = tf.constant([2, 5]) >>> z = tf.constant([3, 6]) >>> tf.stack([x, y, z]) >>> tf.stack([x, y, z], axis=1) This is the opposite of unstack. The numpy equivalent is `np.stack`. >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z])) True Args: values: A list of `Tensor` objects with the same shape and type. axis: An `int`. The axis to stack along. Defaults to the first dimension. Negative values wrap around, so the valid range is `[-(R+1), R+1)`. name: A name for this operation (optional). Returns: output: A stacked `Tensor` with the same type as `values`. Raises: ValueError: If `axis` is out of the range [-(R+1), R+1)." 8203,_autopacking_helper,tensorflow/tensorflow/python/ops/array_ops.py,1411,function,"Converts the given list or tuple to a tensor by packing. Args: list_or_tuple: A (possibly nested) list or tuple containing a tensor. dtype: The element type of the returned tensor. name: A name for the returned tensor. Returns: A `tf.Tensor` with value equivalent to `list_or_tuple`." 8204,_get_dtype_from_nested_lists,tensorflow/tensorflow/python/ops/array_ops.py,1461,function,"Returns the dtype of any tensor-like object in `list_or_tuple`, if found. Args: list_or_tuple: A list or tuple representing an object that can be converted to a `tf.Tensor`. Returns: The dtype of any tensor-like object in `list_or_tuple`, or `None` if no such object exists." 8205,_cast_nested_seqs_to_dtype,tensorflow/tensorflow/python/ops/array_ops.py,1482,function, 8206,_should_not_autopack,tensorflow/tensorflow/python/ops/array_ops.py,1497,function, 8207,_autopacking_conversion_function,tensorflow/tensorflow/python/ops/array_ops.py,1507,function,Tensor conversion function that automatically packs arguments. 8208,unstack,tensorflow/tensorflow/python/ops/array_ops.py,1533,function,"Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. If `num` is not specified (the default), it is inferred from `value`'s shape. If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`; if `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`. (Note that the dimension unpacked along is gone, unlike `split`). If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`. Etc. This is the opposite of stack. Args: value: A rank `R > 0` `Tensor` to be unstacked. num: An `int`. The length of the dimension `axis`. Automatically inferred if `None` (the default). axis: An `int`. The axis to unstack along. Defaults to the first dimension. Negative values wrap around, so the valid range is `[-R, R)`. name: A name for the operation (optional). Returns: The list of `Tensor` objects unstacked from `value`. Raises: ValueError: If `num` is unspecified and cannot be inferred. ValueError: If `axis` is out of the range [-R, R)." 8209,concat,tensorflow/tensorflow/python/ops/array_ops.py,1582,function,"Concatenates tensors along one dimension. See also `tf.tile`, `tf.stack`, `tf.repeat`. Concatenates the list of tensors `values` along dimension `axis`. If `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated result has shape [D0, D1, ... Raxis, ...Dn] where Raxis = sum(Daxis(i)) That is, the data from the input tensors is joined along the `axis` dimension. The number of dimensions of the input tensors must match, and all dimensions except `axis` must be equal. For example: >>> t1 = [[1, 2, 3], [4, 5, 6]] >>> t2 = [[7, 8, 9], [10, 11, 12]] >>> tf.concat([t1, t2], 0) >>> tf.concat([t1, t2], 1) As in Python, `axis` may also be negative. A negative `axis` is interpreted as counting from the end of the rank, i.e., the `axis + rank(values)`-th dimension. For example: >>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]] >>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]] >>> tf.concat([t1, t2], -1) Note: If you are concatenating along a new axis consider using stack. E.g. ```python tf.concat([tf.expand_dims(t, axis) for t in tensors], axis) ``` can be rewritten as ```python tf.stack(tensors, axis=axis) ``` Args: values: A list of `Tensor` objects or a single `Tensor`. axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be in the range `[-rank(values), rank(values))`. As in Python, indexing for axis is 0-based. A positive axis in the range `[0, rank(values))` refers to the `axis`-th dimension, and a negative axis refers to the `axis + rank(values)`-th dimension. name: A name for the operation (optional). Returns: A `Tensor` resulting from concatenation of the input tensors." 8210,boolean_mask,tensorflow/tensorflow/python/ops/array_ops.py,1677,function,"Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`. In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match the first K dimensions of `tensor`'s shape. We then have: `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]` where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order). The `axis` could be used with `mask` to indicate the axis to mask from. In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match the first `axis + dim(mask)` dimensions of `tensor`'s shape. See also: `tf.ragged.boolean_mask`, which can be applied to both dense and ragged tensors, and can be used if you need to preserve the masked dimensions of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Examples: ```python # 1-D example tensor = [0, 1, 2, 3] mask = np.array([True, False, True, False]) tf.boolean_mask(tensor, mask) # [0, 2] # 2-D example tensor = [[1, 2], [3, 4], [5, 6]] mask = np.array([True, False, True]) tf.boolean_mask(tensor, mask) # [[1, 2], [5, 6]] ``` Args: tensor: N-D Tensor. mask: K-D boolean Tensor, K <= N and K must be known statically. name: A name for this operation (optional). axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By default, axis is 0 which will mask from the first dimension. Otherwise K + axis <= N. Returns: (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding to `True` values in `mask`. Raises: ValueError: If shapes do not conform." 8211,boolean_mask_v2,tensorflow/tensorflow/python/ops/array_ops.py,1771,function,"Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`. In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match the first K dimensions of `tensor`'s shape. We then have: `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]` where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order). The `axis` could be used with `mask` to indicate the axis to mask from. In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match the first `axis + dim(mask)` dimensions of `tensor`'s shape. See also: `tf.ragged.boolean_mask`, which can be applied to both dense and ragged tensors, and can be used if you need to preserve the masked dimensions of `tensor` (rather than flattening them, as `tf.boolean_mask` does). Examples: >>> tensor = [0, 1, 2, 3] # 1-D example >>> mask = np.array([True, False, True, False]) >>> tf.boolean_mask(tensor, mask) >>> tensor = [[1, 2], [3, 4], [5, 6]] # 2-D example >>> mask = np.array([True, False, True]) >>> tf.boolean_mask(tensor, mask) Args: tensor: N-D Tensor. mask: K-D boolean Tensor, K <= N and K must be known statically. axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By default, axis is 0 which will mask from the first dimension. Otherwise K + axis <= N. name: A name for this operation (optional). Returns: (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding to `True` values in `mask`. Raises: ValueError: If shapes do not conform." 8212,sparse_mask,tensorflow/tensorflow/python/ops/array_ops.py,1831,function,"Masks elements of `IndexedSlices`. Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that contains a subset of the slices of `a`. Only the slices at indices not specified in `mask_indices` are returned. This is useful when you need to extract a subset of slices in an `IndexedSlices` object. For example: ```python # `a` contains slices at indices [12, 26, 37, 45] from a large tensor # with shape [1000, 10] a.indices # [12, 26, 37, 45] tf.shape(a.values) # [4, 10] # `b` will be the subset of `a` slices at its second and third indices, so # we want to mask its first and last indices (which are at absolute # indices 12, 45) b = tf.sparse.mask(a, [12, 45]) b.indices # [26, 37] tf.shape(b.values) # [2, 10] ``` Args: a: An `IndexedSlices` instance. mask_indices: Indices of elements to mask. name: A name for the operation (optional). Returns: The masked `IndexedSlices` instance."
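A runnable recap of the `boolean_mask` entries (8210/8211) above, with and without an explicit `axis`; the outputs in the comments follow the documented examples:

```python
import numpy as np
import tensorflow as tf

tensor = tf.constant([[1, 2], [3, 4], [5, 6]])
row_mask = np.array([True, False, True])
print(tf.boolean_mask(tensor, row_mask).numpy())   # [[1 2] [5 6]]

# With axis=1 the mask is applied to columns instead of rows.
col_mask = np.array([True, False])
print(tf.boolean_mask(tensor, col_mask, axis=1).numpy())   # [[1] [3] [5]]
```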
8213,unique,tensorflow/tensorflow/python/ops/array_ops.py,1875,function,"Finds unique elements in a 1-D tensor. See also `tf.unique_with_counts`. This operation returns a tensor `y` containing all of the unique elements of `x` sorted in the same order that they occur in `x`. This operation also returns a tensor `idx` the same size as `x` that contains the index of each value of `x` in the unique output `y`. In other words: y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1] Example usage: >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) >>> y, idx = unique(x) >>> y >>> idx Args: x: A Tensor. 1-D. out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to tf.int32. name: A name for the operation (optional). Returns: A tuple of Tensor objects (y, idx). y: A Tensor. Has the same type as x. idx: A Tensor of type out_idx." 8214,unique_with_counts,tensorflow/tensorflow/python/ops/array_ops.py,1923,function,"Finds unique elements in a 1-D tensor. See also `tf.unique`. This operation returns a tensor `y` containing all of the unique elements of `x` sorted in the same order that they occur in `x`. This operation also returns a tensor `idx` the same size as `x` that contains the index of each value of `x` in the unique output `y`. Finally, it returns a third tensor `count` that contains the count of each element of `y` in `x`. In other words: y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1] Example usage: >>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) >>> y, idx, count = unique_with_counts(x) >>> y >>> idx >>> count Args: x: A Tensor. 1-D. out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to tf.int32. name: A name for the operation (optional). Returns: A tuple of Tensor objects (y, idx, count). y: A Tensor. Has the same type as x. idx: A Tensor of type out_idx. count: A Tensor of type out_idx." 8215,split,tensorflow/tensorflow/python/ops/array_ops.py,1976,function,"Splits a tensor `value` into a list of sub tensors. See also `tf.unstack`. If `num_or_size_splits` is an integer, then `value` is split along the dimension `axis` into `num_or_size_splits` smaller tensors. This requires that `value.shape[axis]` is divisible by `num_or_size_splits`. If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into `len(num_or_size_splits)` elements. The shape of the `i`-th element has the same size as the `value` except along dimension `axis` where the size is `num_or_size_splits[i]`. For example: >>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1)) >>> >>> # Split `x` into 3 tensors along dimension 1 >>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1) >>> tf.shape(s0).numpy() array([ 5, 10], dtype=int32) >>> >>> # Split `x` into 3 tensors with sizes [4, 15, 11] along dimension 1 >>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1) >>> tf.shape(split0).numpy() array([5, 4], dtype=int32) >>> tf.shape(split1).numpy() array([ 5, 15], dtype=int32) >>> tf.shape(split2).numpy() array([ 5, 11], dtype=int32) Args: value: The `Tensor` to split. num_or_size_splits: Either an integer indicating the number of splits along `axis` or a 1-D integer `Tensor` or Python list containing the sizes of each output tensor along `axis`. If a scalar, then it must evenly divide `value.shape[axis]`; otherwise the sum of sizes along the split axis must match that of the `value`. axis: An integer or scalar `int32` `Tensor`. The dimension along which to split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0. 
num: Optional, used to specify the number of outputs when it cannot be inferred from the shape of `size_splits`. name: A name for the operation (optional). Returns: If `num_or_size_splits` is a scalar, returns a list of `num_or_size_splits` `Tensor` objects; if `num_or_size_splits` is a 1-D Tensor, returns `num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting `value`. Raises: ValueError: If `num` is unspecified and cannot be inferred." 8216,transpose_v2,tensorflow/tensorflow/python/ops/array_ops.py,2054,function,"Transposes `a`, where `a` is a Tensor. Permutes the dimensions according to the value of `perm`. The returned tensor's dimension `i` will correspond to the input dimension `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank of the input tensor. Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors. If conjugate is `True` and `a.dtype` is either `complex64` or `complex128` then the values of `a` are conjugated and transposed. @compatibility(numpy) In `numpy` transposes are memory-efficient constant time operations as they simply return a new view of the same data with adjusted `strides`. TensorFlow does not support strides, so `transpose` returns a new tensor with the items permuted. @end_compatibility For example: >>> x = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.transpose(x) Equivalently, you could call `tf.transpose(x, perm=[1, 0])`. If `x` is complex, setting conjugate=True gives the conjugate transpose: >>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], ... [4 + 4j, 5 + 5j, 6 + 6j]]) >>> tf.transpose(x, conjugate=True) 'perm' is more useful for n-dimensional tensors where n > 2: >>> x = tf.constant([[[ 1, 2, 3], ... [ 4, 5, 6]], ... [[ 7, 8, 9], ... [10, 11, 12]]]) As above, simply calling `tf.transpose` will default to `perm=[2,1,0]`. To take the transpose of the matrices in dimension-0 (such as when you are transposing matrices where 0 is the batch dimension), you would set `perm=[0,2,1]`. >>> tf.transpose(x, perm=[0, 2, 1]) Note: This has a shorthand, `linalg.matrix_transpose`. Args: a: A `Tensor`. perm: A permutation of the dimensions of `a`. This should be a vector. conjugate: Optional bool. Setting it to `True` is mathematically equivalent to tf.math.conj(tf.transpose(input)). name: A name for the operation (optional). Returns: A transposed `Tensor`." 8217,transpose,tensorflow/tensorflow/python/ops/array_ops.py,2135,function,"Transposes `a`. Permutes the dimensions according to `perm`. The returned tensor's dimension i will correspond to the input dimension `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank of the input tensor. Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors. If conjugate is True and `a.dtype` is either `complex64` or `complex128` then the values of `a` are conjugated and transposed. @compatibility(numpy) In `numpy` transposes are memory-efficient constant time operations as they simply return a new view of the same data with adjusted `strides`. TensorFlow does not support strides, so `transpose` returns a new tensor with the items permuted.
@end_compatibility For example: ```python x = tf.constant([[1, 2, 3], [4, 5, 6]]) tf.transpose(x) # [[1, 4] # [2, 5] # [3, 6]] # Equivalently tf.transpose(x, perm=[1, 0]) # [[1, 4] # [2, 5] # [3, 6]] # If x is complex, setting conjugate=True gives the conjugate transpose x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]) tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j], # [2 - 2j, 5 - 5j], # [3 - 3j, 6 - 6j]] # 'perm' is more useful for n-dimensional tensors, for n > 2 x = tf.constant([[[ 1, 2, 3], [ 4, 5, 6]], [[ 7, 8, 9], [10, 11, 12]]]) # Take the transpose of the matrices in dimension-0 # (this common operation has a shorthand `linalg.matrix_transpose`) tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4], # [2, 5], # [3, 6]], # [[7, 10], # [8, 11], # [9, 12]]] ``` Args: a: A `Tensor`. perm: A permutation of the dimensions of `a`. name: A name for the operation (optional). conjugate: Optional bool. Setting it to `True` is mathematically equivalent to tf.math.conj(tf.transpose(input)). Returns: A transposed `Tensor`." 8218,matrix_transpose,tensorflow/tensorflow/python/ops/array_ops.py,2227,function,"Transposes last two dimensions of tensor `a`. For example: ```python x = tf.constant([[1, 2, 3], [4, 5, 6]]) tf.linalg.matrix_transpose(x) # [[1, 4], # [2, 5], # [3, 6]] x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]) tf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j], # [2 - 2j, 5 - 5j], # [3 - 3j, 6 - 6j]] # Matrix with two batch dimensions. # x.shape is [1, 2, 3, 4] # tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3] ``` Note that `tf.matmul` provides kwargs allowing for transpose of arguments. This is done with minimal cost, and is preferable to using this function. E.g. ```python # Good! Transpose is taken at minimal additional cost. tf.matmul(matrix, b, transpose_b=True) # Inefficient! tf.matmul(matrix, tf.linalg.matrix_transpose(b)) ``` @compatibility(numpy) In `numpy` transposes are memory-efficient constant time operations as they simply return a new view of the same data with adjusted `strides`. TensorFlow does not support strides, `linalg.matrix_transpose` returns a new tensor with the items permuted. @end_compatibility Args: a: A `Tensor` with `rank >= 2`. name: A name for the operation (optional). conjugate: Optional bool. Setting it to `True` is mathematically equivalent to tf.math.conj(tf.linalg.matrix_transpose(input)). Returns: A transposed batch matrix `Tensor`. Raises: ValueError: If `a` is determined statically to have `rank < 2`." 8219,matrix_diag,tensorflow/tensorflow/python/ops/array_ops.py,2306,function,"Returns a batched diagonal tensor with given batched diagonal values. Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th diagonals of a matrix, with everything else padded with `padding`. `num_rows` and `num_cols` specify the dimension of the innermost matrix of the output. If both are not specified, the op assumes the innermost matrix is square and infers its size from `k` and the innermost dimension of `diagonal`. If only one of them is specified, the op assumes the unspecified value is the smallest possible based on other criteria. Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`. The second innermost dimension of `diagonal` has double meaning. 
When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and the output tensor is: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper padding_value ; otherwise ``` Otherwise, `M` is treated as the number of diagonals for the matrix in the same batch (`M = k[1]-k[0]+1`), and the output tensor is: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] padding_value ; otherwise ``` where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0) + offset`. `offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise ``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. For example: ``` # The main diagonal. diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) [5, 6, 7, 8]]) tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]], [[5, 0, 0, 0], [0, 6, 0, 0], [0, 0, 7, 0], [0, 0, 0, 8]]] # A superdiagonal (per batch). diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) [4, 5, 6]]) tf.matrix_diag(diagonal, k = 1) ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 0]], [[0, 4, 0, 0], [0, 0, 5, 0], [0, 0, 0, 6], [0, 0, 0, 0]]] # A tridiagonal band (per batch). diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) [1, 2, 3], [0, 4, 5]], [[2, 3, 0], [6, 7, 9], [0, 9, 1]]]) tf.matrix_diag(diagonals, k = (-1, 1)) ==> [[[1, 8, 0], # Output shape: (2, 3, 3) [4, 2, 9], [0, 5, 3]], [[6, 2, 0], [9, 7, 3], [0, 1, 9]]] # RIGHT_LEFT alignment. diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) [1, 2, 3], [4, 5, 0]], [[0, 2, 3], [6, 7, 9], [9, 1, 0]]]) tf.matrix_diag(diagonals, k = (-1, 1), align=""RIGHT_LEFT"") ==> [[[1, 8, 0], # Output shape: (2, 3, 3) [4, 2, 9], [0, 5, 3]], [[6, 2, 0], [9, 7, 3], [0, 1, 9]]] # Rectangular matrix. diagonal = np.array([1, 2]) # Input shape: (2) tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) ==> [[0, 0, 0, 0], # Output shape: (3, 4) [1, 0, 0, 0], [0, 2, 0, 0]] # Rectangular matrix with inferred num_cols and padding_value = 9. tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) ==> [[9, 9], # Output shape: (3, 2) [1, 9], [9, 2]] ``` Args: diagonal: A `Tensor` with `rank k >= 1`. name: A name for the operation (optional). k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. num_rows: The number of rows of the output matrix. If it is not provided, the op assumes the output matrix is a square matrix and infers the matrix size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`. num_cols: The number of columns of the output matrix. If it is not provided, the op assumes the output matrix is a square matrix and infers the matrix size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`. padding_value: The value to fill the area outside the specified diagonal band with. Default is 0. align: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. 
There are four possible alignments: ""RIGHT_LEFT"" (default), ""LEFT_RIGHT"", ""LEFT_LEFT"", and ""RIGHT_RIGHT"". ""RIGHT_LEFT"" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses ""LEFT_RIGHT"", which is the opposite alignment. Returns: A Tensor. Has the same type as `diagonal`." 8220,matrix_diag_part,tensorflow/tensorflow/python/ops/array_ops.py,2476,function,"Returns the batched diagonal part of a batched tensor. Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched `input`. Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. Let `max_diag_len` be the maximum length among all diagonals to be extracted, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` Let `num_diags` be the number of diagonals to extract, `num_diags = k[1] - k[0] + 1`. If `num_diags == 1`, the output tensor is of rank `r - 1` with shape `[I, J, ..., L, max_diag_len]` and values: ``` diagonal[i, j, ..., l, n] = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise. ``` where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. Otherwise, the output tensor has rank `r` with dimensions `[I, J, ..., L, num_diags, max_diag_len]` with values: ``` diagonal[i, j, ..., l, m, n] = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise. ``` where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. `offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise ``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. The input must be at least a matrix. For example: ``` input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) [5, 6, 7, 8], [9, 8, 7, 6]], [[5, 4, 3, 2], [1, 2, 3, 4], [5, 6, 7, 8]]]) # A main diagonal from each batch. tf.linalg.diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) [5, 2, 7]] # A superdiagonal from each batch. tf.linalg.diag_part(input, k = 1) ==> [[2, 7, 6], # Output shape: (2, 3) [4, 3, 8]] # A band from each batch. tf.linalg.diag_part(input, k = (-1, 2)) ==> [[[3, 8, 0], # Output shape: (2, 4, 3) [2, 7, 6], [1, 6, 7], [0, 5, 8]], [[3, 4, 0], [4, 3, 8], [5, 2, 7], [0, 1, 6]]] # RIGHT_LEFT alignment. tf.linalg.diag_part(input, k = (-1, 2), align=""RIGHT_LEFT"") ==> [[[0, 3, 8], # Output shape: (2, 4, 3) [2, 7, 6], [1, 6, 7], [5, 8, 0]], [[0, 3, 4], [4, 3, 8], [5, 2, 7], [1, 6, 0]]] # max_diag_len can be shorter than the main diagonal. tf.linalg.diag_part(input, k = (-2, -1)) ==> [[[5, 8], [0, 9]], [[1, 6], [0, 5]]] # padding_value = 9 tf.linalg.diag_part(input, k = (1, 3), padding_value = 9) ==> [[[4, 9, 9], # Output shape: (2, 3, 3) [3, 8, 9], [2, 7, 6]], [[2, 9, 9], [3, 4, 9], [4, 3, 8]]] ``` Args: input: A `Tensor` with `rank k >= 2`. name: A name for the operation (optional). k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. padding_value: The value to fill the area outside the specified diagonal band with. Default is 0. align: Some diagonals are shorter than `max_diag_len` and need to be padded. 
`align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: ""RIGHT_LEFT"" (default), ""LEFT_RIGHT"", ""LEFT_LEFT"", and ""RIGHT_RIGHT"". ""RIGHT_LEFT"" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses ""LEFT_RIGHT"", which is the opposite alignment. Returns: A Tensor containing diagonals of `input`. Has the same type as `input`." 8221,matrix_set_diag,tensorflow/tensorflow/python/ops/array_ops.py,2616,function,"Returns a batched matrix tensor with new batched diagonal values. Given `input` and `diagonal`, this operation returns a tensor with the same shape and values as `input`, except for the specified diagonals of the innermost matrices. These will be overwritten by the values in `diagonal`. `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. If `k` is scalar or `k[0] == k[1]`: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] input[i, j, ..., l, m, n] ; otherwise ``` Otherwise, ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] input[i, j, ..., l, m, n] ; otherwise ``` where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0) + offset`. `offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise ``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. For example: ``` # The main diagonal. input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) [7, 7, 7, 7], [7, 7, 7, 7]], [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]]]) diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) [4, 5, 6]]) tf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) [7, 2, 7, 7], [7, 7, 3, 7]], [[4, 7, 7, 7], [7, 5, 7, 7], [7, 7, 6, 7]]] # A superdiagonal (per batch). tf.matrix_set_diag(input, diagonal, k = 1) ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) [7, 7, 2, 7], [7, 7, 7, 3]], [[7, 4, 7, 7], [7, 7, 5, 7], [7, 7, 7, 6]]] # A band of diagonals. diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) [6, 5, 8], [1, 2, 3], [0, 4, 5]], [[1, 2, 0], [5, 6, 4], [6, 1, 2], [0, 3, 4]]]) tf.matrix_set_diag(input, diagonals, k = (-1, 2)) ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) [4, 2, 5, 1], [7, 5, 3, 8]], [[6, 5, 1, 7], [3, 1, 6, 2], [7, 4, 2, 4]]] # RIGHT_LEFT alignment. diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) [6, 5, 8], [1, 2, 3], [4, 5, 0]], [[0, 1, 2], [5, 6, 4], [6, 1, 2], [3, 4, 0]]]) tf.matrix_set_diag(input, diagonals, k = (-1, 2), align=""RIGHT_LEFT"") ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) [4, 2, 5, 1], [7, 5, 3, 8]], [[6, 5, 1, 7], [3, 1, 6, 2], [7, 4, 2, 4]]] ``` Args: input: A `Tensor` with rank `k + 1`, where `k >= 1`. diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`, otherwise. 
`k >= 1`. name: A name for the operation (optional). k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. align: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: ""RIGHT_LEFT"" (default), ""LEFT_RIGHT"", ""LEFT_LEFT"", and ""RIGHT_RIGHT"". ""RIGHT_LEFT"" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses ""LEFT_RIGHT"", which is the opposite alignment." 8222,_constant_if_small,tensorflow/tensorflow/python/ops/array_ops.py,2753,function, 8223,_tag_zeros_tensor,tensorflow/tensorflow/python/ops/array_ops.py,2763,function,"Tags the result of function by setting _is_zeros_tensor attribute. This is useful to compute Hessians of fused ops such as cross_entropy." 8224,zeros,tensorflow/tensorflow/python/ops/array_ops.py,2780,function,"Creates a tensor with all elements set to zero. See also `tf.zeros_like`, `tf.ones`, `tf.fill`, `tf.eye`. This operation returns a tensor of type `dtype` with shape `shape` and all elements set to zero. >>> tf.zeros([3, 4], tf.int32) Args: shape: A `list` of integers, a `tuple` of integers, or a 1-D `Tensor` of type `int32`. dtype: The DType of an element in the resulting `Tensor`. name: Optional string. A name for the operation. Returns: A `Tensor` with all elements set to zero." 8225,zeros_like,tensorflow/tensorflow/python/ops/array_ops.py,2836,function,"Creates a tensor with all elements set to zero. See also `tf.zeros`. Given a single tensor (`tensor`), this operation returns a tensor of the same type and shape as `tensor` with all elements set to zero. Optionally, you can use `dtype` to specify a new type for the returned tensor. Examples: >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.zeros_like(tensor) >>> tf.zeros_like(tensor, dtype=tf.float32) Args: tensor: A `Tensor`. dtype: A type for the returned `Tensor`. Must be `float16`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool` or `string`. (optional) name: A name for the operation (optional). optimize: if `True`, attempt to statically determine the shape of `tensor` and encode it as a constant. (optional, defaults to `True`) Returns: A `Tensor` with all elements set to zero." 8226,zeros_like_v2,tensorflow/tensorflow/python/ops/array_ops.py,2875,function,"Creates a tensor with all elements set to zero. See also `tf.zeros`. Given a single tensor or array-like object (`input`), this operation returns a tensor of the same type and shape as `input` with all elements set to zero. Optionally, you can use `dtype` to specify a new type for the returned tensor. Examples: >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.zeros_like(tensor) >>> tf.zeros_like(tensor, dtype=tf.float32) >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]]) Args: input: A `Tensor` or array-like object. dtype: A type for the returned `Tensor`. Must be `float16`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool` or `string` (optional). name: A name for the operation (optional). Returns: A `Tensor` with all elements set to zero." 
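A minimal runnable sketch of `tf.zeros` and `tf.zeros_like` (illustrative values):
```python
import tensorflow as tf

z = tf.zeros([2, 3], tf.int32)           # [[0, 0, 0], [0, 0, 0]]
t = tf.constant([[1, 2, 3], [4, 5, 6]])
zl = tf.zeros_like(t)                    # same shape/dtype as `t`, all zeros
zf = tf.zeros_like(t, dtype=tf.float32)  # same shape, but float32 zeros
```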
8227,zeros_like_impl,tensorflow/tensorflow/python/ops/array_ops.py,2919,function,Internal implementation for the v1/v2 zeros_like API calls. 8228,ones_like,tensorflow/tensorflow/python/ops/array_ops.py,2951,function,"Creates a tensor with all elements set to 1. See also `tf.ones`. Given a single tensor (`tensor`), this operation returns a tensor of the same type and shape as `tensor` with all elements set to 1. Optionally, you can specify a new type (`dtype`) for the returned tensor. For example: ```python tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]] ``` Args: tensor: A `Tensor`. dtype: A type for the returned `Tensor`. Must be `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128` or `bool`. name: A name for the operation (optional). optimize: if true, attempt to statically determine the shape of 'tensor' and encode it as a constant. Returns: A `Tensor` with all elements set to 1." 8229,ones_like_v2,tensorflow/tensorflow/python/ops/array_ops.py,2984,function,"Creates a tensor of all ones that has the same shape as the input. See also `tf.ones`. Given a single tensor (`tensor`), this operation returns a tensor of the same type and shape as `tensor` with all elements set to 1. Optionally, you can use `dtype` to specify a new type for the returned tensor. For example: >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> tf.ones_like(tensor) Args: input: A `Tensor`. dtype: A type for the returned `Tensor`. Must be `float16`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool` or `string`. name: A name for the operation (optional). Returns: A `Tensor` with all elements set to one." 8230,ones_like_impl,tensorflow/tensorflow/python/ops/array_ops.py,3017,function,Internal implementation for the v1/v2 ones_like API calls. 8231,ones,tensorflow/tensorflow/python/ops/array_ops.py,3032,function,"Creates a tensor with all elements set to one (1). See also `tf.ones_like`, `tf.zeros`, `tf.fill`, `tf.eye`. This operation returns a tensor of type `dtype` with shape `shape` and all elements set to one. >>> tf.ones([3, 4], tf.int32) Args: shape: A `list` of integers, a `tuple` of integers, or a 1-D `Tensor` of type `int32`. dtype: Optional DType of an element in the resulting `Tensor`. Default is `tf.float32`. name: Optional string. A name for the operation. Returns: A `Tensor` with all elements set to one (1)." 8232,placeholder,tensorflow/tensorflow/python/ops/array_ops.py,3087,function,"Inserts a placeholder for a tensor that will be always fed. **Important**: This tensor will produce an error if evaluated. Its value must be fed using the `feed_dict` optional argument to `Session.run()`, `Tensor.eval()`, or `Operation.run()`. For example: ```python x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024)) y = tf.matmul(x, x) with tf.compat.v1.Session() as sess: print(sess.run(y)) # ERROR: will fail because x was not fed. rand_array = np.random.rand(1024, 1024) print(sess.run(y, feed_dict={x: rand_array})) # Will succeed. ``` @compatibility(eager) Placeholders are not compatible with eager execution. @end_compatibility Args: dtype: The type of elements in the tensor to be fed. shape: The shape of the tensor to be fed (optional). If the shape is not specified, you can feed a tensor of any shape. name: A name for the operation (optional). Returns: A `Tensor` that may be used as a handle for feeding a value, but not evaluated directly. 
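For the `ones`/`ones_like` entries above, a minimal runnable sketch (illustrative values):
```python
import tensorflow as tf

o = tf.ones([3, 4], tf.int32)   # 3x4 matrix of int32 ones
t = tf.constant([[1, 2, 3], [4, 5, 6]])
ol = tf.ones_like(t)            # [[1, 1, 1], [1, 1, 1]], dtype int32
```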
Raises: RuntimeError: if eager execution is enabled" 8233,placeholder_with_default,tensorflow/tensorflow/python/ops/array_ops.py,3132,function,"A placeholder op that passes through `input` when its output is not fed. Args: input: A `Tensor`. The default value to produce when output is not fed. shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of the tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`." 8234,sparse_placeholder,tensorflow/tensorflow/python/ops/array_ops.py,3149,function,"Inserts a placeholder for a sparse tensor that will be always fed. **Important**: This sparse tensor will produce an error if evaluated. Its value must be fed using the `feed_dict` optional argument to `Session.run()`, `Tensor.eval()`, or `Operation.run()`. For example: ```python x = tf.compat.v1.sparse.placeholder(tf.float32) y = tf.sparse.reduce_sum(x) with tf.compat.v1.Session() as sess: print(sess.run(y)) # ERROR: will fail because x was not fed. indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64) values = np.array([1.0, 2.0], dtype=np.float32) shape = np.array([7, 9, 2], dtype=np.int64) print(sess.run(y, feed_dict={ x: tf.compat.v1.SparseTensorValue(indices, values, shape)})) # Will succeed. print(sess.run(y, feed_dict={ x: (indices, values, shape)})) # Will succeed. sp = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=shape) sp_value = sp.eval(session=sess) print(sess.run(y, feed_dict={x: sp_value})) # Will succeed. ``` @compatibility{eager} Placeholders are not compatible with eager execution. Args: dtype: The type of `values` elements in the tensor to be fed. shape: The shape of the tensor to be fed (optional). If the shape is not specified, you can feed a sparse tensor of any shape. name: A name for prefixing the operations (optional). Returns: A `SparseTensor` that may be used as a handle for feeding a value, but not evaluated directly. Raises: RuntimeError: if eager execution is enabled" 8235,pad_v2,tensorflow/tensorflow/python/ops/array_ops.py,3250,function,"Pads a tensor. This operation pads a `tensor` according to the `paddings` you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how many values to add before the contents of `tensor` in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of `tensor` in that dimension. If `mode` is ""REFLECT"" then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If `mode` is ""SYMMETRIC"" then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `tensor.dim_size(D)`. The padded size of each dimension D of the output is: `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]` For example: ```python t = tf.constant([[1, 2, 3], [4, 5, 6]]) paddings = tf.constant([[1, 1,], [2, 2]]) # 'constant_values' is 0. # rank of 't' is 2. tf.pad(t, paddings, ""CONSTANT"") # [[0, 0, 0, 0, 0, 0, 0], # [0, 0, 1, 2, 3, 0, 0], # [0, 0, 4, 5, 6, 0, 0], # [0, 0, 0, 0, 0, 0, 0]] tf.pad(t, paddings, ""REFLECT"") # [[6, 5, 4, 5, 6, 5, 4], # [3, 2, 1, 2, 3, 2, 1], # [6, 5, 4, 5, 6, 5, 4], # [3, 2, 1, 2, 3, 2, 1]] tf.pad(t, paddings, ""SYMMETRIC"") # [[2, 1, 1, 2, 3, 3, 2], # [2, 1, 1, 2, 3, 3, 2], # [5, 4, 4, 5, 6, 6, 5], # [5, 4, 4, 5, 6, 6, 5]] ``` Args: tensor: A `Tensor`. paddings: A `Tensor` of type `int32`. 
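A minimal runnable sketch of the three padding modes, using the same values as the `pad` example above:
```python
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1], [2, 2]])

c = tf.pad(t, paddings, "CONSTANT")   # zero-pads; output shape (4, 7)
r = tf.pad(t, paddings, "REFLECT")    # mirrors, excluding the edge row/col
s = tf.pad(t, paddings, "SYMMETRIC")  # mirrors, including the edge row/col
```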
mode: One of ""CONSTANT"", ""REFLECT"", or ""SYMMETRIC"" (case-insensitive) constant_values: In ""CONSTANT"" mode, the scalar pad value to use. Must be same type as `tensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `tensor`. Raises: ValueError: When mode is not one of ""CONSTANT"", ""REFLECT"", or ""SYMMETRIC""." 8236,pad,tensorflow/tensorflow/python/ops/array_ops.py,3309,function,"Pads a tensor. This operation pads a `tensor` according to the `paddings` you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how many values to add before the contents of `tensor` in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of `tensor` in that dimension. If `mode` is ""REFLECT"" then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If `mode` is ""SYMMETRIC"" then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `tensor.dim_size(D)`. The padded size of each dimension D of the output is: `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]` For example: ```python t = tf.constant([[1, 2, 3], [4, 5, 6]]) paddings = tf.constant([[1, 1,], [2, 2]]) # 'constant_values' is 0. # rank of 't' is 2. tf.pad(t, paddings, ""CONSTANT"") # [[0, 0, 0, 0, 0, 0, 0], # [0, 0, 1, 2, 3, 0, 0], # [0, 0, 4, 5, 6, 0, 0], # [0, 0, 0, 0, 0, 0, 0]] tf.pad(t, paddings, ""REFLECT"") # [[6, 5, 4, 5, 6, 5, 4], # [3, 2, 1, 2, 3, 2, 1], # [6, 5, 4, 5, 6, 5, 4], # [3, 2, 1, 2, 3, 2, 1]] tf.pad(t, paddings, ""SYMMETRIC"") # [[2, 1, 1, 2, 3, 3, 2], # [2, 1, 1, 2, 3, 3, 2], # [5, 4, 4, 5, 6, 6, 5], # [5, 4, 4, 5, 6, 6, 5]] ``` Args: tensor: A `Tensor`. paddings: A `Tensor` of type `int32`. mode: One of ""CONSTANT"", ""REFLECT"", or ""SYMMETRIC"" (case-insensitive) name: A name for the operation (optional). constant_values: In ""CONSTANT"" mode, the scalar pad value to use. Must be same type as `tensor`. Returns: A `Tensor`. Has the same type as `tensor`. Raises: ValueError: When mode is not one of ""CONSTANT"", ""REFLECT"", or ""SYMMETRIC""." 8237,_get_paddings_constant,tensorflow/tensorflow/python/ops/array_ops.py,3403,function,"Helper to get the constant values of the paddings arg to pad(). Used under V1 graph mode to facilitate computation of the shape of the output tensor of `pad()`. Args: paddings: The same paddings arg as passed to pad(). Can be a Tensor, or a nested list or tuple of Tensor and/or numbers. Returns: A nested list or numbers or `None`, in which `None` indicates unknown padding size." 8238,meshgrid,tensorflow/tensorflow/python/ops/array_ops.py,3427,function,"Broadcasts parameters for evaluation on an N-D grid. Given N one-dimensional coordinate arrays `*args`, returns a list `outputs` of N-D coordinate arrays for evaluating expressions on an N-D grid. Notes: `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions. When the `indexing` argument is set to 'xy' (the default), the broadcasting instructions for the first two dimensions are swapped. Examples: Calling `X, Y = meshgrid(x, y)` with the tensors ```python x = [1, 2, 3] y = [4, 5, 6] X, Y = tf.meshgrid(x, y) # X = [[1, 2, 3], # [1, 2, 3], # [1, 2, 3]] # Y = [[4, 4, 4], # [5, 5, 5], # [6, 6, 6]] ``` Args: *args: `Tensor`s with rank 1. **kwargs: - indexing: Either 'xy' or 'ij' (optional, default: 'xy'). - name: A name for the operation (optional). Returns: outputs: A list of N `Tensor`s with rank N. 
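For the `meshgrid` entry above, a runnable sketch using the docstring's own values:
```python
import tensorflow as tf

x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)   # default 'xy' (cartesian) indexing
# X == [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
# Y == [[4, 4, 4], [5, 5, 5], [6, 6, 6]]
```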
Raises: TypeError: When no keyword arguments (kwargs) are passed. ValueError: When indexing keyword argument is not one of `xy` or `ij`." 8239,_compute_size_of_strided_dim,tensorflow/tensorflow/python/ops/array_ops.py,3511,function,Computes the size of a single strided slice dimension. 8240,_TileGradShape,tensorflow/tensorflow/python/ops/array_ops.py,3551,function,Shape function for the TileGrad op. 8241,edit_distance,tensorflow/tensorflow/python/ops/array_ops.py,3571,function,"Computes the Levenshtein distance between sequences. This operation takes variable-length sequences (`hypothesis` and `truth`), each provided as a `SparseTensor`, and computes the Levenshtein distance. You can normalize the edit distance by length of `truth` by setting `normalize` to true. For example: Given the following input, * `hypothesis` is a `tf.SparseTensor` of shape `[2, 1, 1]` * `truth` is a `tf.SparseTensor` of shape `[2, 2, 2]` >>> hypothesis = tf.SparseTensor( ... [[0, 0, 0], ... [1, 0, 0]], ... [""a"", ""b""], ... (2, 1, 1)) >>> truth = tf.SparseTensor( ... [[0, 1, 0], ... [1, 0, 0], ... [1, 0, 1], ... [1, 1, 0]], ... [""a"", ""b"", ""c"", ""a""], ... (2, 2, 2)) >>> tf.edit_distance(hypothesis, truth, normalize=True) The operation returns a dense Tensor of shape `[2, 2]` with edit distances normalized by `truth` lengths. **Note**: It is possible to calculate edit distance between two sparse tensors with variable-length values. However, attempting to create them while eager execution is enabled will result in a `ValueError`. For the following inputs, ```python # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values: # (0,0) = [""a""] # (1,0) = [""b""] hypothesis = tf.sparse.SparseTensor( [[0, 0, 0], [1, 0, 0]], [""a"", ""b""], (2, 1, 1)) # 'truth' is a tensor of shape `[2, 2]` with variable-length values: # (0,0) = [] # (0,1) = [""a""] # (1,0) = [""b"", ""c""] # (1,1) = [""a""] truth = tf.sparse.SparseTensor( [[0, 1, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0]], [""a"", ""b"", ""c"", ""a""], (2, 2, 2)) normalize = True # The output would be a dense Tensor of shape `(2,)`, with edit distances normalized by 'truth' lengths. # output => array([0., 0.5], dtype=float32) ``` Args: hypothesis: A `SparseTensor` containing hypothesis sequences. truth: A `SparseTensor` containing truth sequences. normalize: A `bool`. If `True`, normalizes the Levenshtein distance by length of `truth`. name: A name for the operation (optional). Returns: A dense `Tensor` with rank `R - 1`, where R is the rank of the `SparseTensor` inputs `hypothesis` and `truth`. Raises: TypeError: If either `hypothesis` or `truth` is not a `SparseTensor`." 8242,_FakeQuantWithMinMaxArgsGradient,tensorflow/tensorflow/python/ops/array_ops.py,3675,function,Gradient for FakeQuantWithMinMaxArgs op. 8243,_FakeQuantWithMinMaxVarsGradient,tensorflow/tensorflow/python/ops/array_ops.py,3687,function,Gradient for FakeQuantWithMinMaxVars op. 8244,_FakeQuantWithMinMaxVarsPerChannelGradient,tensorflow/tensorflow/python/ops/array_ops.py,3699,function,Gradient for FakeQuantWithMinMaxVarsPerChannel op. 8245,required_space_to_batch_paddings,tensorflow/tensorflow/python/ops/array_ops.py,3711,function,"Calculate padding required to make block_shape divide input_shape. This function can be used to calculate a suitable paddings argument for use with space_to_batch_nd and batch_to_space_nd. Args: input_shape: int32 Tensor of shape [N]. block_shape: int32 Tensor of shape [N]. base_paddings: Optional int32 Tensor of shape [N, 2].
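For the `edit_distance` entry above, a runnable sketch; the expected output assumes the documented normalization (the empty truth sequence at (0, 0) yields `inf`):
```python
import tensorflow as tf

hypothesis = tf.SparseTensor(
    [[0, 0, 0], [1, 0, 0]], ["a", "b"], (2, 1, 1))
truth = tf.SparseTensor(
    [[0, 1, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0]],
    ["a", "b", "c", "a"], (2, 2, 2))

d = tf.edit_distance(hypothesis, truth, normalize=True)
# d == [[inf, 1.0],    # (0, 0): truth is empty, so normalization divides by 0
#       [0.5, 1.0]]    # (1, 0): one edit over a length-2 truth sequence
```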
Specifies the minimum amount of padding to use. All elements must be >= 0. If not specified, defaults to 0. name: string. Optional name prefix. Returns: (paddings, crops), where: `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2] satisfying: paddings[i, 0] = base_paddings[i, 0]. 0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i] (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0 crops[i, 0] = 0 crops[i, 1] = paddings[i, 1] - base_paddings[i, 1] Raises: ValueError if called with incompatible shapes." 8246,space_to_batch,tensorflow/tensorflow/python/ops/array_ops.py,3792,function, 8247,space_to_batch_v2,tensorflow/tensorflow/python/ops/array_ops.py,3815,function, 8248,space_to_depth,tensorflow/tensorflow/python/ops/array_ops.py,3825,function, 8249,space_to_depth_v2,tensorflow/tensorflow/python/ops/array_ops.py,3834,function, 8250,depth_to_space,tensorflow/tensorflow/python/ops/array_ops.py,3844,function, 8251,depth_to_space_v2,tensorflow/tensorflow/python/ops/array_ops.py,3853,function, 8252,batch_to_space,tensorflow/tensorflow/python/ops/array_ops.py,3862,function, 8253,batch_to_space_v2,tensorflow/tensorflow/python/ops/array_ops.py,3880,function,"BatchToSpace for N-D tensors of type T. This operation reshapes the ""batch"" dimension 0 into `M + 1` dimensions of shape `block_shape + [batch]`, interleaves these blocks back into the grid defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as the input. The spatial dimensions of this intermediate result are then optionally cropped according to `crops` to produce the output. This is the reverse of SpaceToBatch (see `tf.space_to_batch`). Args: input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape + remaining_shape`, where `spatial_shape` has M dimensions. block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following types: `int32`, `int64`. All values must be >= 1. For backwards compatibility with TF 1.0, this parameter may be an int, in which case it is converted to `numpy.array([block_shape, block_shape], dtype=numpy.int64)`. crops: A 2-D `Tensor` with shape `[M, 2]`. Must be one of the following types: `int32`, `int64`. All values must be >= 0. `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input dimension `i + 1`, which corresponds to spatial dimension `i`. It is required that `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. This operation is equivalent to the following steps: 1. Reshape `input` to `reshaped` of shape: [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape), input_shape[1], ..., input_shape[N-1]] 2. Permute dimensions of `reshaped` to produce `permuted` of shape [batch / prod(block_shape), input_shape[1], block_shape[0], ..., input_shape[M], block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]] 3. Reshape `permuted` to produce `reshaped_permuted` of shape [batch / prod(block_shape), input_shape[1] * block_shape[0], ..., input_shape[M] * block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]] 4. Crop the start and end of dimensions `[1, ..., M]` of `reshaped_permuted` according to `crops` to produce the output of shape: [batch / prod(block_shape), input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], input_shape[M+1], ..., input_shape[N-1]] name: A name for the operation (optional). 
Examples: (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`: ```python [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] ``` The output tensor has shape `[1, 2, 2, 1]` and value: ``` x = [[[[1], [2]], [[3], [4]]]] ``` (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`: ```python [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] ``` The output tensor has shape `[1, 2, 2, 3]` and value: ```python x = [[[[1, 2, 3], [4, 5, 6 ]], [[7, 8, 9], [10, 11, 12]]]] ``` (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`: ```python x = [[[[1], [3]], [[ 9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] ``` The output tensor has shape `[1, 4, 4, 1]` and value: ```python x = [[[1], [2], [ 3], [ 4]], [[5], [6], [ 7], [ 8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]] ``` (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`: ```python x = [[[[0], [ 1], [ 3]]], [[[0], [ 9], [11]]], [[[0], [ 2], [ 4]]], [[[0], [10], [12]]], [[[0], [ 5], [ 7]]], [[[0], [13], [15]]], [[[0], [ 6], [ 8]]], [[[0], [14], [16]]]] ``` The output tensor has shape `[2, 2, 4, 1]` and value: ```python x = [[[[ 1], [ 2], [ 3], [ 4]], [[ 5], [ 6], [ 7], [ 8]]], [[[ 9], [10], [11], [12]], [[13], [14], [15], [16]]]] ``` Returns: A `Tensor`. Has the same type as `input`." 8254,one_hot,tensorflow/tensorflow/python/ops/array_ops.py,4018,function,"Returns a one-hot tensor. See also `tf.fill`, `tf.eye`. The locations represented by indices in `indices` take value `on_value`, while all other locations take value `off_value`. `on_value` and `off_value` must have matching data types. If `dtype` is also provided, they must be the same data type as specified by `dtype`. If `on_value` is not provided, it will default to the value `1` with type `dtype` If `off_value` is not provided, it will default to the value `0` with type `dtype` If the input `indices` is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis` (default: the new axis is appended at the end). If `indices` is a scalar the output shape will be a vector of length `depth` If `indices` is a vector of length `features`, the output shape will be: ``` features x depth if axis == -1 depth x features if axis == 0 ``` If `indices` is a matrix (batch) with shape `[batch, features]`, the output shape will be: ``` batch x features x depth if axis == -1 batch x depth x features if axis == 1 depth x batch x features if axis == 0 ``` If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer to a non-ragged axis. The output will be equivalent to applying 'one_hot' on the values of the RaggedTensor, and creating a new RaggedTensor from the result. If `dtype` is not provided, it will attempt to assume the data type of `on_value` or `off_value`, if one or both are passed in. If none of `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the value `tf.float32`. Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`, etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`. 
For example: ```python indices = [0, 1, 2] depth = 3 tf.one_hot(indices, depth) # output: [3 x 3] # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] indices = [0, 2, -1, 1] depth = 3 tf.one_hot(indices, depth, on_value=5.0, off_value=0.0, axis=-1) # output: [4 x 3] # [[5.0, 0.0, 0.0], # one_hot(0) # [0.0, 0.0, 5.0], # one_hot(2) # [0.0, 0.0, 0.0], # one_hot(-1) # [0.0, 5.0, 0.0]] # one_hot(1) indices = [[0, 2], [1, -1]] depth = 3 tf.one_hot(indices, depth, on_value=1.0, off_value=0.0, axis=-1) # output: [2 x 2 x 3] # [[[1.0, 0.0, 0.0], # one_hot(0) # [0.0, 0.0, 1.0]], # one_hot(2) # [[0.0, 1.0, 0.0], # one_hot(1) # [0.0, 0.0, 0.0]]] # one_hot(-1) indices = tf.ragged.constant([[0, 1], [2]]) depth = 3 tf.one_hot(indices, depth) # output: [2 x None x 3] # [[[1., 0., 0.], # [0., 1., 0.]], # [[0., 0., 1.]]] ``` Args: indices: A `Tensor` of indices. depth: A scalar defining the depth of the one hot dimension. on_value: A scalar defining the value to fill in output when `indices[j] = i`. (default: 1) off_value: A scalar defining the value to fill in output when `indices[j] != i`. (default: 0) axis: The axis to fill (default: -1, a new inner-most axis). dtype: The data type of the output tensor. name: A name for the operation (optional). Returns: output: The one-hot tensor. Raises: TypeError: If dtype of either `on_value` or `off_value` don't match `dtype` TypeError: If dtype of `on_value` and `off_value` don't match one another" 8255,_all_dimensions,tensorflow/tensorflow/python/ops/array_ops.py,4179,function,Returns a 1D-tensor listing all dimensions in x. 8256,sequence_mask,tensorflow/tensorflow/python/ops/array_ops.py,4196,function,"Returns a mask tensor representing the first N positions of each cell. If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with ``` mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n]) ``` Examples: ```python tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False], # [True, True, True, False, False], # [True, True, False, False, False]] tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False], # [True, True, True]], # [[True, True, False], # [False, False, False]]] ``` Args: lengths: integer tensor, all its values <= maxlen. maxlen: scalar integer tensor, size of last dimension of returned tensor. Default is the maximum value in `lengths`. dtype: output type of the resulting tensor. name: name of the op. Returns: A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype. Raises: ValueError: if `maxlen` is not a scalar." 8257,squeeze,tensorflow/tensorflow/python/ops/array_ops.py,4264,function,"Removes dimensions of size 1 from the shape of a tensor. Given a tensor `input`, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying `axis`. For example: >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] >>> t = tf.ones([1, 2, 1, 3, 1, 1]) >>> print(tf.shape(tf.squeeze(t)).numpy()) [2 3] Or, to remove specific size 1 dimensions: >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] >>> t = tf.ones([1, 2, 1, 3, 1, 1]) >>> print(tf.shape(tf.squeeze(t, [2, 4])).numpy()) [1 2 3 1] Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time, where `N` is the number of elements in the squeezed dimensions. Args: input: A `Tensor`. The `input` to squeeze. axis: An optional list of `ints`. Defaults to `[]`. 
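For the `sequence_mask` entry above, a minimal runnable sketch using the docstring's values:
```python
import tensorflow as tf

m = tf.sequence_mask([1, 3, 2], 5)
# m == [[ True, False, False, False, False],
#       [ True,  True,  True, False, False],
#       [ True,  True, False, False, False]]
```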
If specified, only squeezes the dimensions listed. The dimension index starts at 0. It is an error to squeeze a dimension that is not 1. Must be in the range `[-rank(input), rank(input))`. Must be specified if `input` is a `RaggedTensor`. name: A name for the operation (optional). squeeze_dims: Deprecated keyword argument that is now axis. Returns: A `Tensor`. Has the same type as `input`. Contains the same data as `input`, but has one or more dimensions of size 1 removed. Raises: ValueError: When both `squeeze_dims` and `axis` are specified." 8258,squeeze_v2,tensorflow/tensorflow/python/ops/array_ops.py,4317,function,"Removes dimensions of size 1 from the shape of a tensor. Given a tensor `input`, this operation returns a tensor of the same type with all dimensions of size 1 removed. If you don't want to remove all size 1 dimensions, you can remove specific size 1 dimensions by specifying `axis`. For example: ```python # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] tf.shape(tf.squeeze(t)) # [2, 3] ``` Or, to remove specific size 1 dimensions: ```python # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1] ``` Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a deprecated `squeeze_dims` argument. Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time, where `N` is the number of elements in the squeezed dimensions. Args: input: A `Tensor`. The `input` to squeeze. axis: An optional list of `ints`. Defaults to `[]`. If specified, only squeezes the dimensions listed. The dimension index starts at 0. It is an error to squeeze a dimension that is not 1. Must be in the range `[-rank(input), rank(input))`. Must be specified if `input` is a `RaggedTensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. Contains the same data as `input`, but has one or more dimensions of size 1 removed. Raises: ValueError: The input cannot be converted to a tensor, or the specified axis cannot be squeezed." 8259,where,tensorflow/tensorflow/python/ops/array_ops.py,4369,function,"Return the elements, either from `x` or `y`, depending on the `condition`. If both `x` and `y` are None, then this operation returns the coordinates of true elements of `condition`. The coordinates are returned in a 2-D tensor where the first dimension (rows) represents the number of true elements, and the second dimension (columns) represents the coordinates of the true elements. Keep in mind, the shape of the output tensor can vary depending on how many true values there are in input. Indices are output in row-major order. If both non-None, `x` and `y` must have the same shape. The `condition` tensor must be a scalar if `x` and `y` are scalar. If `x` and `y` are tensors of higher rank, then `condition` must be either a vector with size matching the first dimension of `x`, or must have the same shape as `x`. The `condition` tensor acts as a mask that chooses, based on the value at each element, whether the corresponding element / row in the output should be taken from `x` (if true) or `y` (if false). If `condition` is a vector and `x` and `y` are higher rank matrices, then it chooses which row (outer dimension) to copy from `x` and `y`. If `condition` has the same shape as `x` and `y`, then it chooses which element to copy from `x` and `y`. Args: condition: A `Tensor` of type `bool` x: A Tensor which may have the same shape as `condition`. 
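A minimal runnable sketch of the two `where` modes described above (illustrative values):
```python
import tensorflow as tf

# Index mode: no x/y; returns the coordinates of True elements.
idx = tf.where([[True, False], [False, True]])   # [[0, 0], [1, 1]]

# Multiplex mode: choose elementwise between x and y, with broadcasting.
out = tf.where([True, False, False, True], [1, 2, 3, 4], 100)
# out == [1, 100, 100, 4]
```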
If `condition` is rank 1, `x` may have higher rank, but its first dimension must match the size of `condition`. y: A `tensor` with the same shape and type as `x`. name: A name of the operation (optional) Returns: A `Tensor` with the same type and shape as `x`, `y` if they are non-None. Otherwise, a `Tensor` with shape `(num_true, rank(condition))`. Raises: ValueError: When exactly one of `x` or `y` is non-None." 8260,where_v2,tensorflow/tensorflow/python/ops/array_ops.py,4423,function,"Return the elements where `condition` is `True` (multiplexing `x` and `y`). This operator has two modes: in one mode both `x` and `y` are provided, in another mode neither are provided. `condition` is always expected to be a `tf.Tensor` of type `bool`. #### Retrieving indices of `True` elements If `x` and `y` are not provided (both are None): `tf.where` will return the indices of `condition` that are `True`, in the form of a 2-D tensor with shape (n, d). (Where n is the number of matching indices in `condition`, and d is the number of dimensions in `condition`). Indices are output in row-major order. >>> tf.where([True, False, False, True]) >>> tf.where([[True, False], [False, True]]) >>> tf.where([[[True, False], [False, True], [True, True]]]) #### Multiplexing between `x` and `y` If `x` and `y` are provided (both have non-None values): `tf.where` will choose an output shape from the shapes of `condition`, `x`, and `y` that all three shapes are [broadcastable](https://docs.scipy.org/doc/numpy/reference/ufuncs.html) to. The `condition` tensor acts as a mask that chooses whether the corresponding element / row in the output should be taken from `x` (if the element in `condition` is True) or `y` (if it is false). >>> tf.where([True, False, False, True], [1,2,3,4], [100,200,300,400]) >>> tf.where([True, False, False, True], [1,2,3,4], [100]) >>> tf.where([True, False, False, True], [1,2,3,4], 100) >>> tf.where([True, False, False, True], 1, 100) >>> tf.where(True, [1,2,3,4], 100) >>> tf.where(False, [1,2,3,4], 100) Args: condition: A `tf.Tensor` of type `bool` x: If provided, a Tensor which is of the same type as `y`, and has a shape broadcastable with `condition` and `y`. y: If provided, a Tensor which is of the same type as `x`, and has a shape broadcastable with `condition` and `x`. name: A name of the operation (optional). Returns: If `x` and `y` are provided: A `Tensor` with the same type as `x` and `y`, and shape that is broadcast from `condition`, `x`, and `y`. Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`. Raises: ValueError: When exactly one of `x` or `y` is non-None, or the shapes are not all broadcastable." 8261,reverse_sequence,tensorflow/tensorflow/python/ops/array_ops.py,4527,function,"Reverses variable length slices. This op first slices `input` along the dimension `batch_axis`, and for each slice `i`, reverses the first `seq_lengths[i]` elements along the dimension `seq_axis`. The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_axis]`, and `seq_lengths` must be a vector of length `input.dims[batch_axis]`. The output slice `i` along dimension `batch_axis` is then given by input slice `i`, with the first `seq_lengths[i]` slices along dimension `seq_axis` reversed. Example usage: >>> seq_lengths = [7, 2, 3, 5] >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0], ... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]] >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0) >>> output Args: input: A `Tensor`. 
The input to reverse. seq_lengths: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D with length `input.dims(batch_axis)` and `max(seq_lengths) <= input.dims(seq_axis)` seq_axis: An `int`. The dimension which is partially reversed. batch_axis: An optional `int`. Defaults to `0`. The dimension along which reversal is performed. name: A name for the operation (optional). Returns: A Tensor. Has the same type as input." 8262,reverse_sequence_v2,tensorflow/tensorflow/python/ops/array_ops.py,4587,function,"Reverses variable length slices. This op first slices `input` along the dimension `batch_axis`, and for each slice `i`, reverses the first `seq_lengths[i]` elements along the dimension `seq_axis`. The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_axis]`, and `seq_lengths` must be a vector of length `input.dims[batch_axis]`. The output slice `i` along dimension `batch_axis` is then given by input slice `i`, with the first `seq_lengths[i]` slices along dimension `seq_axis` reversed. Example usage: >>> seq_lengths = [7, 2, 3, 5] >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0], ... [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]] >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0) >>> output Args: input: A `Tensor`. The input to reverse. seq_lengths: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D with length `input.dims(batch_axis)` and `max(seq_lengths) <= input.dims(seq_axis)` seq_axis: An `int`. The dimension which is partially reversed. batch_axis: An optional `int`. Defaults to `0`. The dimension along which reversal is performed. name: A name for the operation (optional). Returns: A Tensor. Has the same type as input." 8263,gather,tensorflow/tensorflow/python/ops/array_ops.py,4644,function,"Gather slices from params axis `axis` according to indices. Gather slices from params axis `axis` according to `indices`. `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). For 0-D (scalar) `indices`: $$\begin{align*} output[p_0, ..., p_{axis-1}, && &&& p_{axis + 1}, ..., p_{N-1}] = \\ params[p_0, ..., p_{axis-1}, && indices, &&& p_{axis + 1}, ..., p_{N-1}] \end{align*}$$ Where *N* = `ndims(params)`. For 1-D (vector) `indices` with `batch_dims=0`: $$\begin{align*} output[p_0, ..., p_{axis-1}, && &i, &&p_{axis + 1}, ..., p_{N-1}] =\\ params[p_0, ..., p_{axis-1}, && indices[&i], &&p_{axis + 1}, ..., p_{N-1}] \end{align*}$$ In the general case, produces an output tensor where: $$\begin{align*} output[p_0, &..., p_{axis-1}, & &i_{B}, ..., i_{M-1}, & p_{axis + 1}, &..., p_{N-1}] = \\ params[p_0, &..., p_{axis-1}, & indices[p_0, ..., p_{B-1}, &i_{B}, ..., i_{M-1}], & p_{axis + 1}, &..., p_{N-1}] \end{align*}$$ Where *N* = `ndims(params)`, *M* = `ndims(indices)`, and *B* = `batch_dims`. Note that `params.shape[:batch_dims]` must be identical to `indices.shape[:batch_dims]`. The shape of the output tensor is: > `output.shape = params.shape[:axis] + indices.shape[batch_dims:] + > params.shape[axis + 1:]`. Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value. See also `tf.gather_nd`.
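A minimal runnable sketch of the `gather` semantics just described; the `batch_dims` call is an illustrative assumption following the general formula above:
```python
import tensorflow as tf

params = tf.constant([[0, 1, 2], [10, 11, 12]])

a = tf.gather(params, [2, 0], axis=1)            # [[2, 0], [12, 10]]

# With batch_dims=1, indices index within each batch row:
# output[b, i] = params[b, indices[b, i]]
b = tf.gather(params, [[1], [0]], batch_dims=1)  # [[1], [10]]
```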
Args: params: The `Tensor` from which to gather values. Must be at least rank `axis + 1`. indices: The index `Tensor`. Must be one of the following types: `int32`, `int64`. Must be in range `[0, params.shape[axis])`. validate_indices: Deprecated, does nothing. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The `axis` in `params` to gather `indices` from. Must be greater than or equal to `batch_dims`. Defaults to the first non-batch dimension. Supports negative indexes. batch_dims: An `integer`. The number of batch dimensions. Must be less than or equal to `rank(indices)`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`." 8264,gather_v2,tensorflow/tensorflow/python/ops/array_ops.py,4736,function, 8265,batch_gather,tensorflow/tensorflow/python/ops/array_ops.py,4759,function,Gather slices from params according to indices with leading batch dims. 8266,_batch_gather,tensorflow/tensorflow/python/ops/array_ops.py,4770,function,"Gather slices from params according to indices with leading batch dims. This operation assumes that the leading `batch_dims` dimensions of `indices` and `params` are batch dimensions; and performs a `tf.gather` operation within each batch. (If `batch_dims` is not specified, then it defaults to `rank(indices)-1`.) In the case in which `batch_dims==0`, this operation is equivalent to `tf.gather`. Args: params: A Tensor. The tensor from which to gather values. indices: A Tensor. Must be one of the following types: int32, int64. Index tensor. Must be in range `[0, params.shape[batch_dims]]`. batch_dims: An integer or none. The number of batch dimensions. Must be less than `rank(indices)`. Defaults to `rank(indices) - 1` if None. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The `axis` in `params` to gather `indices` from. Must be greater than or equal to `batch_dims`. Defaults to the first non-batch dimension. Supports negative indexes. Returns: A Tensor. Has the same type as `params`. Raises: ValueError: if `indices` has an unknown shape." 8267,gather_nd,tensorflow/tensorflow/python/ops/array_ops.py,4895,function,"Gather slices from `params` into a Tensor with shape specified by `indices`. `indices` is an K-dimensional integer tensor, best thought of as a (K-1)-dimensional tensor of indices into `params`, where each element defines a slice of `params`: output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] Whereas in `tf.gather` `indices` defines slices into the first dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the first `N` dimensions of `params`, where `N = indices.shape[-1]`. The last dimension of `indices` can be at most the rank of `params`: indices.shape[-1] <= params.rank The last dimension of `indices` corresponds to elements (if `indices.shape[-1] == params.rank`) or slices (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` of `params`. The output tensor has shape indices.shape[:-1] + params.shape[indices.shape[-1]:] Additionally both 'params' and 'indices' can have M leading batch dimensions that exactly match. In this case 'batch_dims' must be M. Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value. Some examples below. 
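As a runnable warm-up before the docstring's own examples, a minimal `gather_nd` sketch (string params come back as byte strings in TF2):
```python
import tensorflow as tf

params = tf.constant([['a', 'b'], ['c', 'd']])

e = tf.gather_nd(params, [[0, 0], [1, 1]])  # elements: [b'a', b'd']
r = tf.gather_nd(params, [[1], [0]])        # row slices: [[b'c', b'd'], [b'a', b'b']]
```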
Simple indexing into a matrix: ```python indices = [[0, 0], [1, 1]] params = [['a', 'b'], ['c', 'd']] output = ['a', 'd'] ``` Slice indexing into a matrix: ```python indices = [[1], [0]] params = [['a', 'b'], ['c', 'd']] output = [['c', 'd'], ['a', 'b']] ``` Indexing into a 3-tensor: ```python indices = [[1]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[['a1', 'b1'], ['c1', 'd1']]] indices = [[0, 1], [1, 0]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['c0', 'd0'], ['a1', 'b1']] indices = [[0, 0, 1], [1, 0, 1]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = ['b0', 'b1'] ``` The examples below are for the case when only indices have leading extra dimensions. If both 'params' and 'indices' have leading batch dimensions, use the 'batch_dims' parameter to run gather_nd in batch mode. Batched indexing into a matrix: ```python indices = [[[0, 0]], [[0, 1]]] params = [['a', 'b'], ['c', 'd']] output = [['a'], ['b']] ``` Batched slice indexing into a matrix: ```python indices = [[[1]], [[0]]] params = [['a', 'b'], ['c', 'd']] output = [[['c', 'd']], [['a', 'b']]] ``` Batched indexing into a 3-tensor: ```python indices = [[[1]], [[0]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[[['a1', 'b1'], ['c1', 'd1']]], [[['a0', 'b0'], ['c0', 'd0']]]] indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[['c0', 'd0'], ['a1', 'b1']], [['a0', 'b0'], ['c1', 'd1']]] indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['b0', 'b1'], ['d0', 'c1']] ``` Examples with batched 'params' and 'indices': ```python batch_dims = 1 indices = [[1], [0]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['c0', 'd0'], ['a1', 'b1']] batch_dims = 1 indices = [[[1]], [[0]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[['c0', 'd0']], [['a1', 'b1']]] batch_dims = 1 indices = [[[1, 0]], [[0, 1]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['c0'], ['b1']] ``` See also `tf.gather`. Args: params: A `Tensor`. The tensor from which to gather values. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. name: A name for the operation (optional). batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions. Returns: A `Tensor`. Has the same type as `params`." 8268,gather_nd_v2,tensorflow/tensorflow/python/ops/array_ops.py,5058,function, 8269,batch_gather_nd,tensorflow/tensorflow/python/ops/array_ops.py,5065,function,gather_nd implementation with batch support. 8270,quantize_v2,tensorflow/tensorflow/python/ops/array_ops.py,5153,function, 8271,quantize,tensorflow/tensorflow/python/ops/array_ops.py,5204,function,Quantize the input tensor. 8272,dequantize,tensorflow/tensorflow/python/ops/array_ops.py,5244,function, 8273,quantize_and_dequantize,tensorflow/tensorflow/python/ops/array_ops.py,5279,function,"Quantizes then dequantizes a tensor. Args: input: A `Tensor` to quantize and dequantize. input_min: If range_given=True, the minimum input value, that needs to be represented in the quantized representation. If axis is specified, this should be a vector of minimum values for each slice along axis. 
input_max: If range_given=True, the maximum input value that needs to be represented in the quantized representation. If axis is specified, this should be a vector of maximum values for each slice along axis. signed_input: Whether the quantization is signed (True) or unsigned (False). num_bits: The bitwidth of the quantization. range_given: If true, use `input_min` and `input_max` for the range of the input, otherwise determine min and max from the input `Tensor`. round_mode: Rounding mode when rounding from float values to quantized ones. One of ['HALF_TO_EVEN', 'HALF_UP']. name: Optional name for the operation. narrow_range: If true, then the absolute value of the quantized minimum value is the same as the quantized maximum value, instead of 1 greater. i.e. for 8 bit quantization, the minimum value is -127 instead of -128. axis: Integer. If specified, refers to a dimension of the input tensor, such that quantization will be per slice along that dimension. Returns: A `Tensor`. Each element is the result of quantizing and dequantizing the corresponding element of `input`." 8274,searchsorted,tensorflow/tensorflow/python/ops/array_ops.py,5339,function,"Searches input tensor for values on the innermost dimension. A 2-D example: ``` sorted_sequence = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]] values = [[2, 4, 9], [0, 2, 6]] result = searchsorted(sorted_sequence, values, side=""left"") result == [[1, 2, 2], [0, 1, 5]] result = searchsorted(sorted_sequence, values, side=""right"") result == [[1, 2, 4], [0, 2, 5]] ``` Args: sorted_sequence: N-D `Tensor` containing a sorted sequence. values: N-D `Tensor` containing the search values. side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to upper_bound. out_type: The output type (`int32` or `int64`). Default is `tf.int32`. name: Optional name for the operation. Returns: An N-D `Tensor` the size of values containing the result of applying either lower_bound or upper_bound (depending on side) to each value. The result is not a global index to the entire `Tensor`, but the index in the last dimension. Raises: ValueError: If the last dimension of `sorted_sequence` has `>= 2^31 - 1` elements. If the total size of values exceeds `2^31 - 1` elements. If the first `N-1` dimensions of the two tensors don't match." 8275,extract_image_patches_v2,tensorflow/tensorflow/python/ops/array_ops.py,5404,function,"Extract `patches` from `images`. This op collects patches from the input image, as if applying a convolution. All extracted patches are stacked in the depth (last) dimension of the output. Specifically, the op extracts patches of shape `sizes` which are `strides` apart in the input image. The output is subsampled using the `rates` argument, in the same manner as ""atrous"" or ""dilated"" convolutions. The result is a 4D tensor which is indexed by batch, row, and column. `output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]` which is taken from the input starting at `images[i, x*strides[1], y*strides[2]]`. Each output patch can be reshaped to `sizes[1], sizes[2], depth`, where `depth` is `images.shape[3]`. The output elements are taken from the input at intervals given by the `rate` argument, as in dilated convolutions. The `padding` argument has no effect on the size of each patch; it determines how many patches are extracted. If `VALID`, only patches which are fully contained in the input image are included. If `SAME`, all patches whose starting point is inside the input are included, and areas outside the input default to zero.
Example: ``` n = 10 # images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100 images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]] # We generate two outputs as follows: # 1. 3x3 patches with stride length 5 # 2. Same as above, but the rate is increased to 2 tf.image.extract_patches(images=images, sizes=[1, 3, 3, 1], strides=[1, 5, 5, 1], rates=[1, 1, 1, 1], padding='VALID') # Yields: [[[[ 1 2 3 11 12 13 21 22 23] [ 6 7 8 16 17 18 26 27 28]] [[51 52 53 61 62 63 71 72 73] [56 57 58 66 67 68 76 77 78]]]] ``` If we mark the pixels in the input image which are taken for the output with `*`, we see the pattern: ``` * * * 4 5 * * * 9 10 * * * 14 15 * * * 19 20 * * * 24 25 * * * 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 * * * 54 55 * * * 59 60 * * * 64 65 * * * 69 70 * * * 74 75 * * * 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 ``` ``` tf.image.extract_patches(images=images, sizes=[1, 3, 3, 1], strides=[1, 5, 5, 1], rates=[1, 2, 2, 1], padding='VALID') # Yields: [[[[ 1 3 5 21 23 25 41 43 45] [ 6 8 10 26 28 30 46 48 50]] [[ 51 53 55 71 73 75 91 93 95] [ 56 58 60 76 78 80 96 98 100]]]] ``` We can again draw the effect, this time using the symbols `*`, `x`, `+` and `o` to distinguish the patches: ``` * 2 * 4 * x 7 x 9 x 11 12 13 14 15 16 17 18 19 20 * 22 * 24 * x 27 x 29 x 31 32 33 34 35 36 37 38 39 40 * 42 * 44 * x 47 x 49 x + 52 + 54 + o 57 o 59 o 61 62 63 64 65 66 67 68 69 70 + 72 + 74 + o 77 o 79 o 81 82 83 84 85 86 87 88 89 90 + 92 + 94 + o 97 o 99 o ``` Args: images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth] sizes: The size of the extracted patches. Must be [1, size_rows, size_cols, 1]. strides: A 1-D Tensor of length 4. How far the centers of two consecutive patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`. rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the input stride, specifying how far two consecutive patch samples are in the input. Equivalent to extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling them spatially by a factor of `rates`. This is equivalent to `rate` in dilated (a.k.a. Atrous) convolutions. padding: The type of padding algorithm to use. name: A name for the operation (optional). Returns: A 4-D Tensor of the same type as the input." 8276,extract_image_patches,tensorflow/tensorflow/python/ops/array_ops.py,5528,function,"Extract patches from images and put them in the ""depth"" output dimension. Args: `images`: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. `ksizes`: A list of `ints` that has length `>= 4`. The size of the sliding window for each dimension of `images`. `strides`: A list of `ints` that has length `>= 4`. 1-D of length 4. How far the centers of two consecutive patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`. `rates`: A list of `ints` that has length `>= 4`. 1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the input stride, specifying how far two consecutive patch samples are in the input. Equivalent to extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling them spatially by a factor of `rates`. This is equivalent to `rate` in dilated (a.k.a. Atrous) convolutions. 
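The first `extract_patches` call from the example above, as a self-contained runnable sketch:
```python
import tensorflow as tf

n = 10
# A 1 x 10 x 10 x 1 "image" holding the numbers 1..100.
images = tf.constant(
    [[[[x * n + y + 1] for y in range(n)] for x in range(n)]], tf.int32)

patches = tf.image.extract_patches(images=images,
                                   sizes=[1, 3, 3, 1],
                                   strides=[1, 5, 5, 1],
                                   rates=[1, 1, 1, 1],
                                   padding='VALID')
# patches.shape == (1, 2, 2, 9): four 3x3 patches, each flattened into depth;
# patches[0, 0, 0] == [1, 2, 3, 11, 12, 13, 21, 22, 23]
```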
`padding`: A `string` from: ""SAME"", ""VALID"". The type of padding algorithm to use. We specify the size-related attributes as: ``` ksizes = [1, ksize_rows, ksize_cols, 1] strides = [1, strides_rows, strides_cols, 1] rates = [1, rates_rows, rates_cols, 1] ``` name: A name for the operation (optional). Returns: A Tensor. Has the same type as images." 8277,fingerprint,tensorflow/tensorflow/python/ops/array_ops.py,5575,function,"Generates fingerprint values. Generates fingerprint values of `data`. The fingerprint op considers the first dimension of `data` as the batch dimension, and `output[i]` contains the fingerprint value generated from contents in `data[i, ...]` for all `i`. The fingerprint op writes fingerprint values as byte arrays. For example, the default method `farmhash64` generates a 64-bit fingerprint value at a time. This 8-byte value is written out as a `tf.uint8` array of size 8, in little-endian order. For example, suppose that `data` has data type `tf.int32` and shape (2, 3, 4), and that the fingerprint method is `farmhash64`. In this case, the output shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from the other 12 integers in `data[1, :, :]`. Note that this op fingerprints the raw underlying buffer, and it does not fingerprint Tensor's metadata such as data type and/or shape. For example, the fingerprint values are invariant under reshapes and bitcasts as long as the batch dimension remains the same: ```python tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...)) tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...)) ``` For string data, one should expect `tf.fingerprint(data) != tf.fingerprint(tf.string.reduce_join(data))` in general. Args: data: A `Tensor`. Must have rank 1 or higher. method: A `Tensor` of type `tf.string`. Fingerprint method used by this op. Currently available method is `farmhash64`. name: A name for the operation (optional). Returns: A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals `data`'s first dimension, and the second dimension size depends on the fingerprint algorithm." 8278,convert_to_int_tensor,tensorflow/tensorflow/python/ops/array_ops.py,5623,function,Converts the given value to an integer Tensor. 8279,get_positive_axis,tensorflow/tensorflow/python/ops/array_ops.py,5634,function,"Validate an `axis` parameter, and normalize it to be positive. If `ndims` is known (i.e., not `None`), then check that `axis` is in the range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or `axis + ndims` (otherwise). If `ndims` is not known, and `axis` is positive, then return it as-is. If `ndims` is not known, and `axis` is negative, then report an error. Args: axis: An integer constant ndims: An integer constant, or `None` axis_name: The name of `axis` (for error messages). ndims_name: The name of `ndims` (for error messages). Returns: The normalized `axis` value. Raises: ValueError: If `axis` is out-of-bounds, or if `axis` is negative and `ndims is None`." 8280,repeat_with_axis,tensorflow/tensorflow/python/ops/array_ops.py,5680,function,"Repeats elements of `data`. Args: data: An `N`-dimensional tensor. repeats: A 1-D integer tensor specifying how many times each element in `axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`. Supports broadcasting from a scalar value. axis: `int`. The axis along which to repeat values.
Must be less than `max(N, 1)`. name: A name for the operation. Returns: A tensor with `max(N, 1)` dimensions. Has the same shape as `data`, except that dimension `axis` has size `sum(repeats)`. Example usage: >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0) >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0) >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1) " 8281,tile_one_dimension,tensorflow/tensorflow/python/ops/array_ops.py,5794,function,Tiles a single dimension of a tensor. 8282,_with_nonzero_rank,tensorflow/tensorflow/python/ops/array_ops.py,5807,function,"If `data` is scalar, then add a dimension; otherwise return as-is." 8283,repeat,tensorflow/tensorflow/python/ops/array_ops.py,5822,function,"Repeat elements of `input`. See also `tf.concat`, `tf.stack`, `tf.tile`. Args: input: An `N`-dimensional Tensor. repeats: An 1-D `int` Tensor. The number of repetitions for each element. repeats is broadcasted to fit the shape of the given axis. `len(repeats)` must equal `input.shape[axis]` if axis is not None. axis: An int. The axis along which to repeat values. By default (axis=None), use the flattened input array, and return a flat output array. name: A name for the operation. Returns: A Tensor which has the same shape as `input`, except along the given axis. If axis is None then the output array is flattened to match the flattened input array. Example usage: >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0) >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0) >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1) >>> repeat(3, repeats=4) >>> repeat([[1,2], [3,4]], repeats=2) " 8284,ArrayOpTest,tensorflow/tensorflow/python/ops/array_ops_test.py,31,class, 8285,batch_norm_op,tensorflow/tensorflow/python/ops/batch_norm_benchmark.py,40,function,Fused kernel for batch normalization. 8286,batch_norm_py,tensorflow/tensorflow/python/ops/batch_norm_benchmark.py,55,function,Python implementation of batch normalization. 8287,batch_norm_slow,tensorflow/tensorflow/python/ops/batch_norm_benchmark.py,61,function, 8288,build_graph,tensorflow/tensorflow/python/ops/batch_norm_benchmark.py,68,function,"Build a graph containing a sequence of batch normalizations. Args: device: string, the device to run on. input_shape: shape of the input tensor. axes: axes that are to be normalized across. num_layers: number of batch normalization layers in the graph. mode: ""op"", ""py"" or ""slow"" depending on the implementation. scale: scale after normalization. train: if true, also run backprop. Returns: An array of tensors to run()" 8289,print_difference,tensorflow/tensorflow/python/ops/batch_norm_benchmark.py,117,function,Print the difference in timing between two runs. 8290,BatchNormBenchmark,tensorflow/tensorflow/python/ops/batch_norm_benchmark.py,123,class,Benchmark batch normalization. 8291,batch_function,tensorflow/tensorflow/python/ops/batch_ops.py,32,function,"Batches the computation done by the decorated function. So, for example, in the following code ```python @batch_function(1, 2, 3) def layer(a): return tf.matmul(a, a) b = layer(w) ``` if more than one session.run call is simultaneously trying to compute `b` the values of `w` will be gathered, non-deterministically concatenated along the first axis, and only one thread will run the computation. See the documentation of the `Batch` op for more details. Assumes that all arguments of the decorated function are Tensors which will be batched along their first dimension. SparseTensor is not supported. 
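For the `repeat` entries above, a minimal runnable sketch (expected values follow from the documented semantics):
```python
import tensorflow as tf

a = tf.repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
# [b'a', b'a', b'a', b'c', b'c']

b = tf.repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
# [[1, 1, 2, 2, 2], [3, 3, 4, 4, 4]]

c = tf.repeat(3, repeats=4)   # scalar input: [3, 3, 3, 3]
```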
The return value of the decorated function must be a Tensor or a list/tuple of Tensors. Args: num_batch_threads: Number of scheduling threads for processing batches of work. Determines the number of batches processed in parallel. max_batch_size: Batch sizes will never be bigger than this. batch_timeout_micros: Maximum number of microseconds to wait before outputting an incomplete batch. allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does nothing. Otherwise, supplies a list of batch sizes, causing the op to pad batches up to one of those sizes. The entries must increase monotonically, and the final entry must equal max_batch_size. max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10. autograph: Whether to use autograph to compile python and eager style code for efficient graph-mode execution. Returns: The decorated function will return the unbatched computation output Tensors." 8292,delayed_plus1,tensorflow/tensorflow/python/ops/batch_ops_test.py,37,function,Sleeps for 100ms then returns x+1. 8293,BatchOpsTest,tensorflow/tensorflow/python/ops/batch_ops_test.py,44,class,"Tests for batch_ops.{un,}batch." 8294,bincount,tensorflow/tensorflow/python/ops/bincount_ops.py,36,function,"Counts the number of occurrences of each value in an integer array. If `minlength` and `maxlength` are not given, returns a vector with length `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. If `weights` are non-None, then index `i` of the output stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`. ```python values = tf.constant([1,1,2,3,2,4,4,5]) tf.math.bincount(values) #[0 2 2 1 2 1] ``` The maximum element in `values` is 5, so the output vector has length 5 + 1 = 6. Each bin value in the output indicates the number of occurrences of that index. Here, index 1 in the output has a value 2, indicating that the value 1 occurs twice in `values`. ```python values = tf.constant([1,1,2,3,2,4,4,5]) weights = tf.constant([1,5,0,1,0,5,4,5]) tf.math.bincount(values, weights=weights) #[0 6 0 1 9 5] ``` Each bin is incremented by the corresponding weight instead of by 1. Here, index 1 in the output has a value 6, the sum of the weights corresponding to the value 1 in `values`. **Bin-counting on a certain axis** This example takes a 2-dimensional input and returns a `Tensor` with bincounting on each sample. >>> data = np.array([[1, 2, 3, 0], [0, 0, 1, 2]], dtype=np.int32) >>> tf.math.bincount(data, axis=-1) **Bin-counting with binary_output** This example gives binary output instead of counting the occurrence. >>> data = np.array([[1, 2, 3, 0], [0, 0, 1, 2]], dtype=np.int32) >>> tf.math.bincount(data, axis=-1, binary_output=True) Args: arr: A Tensor, RaggedTensor, or SparseTensor whose values should be counted. These tensors must have a rank of 2 if `axis=-1`. weights: If non-None, must be the same shape as arr. For each value in `arr`, the bin will be incremented by the corresponding weight instead of 1. minlength: If given, ensures the output has length at least `minlength`, padding with zeros at the end if necessary. maxlength: If given, skips values in `arr` that are greater than or equal to `maxlength`, ensuring that the output has length at most `maxlength`. dtype: If `weights` is None, determines the type of the output bins. name: A name scope for the associated operations (optional). axis: The axis to slice over. Axes at and below `axis` will be flattened before bin counting.
Currently, only `0` and `-1` are supported. If None, all axes will be flattened (identical to passing `0`). binary_output: If True, this op will output 1 instead of the number of times a token appears (equivalent to one_hot + reduce_any instead of one_hot + reduce_add). Defaults to False. Returns: A vector with the same dtype as `weights` or the given `dtype`. The bin values. Raises: `InvalidArgumentError` if negative values are provided as an input." 8295,bincount_v1,tensorflow/tensorflow/python/ops/bincount_ops.py,223,function,"Counts the number of occurrences of each value in an integer array. If `minlength` and `maxlength` are not given, returns a vector with length `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. If `weights` are non-None, then index `i` of the output stores the sum of the value in `weights` at each index where the corresponding value in `arr` is `i`. Args: arr: An int32 tensor of non-negative values. weights: If non-None, must be the same shape as arr. For each value in `arr`, the bin will be incremented by the corresponding weight instead of 1. minlength: If given, ensures the output has length at least `minlength`, padding with zeros at the end if necessary. maxlength: If given, skips values in `arr` that are greater than or equal to `maxlength`, ensuring that the output has length at most `maxlength`. dtype: If `weights` is None, determines the type of the output bins. Returns: A vector with the same dtype as `weights` or the given `dtype`. The bin values." 8296,sparse_bincount,tensorflow/tensorflow/python/ops/bincount_ops.py,255,function,"Count the number of times an integer value appears in a tensor. This op takes an N-dimensional `Tensor`, `RaggedTensor`, or `SparseTensor`, and returns an N-dimensional int64 SparseTensor where element `[i0...i[axis], j]` contains the number of times the value `j` appears in slice `[i0...i[axis], :]` of the input tensor. Currently, only `axis=0` and `axis=-1` are supported. Args: values: A Tensor, RaggedTensor, or SparseTensor whose values should be counted. These tensors must have a rank of 2 if `axis=-1`. weights: If non-None, must be the same shape as `values`. For each value in `values`, the bin will be incremented by the corresponding weight instead of 1. axis: The axis to slice over. Axes at and below `axis` will be flattened before bin counting. Currently, only `0` and `-1` are supported. If None, all axes will be flattened (identical to passing `0`). minlength: If given, ensures the output has length at least `minlength`, padding with zeros at the end if necessary. maxlength: If given, skips values in `values` that are greater than or equal to `maxlength`, ensuring that the output has length at most `maxlength`. binary_output: If True, this op will output 1 instead of the number of times a token appears (equivalent to one_hot + reduce_any instead of one_hot + reduce_add). Defaults to False. name: A name for this op. Returns: A SparseTensor with `output.shape = values.shape[:axis] + [N]`, where `N` is * `maxlength` (if set); * `minlength` (if set, and `minlength > reduce_max(values)`); * `0` (if `values` is empty); * `reduce_max(values) + 1` otherwise. Examples: **Bin-counting every item in individual batches** This example takes an input (which could be a Tensor, RaggedTensor, or SparseTensor) and returns a SparseTensor where the value of (i,j) is the number of times value j appears in batch i.
>>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> output = tf.sparse.bincount(data, axis=-1) >>> print(output) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101] [ 1 10001]], shape=(6, 2), dtype=int64), values=tf.Tensor([1 2 1 2 1 1], shape=(6,), dtype=int64), dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) **Bin-counting with defined output shape** This example takes an input (which could be a Tensor, RaggedTensor, or SparseTensor) and returns a SparseTensor where the value of (i,j) is the number of times value j appears in batch i. However, all values of j above 'maxlength' are ignored. The dense_shape of the output sparse tensor is set to 'minlength'. Note that, while the input is identical to the example above, the value '10001' in batch item 2 is dropped, and the dense shape is [2, 500] instead of [2,10002] or [2, 102]. >>> minlength = maxlength = 500 >>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> output = tf.sparse.bincount( ... data, axis=-1, minlength=minlength, maxlength=maxlength) >>> print(output) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101]], shape=(5, 2), dtype=int64), values=tf.Tensor([1 2 1 2 1], shape=(5,), dtype=int64), dense_shape=tf.Tensor([ 2 500], shape=(2,), dtype=int64)) **Binary bin-counting** This example takes an input (which could be a Tensor, RaggedTensor, or SparseTensor) and returns a SparseTensor where (i,j) is 1 if the value j appears in batch i at least once and is 0 otherwise. Note that, even though some values (like 20 in batch 1 and 11 in batch 2) appear more than once, the 'values' tensor is all 1s. >>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> output = tf.sparse.bincount(data, binary_output=True, axis=-1) >>> print(output) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101] [ 1 10001]], shape=(6, 2), dtype=int64), values=tf.Tensor([1 1 1 1 1 1], shape=(6,), dtype=int64), dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) **Weighted bin-counting** This example takes two inputs - a values tensor and a weights tensor. These tensors must be identically shaped, and have the same row splits or indices in the case of RaggedTensors or SparseTensors. When performing a weighted count, the op will output a SparseTensor where the value of (i, j) is the sum of the values in the weight tensor's batch i in the locations where the values tensor has the value j. In this case, the output dtype is the same as the dtype of the weights tensor. >>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> weights = [[2, 0.25, 15, 0.5], [2, 17, 3, 0.9]] >>> output = tf.sparse.bincount(data, weights=weights, axis=-1) >>> print(output) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101] [ 1 10001]], shape=(6, 2), dtype=int64), values=tf.Tensor([2. 0.75 15. 5. 17. 0.9], shape=(6,), dtype=float32), dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64))" 8297,validate_dense_weights,tensorflow/tensorflow/python/ops/bincount_ops.py,454,function,Validates the passed weight tensor or creates an empty one. 8298,validate_sparse_weights,tensorflow/tensorflow/python/ops/bincount_ops.py,468,function,Validates the passed weight tensor or creates an empty one. 8299,validate_ragged_weights,tensorflow/tensorflow/python/ops/bincount_ops.py,502,function,Validates the passed weight tensor or creates an empty one. 
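The bincount entries above (8294-8296) describe dense, weighted, and sparse counting. A minimal runnable sketch of that documented behavior, assuming TensorFlow 2.x with eager execution (the input values are taken from the docstrings above):

```python
import numpy as np
import tensorflow as tf

values = tf.constant([1, 1, 2, 3, 2, 4, 4, 5])
weights = tf.constant([1, 5, 0, 1, 0, 5, 4, 5])

# Dense counts: output length is tf.reduce_max(values) + 1 = 6.
print(tf.math.bincount(values))                   # [0 2 2 1 2 1]

# Weighted counts: each bin accumulates its weights instead of adding 1.
print(tf.math.bincount(values, weights=weights))  # [0 6 0 1 9 5]

# Per-batch counts as a SparseTensor (sparse_bincount is exported as
# tf.sparse.bincount); the indices are (batch, value) pairs.
data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64)
print(tf.sparse.bincount(data, axis=-1))
```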
8300,TestSparseCount,tensorflow/tensorflow/python/ops/bincount_ops_test.py,35,class, 8301,TestDenseBincount,tensorflow/tensorflow/python/ops/bincount_ops_test.py,514,class, 8302,TestSparseCountFailureModes,tensorflow/tensorflow/python/ops/bincount_ops_test.py,745,class, 8303,BitwiseOpTest,tensorflow/tensorflow/python/ops/bitwise_ops_test.py,32,class, 8304,PruningMode,tensorflow/tensorflow/python/ops/boosted_trees_ops.py,55,class,Class for working with Pruning modes. 8305,QuantileAccumulatorSaveable,tensorflow/tensorflow/python/ops/boosted_trees_ops.py,70,class,SaveableObject implementation for QuantileAccumulator. 8306,QuantileAccumulator,tensorflow/tensorflow/python/ops/boosted_trees_ops.py,99,class,"SaveableObject implementation for QuantileAccumulator. The bucket boundaries are serialized and deserialized from checkpointing." 8307,_TreeEnsembleSavable,tensorflow/tensorflow/python/ops/boosted_trees_ops.py,165,class,SaveableObject implementation for TreeEnsemble. 8308,TreeEnsemble,tensorflow/tensorflow/python/ops/boosted_trees_ops.py,210,class,Creates TreeEnsemble resource. 8309,uniform_candidate_sampler,tensorflow/tensorflow/python/ops/candidate_sampling_ops.py,36,function,"Samples a set of classes using a uniform base distribution. This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution for this operation is the uniform distribution over the range of integers `[0, range_max)`. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. The `sampled_candidates` return value will have shape `[num_sampled]`. If `unique=True`, `num_sampled` must be less than or equal to `range_max`. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes, either with possible duplicates (`unique=False`) or all unique (`unique=True`). In either case, `sampled_candidates` is independent of the true classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`." 8310,log_uniform_candidate_sampler,tensorflow/tensorflow/python/ops/candidate_sampling_ops.py,99,function,"Samples a set of classes using a log-uniform (Zipfian) base distribution. 
This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution for this operation is an approximately log-uniform or Zipfian distribution: `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` This sampler is useful when the target classes approximately follow such a distribution - for example, if the classes represent words in a lexicon sorted in decreasing order of frequency. If your classes are not ordered by decreasing frequency, do not use this op. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`." 8311,learned_unigram_candidate_sampler,tensorflow/tensorflow/python/ops/candidate_sampling_ops.py,162,function,"Samples a set of classes from a distribution learned during training. This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution for this operation is constructed on the fly during training. It is a unigram distribution over the target classes seen so far during training. Every integer in `[0, range_max)` begins with a weight of 1, and is incremented by 1 each time it is seen as a target class. The base distribution is not saved to checkpoints, so it is reset when the model is reloaded. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. 
The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`." 8312,fixed_unigram_candidate_sampler,tensorflow/tensorflow/python/ops/candidate_sampling_ops.py,221,function,"Samples a set of classes using the provided (fixed) base distribution. This operation randomly samples a tensor of sampled classes (`sampled_candidates`) from the range of integers `[0, range_max)`. The elements of `sampled_candidates` are drawn without replacement (if `unique=True`) or with replacement (if `unique=False`) from the base distribution. The base distribution is read from a file or passed in as an in-memory array. There is also an option to skew the distribution by applying a distortion power to the weights. In addition, this operation returns tensors `true_expected_count` and `sampled_expected_count` representing the number of times each of the target classes (`true_classes`) and the sampled classes (`sampled_candidates`) is expected to occur in an average tensor of sampled classes. These values correspond to `Q(y|x)` defined in [this document](http://www.tensorflow.org/extras/candidate_sampling.pdf). If `unique=True`, then these are post-rejection probabilities and we compute them approximately. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of classes to randomly sample. unique: A `bool`. Determines whether all sampled classes in a batch are unique. range_max: An `int`. The number of possible classes. vocab_file: Each valid line in this file (which should have a CSV-like format) corresponds to a valid word ID. IDs are in sequential order, starting from num_reserved_ids. The last entry in each line is expected to be a value corresponding to the count or relative probability. Exactly one of `vocab_file` and `unigrams` needs to be passed to this operation. distortion: The distortion is used to skew the unigram probability distribution. Each weight is first raised to the distortion's power before adding to the internal unigram distribution. As a result, `distortion = 1.0` gives regular unigram sampling (as defined by the vocab file), and `distortion = 0.0` gives a uniform distribution. num_reserved_ids: Optionally some reserved IDs can be added in the range `[0, num_reserved_ids)` by the users. One use case is that a special unknown word token is used as ID 0. These IDs will have a sampling probability of 0. num_shards: A sampler can be used to sample from a subset of the original range in order to speed up the whole computation through parallelism. This parameter (together with `shard`) indicates the number of partitions that are being used in the overall computation. 
shard: A sampler can be used to sample from a subset of the original range in order to speed up the whole computation through parallelism. This parameter (together with `num_shards`) indicates the particular partition number of the operation, when partitioning is being used. unigrams: A list of unigram counts or probabilities, one per ID in sequential order. Exactly one of `vocab_file` and `unigrams` should be passed to this operation. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled classes. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`." 8313,all_candidate_sampler,tensorflow/tensorflow/python/ops/candidate_sampling_ops.py,313,function,"Generate the set of all classes. Deterministically generates and returns the set of all possible classes. For testing purposes. There is no need to use this, since you might as well use full softmax or full logistic regression. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_true: An `int`. The number of target classes per training example. num_sampled: An `int`. The number of possible classes. unique: A `bool`. Ignored. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. This operation deterministically returns the entire range `[0, num_sampled)`. true_expected_count: A tensor of type `float`. Same shape as `true_classes`. The expected counts under the sampling distribution of each of `true_classes`. All returned values are 1.0. sampled_expected_count: A tensor of type `float`. Same shape as `sampled_candidates`. The expected counts under the sampling distribution of each of `sampled_candidates`. All returned values are 1.0." 8314,compute_accidental_hits,tensorflow/tensorflow/python/ops/candidate_sampling_ops.py,350,function,"Compute the position ids in `sampled_candidates` matching `true_classes`. In Candidate Sampling, this operation facilitates virtually removing sampled classes which happen to match target classes. This is done in Sampled Softmax and Sampled Logistic. See our [Candidate Sampling Algorithms Reference](http://www.tensorflow.org/extras/candidate_sampling.pdf). We presuppose that the `sampled_candidates` are unique. We call it an 'accidental hit' when one of the target classes matches one of the sampled classes. This operation reports accidental hits as triples `(index, id, weight)`, where `index` represents the row number in `true_classes`, `id` represents the position in `sampled_candidates`, and `weight` is `-FLOAT_MAX`. The result of this op should be passed through a `sparse_to_dense` operation, then added to the logits of the sampled classes. This removes the contradictory effect of accidentally sampling the true target classes as noise classes for the same example. Args: true_classes: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`. The sampled_candidates output of CandidateSampler. num_true: An `int`.
The number of target classes per training example. seed: An `int`. An operation-specific seed. Default is 0. name: A name for the operation (optional). Returns: indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`. Values indicate rows in `true_classes`. ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`. Values indicate positions in `sampled_candidates`. weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`. Each value is `-FLOAT_MAX`." 8315,_maybe_constant_value_string,tensorflow/tensorflow/python/ops/check_ops.py,73,function, 8316,_assert_static,tensorflow/tensorflow/python/ops/check_ops.py,82,function,Raises an InvalidArgumentError with as much information as possible. 8317,_shape_and_dtype_str,tensorflow/tensorflow/python/ops/check_ops.py,90,function,Returns a string containing tensor's shape and dtype. 8318,_unary_assert_doc,tensorflow/tensorflow/python/ops/check_ops.py,95,function,"Common docstring for assert_* ops that evaluate a unary predicate over every element of a tensor. Args: sym: Mathematical symbol for the check performed on each element, i.e. ""> 0"" sym_name: English-language name for the op described by sym Returns: Decorator that adds the appropriate docstring to the function for symbol `sym`." 8319,_binary_assert_doc,tensorflow/tensorflow/python/ops/check_ops.py,158,function,"Common docstring for most of the v1 assert_* ops that compare two tensors element-wise. Args: sym: Binary operation symbol, i.e. ""=="" Returns: Decorator that adds the appropriate docstring to the function for symbol `sym`." 8320,_make_assert_msg_data,tensorflow/tensorflow/python/ops/check_ops.py,221,function,"Subroutine of _binary_assert that generates the components of the default error message when running in eager mode. Args: sym: Mathematical symbol for the test to apply to pairs of tensor elements, i.e. ""=="" x: First input to the assertion after applying `convert_to_tensor()` y: Second input to the assertion summarize: Value of the ""summarize"" parameter to the original assert_* call; tells how many elements of each tensor to print. test_op: TensorFlow op that returns a Boolean tensor with True in each position where the assertion is satisfied. Returns: List of tensors and scalars that, when stringified and concatenated, will produce the error message string." 8321,_pretty_print,tensorflow/tensorflow/python/ops/check_ops.py,275,function,"Format a data item for use in an error message in eager mode. Args: data_item: One of the items in the ""data"" argument to an assert_* function. Can be a Tensor or a scalar value. summarize: How many elements to retain of each tensor-valued entry in data. Returns: An appropriate string representation of data_item" 8322,_binary_assert,tensorflow/tensorflow/python/ops/check_ops.py,301,function,"Generic binary elementwise assertion. Implements the behavior described in _binary_assert_doc() above. Args: sym: Mathematical symbol for the test to apply to pairs of tensor elements, i.e. ""=="" opname: Name of the assert op in the public API, i.e. ""assert_equal"" op_func: Function that, if passed the two Tensor inputs to the assertion (x and y), will return the test to be passed to reduce_all(), i.e. math_ops.equal for assert_equal(). static_func: Function that, if passed numpy ndarray versions of the two inputs to the assertion, will return a Boolean ndarray containing True in all positions where the assertion PASSES, i.e. np.equal for assert_equal(). x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to the value of `opname`. Returns: See docstring template in _binary_assert_doc()." 8323,assert_proper_iterable,tensorflow/tensorflow/python/ops/check_ops.py,381,function,"Static assert that values is a ""proper"" iterable. `Ops` that expect iterables of `Tensor` can call this to validate input. Useful since `Tensor`, `ndarray`, byte/text type are all iterables themselves. Args: values: Object to be checked. Raises: TypeError: If `values` is not iterable or is one of `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`." 8324,assert_negative_v2,tensorflow/tensorflow/python/ops/check_ops.py,410,function,"Assert the condition `x < 0` holds element-wise. This Op checks that `x[i] < 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not negative everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_negative"". Returns: Op raising `InvalidArgumentError` unless `x` is all negative. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] < 0` is False. The check can be performed immediately during eager execution or if `x` is statically known." 8325,assert_negative,tensorflow/tensorflow/python/ops/check_ops.py,445,function, 8326,assert_positive_v2,tensorflow/tensorflow/python/ops/check_ops.py,464,function,"Assert the condition `x > 0` holds element-wise. This Op checks that `x[i] > 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not positive everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_positive"". Returns: Op raising `InvalidArgumentError` unless `x` is all positive. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] > 0` is False. The check can be performed immediately during eager execution or if `x` is statically known." 8327,assert_positive,tensorflow/tensorflow/python/ops/check_ops.py,499,function, 8328,assert_non_negative_v2,tensorflow/tensorflow/python/ops/check_ops.py,517,function,"Assert the condition `x >= 0` holds element-wise. This Op checks that `x[i] >= 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not >= 0 everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. 
summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_non_negative"". Returns: Op raising `InvalidArgumentError` unless `x` is all non-negative. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] >= 0` is False. The check can be performed immediately during eager execution or if `x` is statically known." 8329,assert_non_negative,tensorflow/tensorflow/python/ops/check_ops.py,554,function, 8330,assert_non_positive_v2,tensorflow/tensorflow/python/ops/check_ops.py,573,function,"Assert the condition `x <= 0` holds element-wise. This Op checks that `x[i] <= 0` holds for every element of `x`. If `x` is empty, this is trivially satisfied. If `x` is not <= 0 everywhere, `message`, as well as the first `summarize` entries of `x` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_non_positive"". Returns: Op raising `InvalidArgumentError` unless `x` is all non-positive. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x[i] <= 0` is False. The check can be performed immediately during eager execution or if `x` is statically known." 8331,assert_non_positive,tensorflow/tensorflow/python/ops/check_ops.py,610,function, 8332,assert_equal_v2,tensorflow/tensorflow/python/ops/check_ops.py,629,function,"Assert the condition `x == y` holds element-wise. This Op checks that `x[i] == y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` and `y` are not equal, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_equal"". Returns: Op that raises `InvalidArgumentError` if `x == y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x == y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known." 8333,assert_equal,tensorflow/tensorflow/python/ops/check_ops.py,665,function, 8334,assert_none_equal_v2,tensorflow/tensorflow/python/ops/check_ops.py,676,function,"Assert the condition `x != y` holds for all elements. This Op checks that `x[i] != y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If any elements of `x` and `y` are equal, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. 
y: Numeric `Tensor`, same dtype as and broadcastable to `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_none_equal"". Returns: Op that raises `InvalidArgumentError` if `x != y` is ever False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x != y` is False for any pair of elements in `x` and `y`. The check can be performed immediately during eager execution or if `x` and `y` are statically known." 8335,assert_none_equal,tensorflow/tensorflow/python/ops/check_ops.py,717,function, 8336,assert_near_v2,tensorflow/tensorflow/python/ops/check_ops.py,725,function,"Assert the condition `x` and `y` are close element-wise. This Op checks that `tf.abs(x[i] - y[i]) < atol + rtol * tf.abs(y[i])` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If any elements of `x` and `y` are not close, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. The default `atol` and `rtol` are `10 * eps`, where `eps` is the smallest representable positive number such that `1 + eps != 1`. This is about `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`. See `numpy.finfo`. Args: x: Float or complex `Tensor`. y: Float or complex `Tensor`, same dtype as and broadcastable to `x`. rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The relative tolerance. Default is `10 * eps`. atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The absolute tolerance. Default is `10 * eps`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_near"". Returns: Op that raises `InvalidArgumentError` if `x` and `y` are not close enough. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x` and `y` are not close for some pair of elements. The check can be performed immediately during eager execution or if `x` and `y` are statically known. @compatibility(numpy) Similar to `numpy.testing.assert_allclose`, except tolerance depends on data type. This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`, and even `16bit` data. @end_compatibility" 8337,assert_near,tensorflow/tensorflow/python/ops/check_ops.py,780,function,"Assert the condition `x` and `y` are close element-wise. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_near(x, y)]): output = tf.reduce_sum(x) ``` This condition holds if for every pair of (possibly broadcast) elements `x[i]`, `y[i]`, we have ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```. If both `x` and `y` are empty, this is trivially satisfied. The default `atol` and `rtol` are `10 * eps`, where `eps` is the smallest representable positive number such that `1 + eps != 1`. This is about `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`. See `numpy.finfo`. Args: x: Float or complex `Tensor`.
y: Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`. rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The relative tolerance. Default is `10 * eps`. atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The absolute tolerance. Default is `10 * eps`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`, `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_near"". Returns: Op that raises `InvalidArgumentError` if `x` and `y` are not close enough. @compatibility(numpy) Similar to `numpy.testing.assert_allclose`, except tolerance depends on data type. This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`, and even `16bit` data. @end_compatibility" 8338,assert_less_v2,tensorflow/tensorflow/python/ops/check_ops.py,862,function,"Assert the condition `x < y` holds element-wise. This Op checks that `x[i] < y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not less than `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_less"". Returns: Op that raises `InvalidArgumentError` if `x < y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x < y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known." 8339,assert_less,tensorflow/tensorflow/python/ops/check_ops.py,899,function, 8340,assert_less_equal_v2,tensorflow/tensorflow/python/ops/check_ops.py,906,function,"Assert the condition `x <= y` holds element-wise. This Op checks that `x[i] <= y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not less than or equal to `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_less_equal"". Returns: Op that raises `InvalidArgumentError` if `x <= y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x <= y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known." 8341,assert_less_equal,tensorflow/tensorflow/python/ops/check_ops.py,945,function, 8342,assert_greater_v2,tensorflow/tensorflow/python/ops/check_ops.py,952,function,"Assert the condition `x > y` holds element-wise.
This Op checks that `x[i] > y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not greater than `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_greater"". Returns: Op that raises `InvalidArgumentError` if `x > y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x > y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known." 8343,assert_greater,tensorflow/tensorflow/python/ops/check_ops.py,990,function, 8344,assert_greater_equal_v2,tensorflow/tensorflow/python/ops/check_ops.py,997,function,"Assert the condition `x >= y` holds element-wise. This Op checks that `x[i] >= y[i]` holds for every pair of (possibly broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is trivially satisfied. If `x` is not greater than or equal to `y` element-wise, `message`, as well as the first `summarize` entries of `x` and `y` are printed, and `InvalidArgumentError` is raised. Args: x: Numeric `Tensor`. y: Numeric `Tensor`, same dtype as and broadcastable to `x`. message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to ""assert_greater_equal"". Returns: Op that raises `InvalidArgumentError` if `x >= y` is False. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x >= y` is False. The check can be performed immediately during eager execution or if `x` and `y` are statically known." 8345,assert_greater_equal,tensorflow/tensorflow/python/ops/check_ops.py,1037,function, 8346,_assert_rank_condition,tensorflow/tensorflow/python/ops/check_ops.py,1043,function,"Assert `x` has a rank that satisfies a given condition. Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. static_condition: A python function that takes `[actual_rank, given_rank]` and returns `True` if the condition is satisfied, `False` otherwise. dynamic_condition: An `op` that takes [actual_rank, given_rank] and returns `True` if the condition is satisfied, `False` otherwise. data: The tensors to print out if the condition is false. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. Returns: Op raising `InvalidArgumentError` if `x` fails dynamic_condition. Raises: ValueError: If static checks determine `x` fails static_condition." 8347,assert_rank_v2,tensorflow/tensorflow/python/ops/check_ops.py,1093,function,"Assert that `x` has rank equal to `rank`. This Op checks that the rank of `x` is equal to `rank`. If `x` has a different rank, `message`, as well as the shape of `x` are printed, and `InvalidArgumentError` is raised. Args: x: `Tensor`. rank: Scalar integer `Tensor`.
message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_rank"". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank. If static checks determine `x` has correct rank, a `no_op` is returned. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and `x` does not have rank `rank`. The check can be performed immediately during eager execution or if the shape of `x` is statically known." 8348,assert_rank,tensorflow/tensorflow/python/ops/check_ops.py,1127,function,"Assert `x` has rank equal to `rank`. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar integer `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and the shape of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_rank"". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank." 8349,assert_rank_at_least_v2,tensorflow/tensorflow/python/ops/check_ops.py,1190,function,"Assert that `x` has rank of at least `rank`. This Op checks that the rank of `x` is greater than or equal to `rank`. If `x` has a rank lower than `rank`, `message`, as well as the shape of `x` are printed, and `InvalidArgumentError` is raised. Args: x: `Tensor`. rank: Scalar integer `Tensor`. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_rank_at_least"". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or higher. If static checks determine `x` has correct rank, a `no_op` is returned. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: `x` does not have rank at least `rank`, but the rank cannot be statically determined. ValueError: If static checks determine `x` has mismatched rank." 8350,assert_rank_at_least,tensorflow/tensorflow/python/ops/check_ops.py,1225,function,"Assert `x` has rank equal to `rank` or higher. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_rank_at_least(x, 2)]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. rank: Scalar `Tensor`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_rank_at_least"". Returns: Op raising `InvalidArgumentError` unless `x` has specified rank or higher. If static checks determine `x` has correct rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has wrong rank."
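The check_ops entries above repeatedly note that the returned op "can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed." A minimal sketch of that pattern, assuming TensorFlow 2.x (`checked_sum` is an illustrative name, not part of the indexed source):

```python
import tensorflow as tf

@tf.function
def checked_sum(x):
  # In graph mode the assert functions return ops; gating the reduction on
  # them defers it until the runtime checks have executed.
  checks = [
      tf.debugging.assert_rank(x, 2),        # see assert_rank_v2 (8347)
      tf.debugging.assert_non_negative(x),   # see assert_non_negative_v2 (8328)
  ]
  with tf.control_dependencies(checks):
    return tf.reduce_sum(x)

print(checked_sum(tf.constant([[1.0, 2.0], [3.0, 4.0]])))  # tf.Tensor(10.0, ...)
# checked_sum(tf.constant([[-1.0, 2.0]])) raises InvalidArgumentError at runtime.
```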
8351,_static_rank_in,tensorflow/tensorflow/python/ops/check_ops.py,1289,function, 8352,_dynamic_rank_in,tensorflow/tensorflow/python/ops/check_ops.py,1293,function, 8353,_assert_ranks_condition,tensorflow/tensorflow/python/ops/check_ops.py,1303,function,"Assert `x` has a rank that satisfies a given condition. Args: x: Numeric `Tensor`. ranks: Scalar `Tensor`. static_condition: A python function that takes `[actual_rank, given_ranks]` and returns `True` if the condition is satisfied, `False` otherwise. dynamic_condition: An `op` that takes [actual_rank, given_ranks] and returns `True` if the condition is satisfied, `False` otherwise. data: The tensors to print out if the condition is false. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. Returns: Op raising `InvalidArgumentError` if `x` fails dynamic_condition. Raises: ValueError: If static checks determine `x` fails static_condition." 8354,assert_rank_in_v2,tensorflow/tensorflow/python/ops/check_ops.py,1357,function,"Assert that `x` has a rank in `ranks`. This Op checks that the rank of `x` is in `ranks`. If `x` has a different rank, `message`, as well as the shape of `x` are printed, and `InvalidArgumentError` is raised. Args: x: `Tensor`. ranks: `Iterable` of scalar `Tensor` objects. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_rank_in"". Returns: Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`. If static checks determine `x` has matching rank, a `no_op` is returned. This can be used with `tf.control_dependencies` inside of `tf.function`s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: `x` does not have rank in `ranks`, but the rank cannot be statically determined. ValueError: If static checks determine `x` has mismatched rank." 8355,assert_rank_in,tensorflow/tensorflow/python/ops/check_ops.py,1391,function,"Assert `x` has rank in `ranks`. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]): output = tf.reduce_sum(x) ``` Args: x: Numeric `Tensor`. ranks: Iterable of scalar `Tensor` objects. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of `x`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_rank_in"". Returns: Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`. If static checks determine `x` has matching rank, a `no_op` is returned. Raises: ValueError: If static checks determine `x` has mismatched rank." 8356,assert_integer_v2,tensorflow/tensorflow/python/ops/check_ops.py,1454,function,"Assert that `x` is of integer dtype. If `x` has a non-integer type, `message`, as well as the dtype of `x` are printed, and `InvalidArgumentError` is raised. This can always be checked statically, so this method returns nothing. Args: x: A `Tensor`. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_integer"". Raises: TypeError: If `x.dtype` is not a non-quantized integer type." 8357,assert_integer,tensorflow/tensorflow/python/ops/check_ops.py,1476,function,"Assert that `x` is of integer dtype.
Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_integer(x)]): output = tf.reduce_sum(x) ``` Args: x: `Tensor` whose basetype is integer and is not quantized. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_integer"". Raises: TypeError: If `x.dtype` is anything other than non-quantized integer. Returns: A `no_op` that does nothing. Type can be determined statically." 8358,assert_type_v2,tensorflow/tensorflow/python/ops/check_ops.py,1515,function,"Asserts that the given `Tensor` is of the specified type. This can always be checked statically, so this method returns nothing. Args: tensor: A `Tensor` or `SparseTensor`. tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`, etc). message: A string to prefix to the default message. name: A name for this operation. Defaults to ""assert_type"" Raises: TypeError: If the tensor's data type doesn't match `tf_type`." 8359,assert_type,tensorflow/tensorflow/python/ops/check_ops.py,1536,function,"Statically asserts that the given `Tensor` is of the specified type. Args: tensor: A `Tensor` or `SparseTensor`. tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`, etc). message: A string to prefix to the default message. name: A name to give this `Op`. Defaults to ""assert_type"" Raises: TypeError: If the tensor's data type doesn't match `tf_type`. Returns: A `no_op` that does nothing. Type can be determined statically." 8360,_dimension_sizes,tensorflow/tensorflow/python/ops/check_ops.py,1567,function,"Gets the dimension sizes of a tensor `x`. If a size can be determined statically it is returned as an integer, otherwise as a tensor. If `x` is a scalar it is treated as rank 1 size 1. Args: x: A `Tensor`. Returns: Dimension sizes." 8361,_symbolic_dimension_sizes,tensorflow/tensorflow/python/ops/check_ops.py,1598,function, 8362,_has_known_value,tensorflow/tensorflow/python/ops/check_ops.py,1606,function, 8363,_is_symbol_for_any_size,tensorflow/tensorflow/python/ops/check_ops.py,1616,function, 8364,assert_shapes_v2,tensorflow/tensorflow/python/ops/check_ops.py,1627,function,"Assert tensor shapes and dimension size relationships between tensors. This Op checks that a collection of tensors' shape relationships satisfies given constraints. Example: >>> n = 10 >>> q = 3 >>> d = 7 >>> x = tf.zeros([n,q]) >>> y = tf.ones([n,d]) >>> param = tf.Variable([1.0, 2.0, 3.0]) >>> scalar = 1.0 >>> tf.debugging.assert_shapes([ ... (x, ('N', 'Q')), ... (y, ('N', 'D')), ... (param, ('Q',)), ... (scalar, ()), ... ]) >>> tf.debugging.assert_shapes([ ... (x, ('N', 'D')), ... (y, ('N', 'D')) ... ]) Traceback (most recent call last): ... ValueError: ... If `x`, `y`, `param` or `scalar` does not have a shape that satisfies all specified constraints, `message`, as well as the first `summarize` entries of the first encountered violating tensor are printed, and `InvalidArgumentError` is raised. Size entries in the specified shapes are checked against other entries by their __hash__, except: - a size entry is interpreted as an explicit size if it can be parsed as an integer primitive. - a size entry is interpreted as *any* size if it is None or '.'. If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates a variable number of outer dimensions of unspecified size, i.e. the constraint applies to the inner-most dimensions only.
Scalar tensors and specified shapes of length zero (excluding the 'inner-most' prefix) are both treated as having a single dimension of size one. Args: shapes: dictionary with (`Tensor` to shape) items, or a list of (`Tensor`, shape) tuples. A shape must be an iterable. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of the violating tensor. summarize: Print this many entries of the tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_shapes"". Raises: ValueError: If static checks determine any shape constraint is violated." 8365,assert_shapes,tensorflow/tensorflow/python/ops/check_ops.py,1694,function,"Assert tensor shapes and dimension size relationships between tensors. This Op checks that a collection of tensors' shape relationships satisfies given constraints. Example: >>> n = 10 >>> q = 3 >>> d = 7 >>> x = tf.zeros([n,q]) >>> y = tf.ones([n,d]) >>> param = tf.Variable([1.0, 2.0, 3.0]) >>> scalar = 1.0 >>> tf.debugging.assert_shapes([ ... (x, ('N', 'Q')), ... (y, ('N', 'D')), ... (param, ('Q',)), ... (scalar, ()), ... ]) >>> tf.debugging.assert_shapes([ ... (x, ('N', 'D')), ... (y, ('N', 'D')) ... ]) Traceback (most recent call last): ... ValueError: ... Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.assert_shapes(shapes)]): output = tf.matmul(x, y, transpose_a=True) ``` If `x`, `y`, `param` or `scalar` does not have a shape that satisfies all specified constraints, `message`, as well as the first `summarize` entries of the first encountered violating tensor are printed, and `InvalidArgumentError` is raised. Size entries in the specified shapes are checked against other entries by their __hash__, except: - a size entry is interpreted as an explicit size if it can be parsed as an integer primitive. - a size entry is interpreted as *any* size if it is None or '.'. If the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates a variable number of outer dimensions of unspecified size, i.e. the constraint applies to the inner-most dimensions only. Scalar tensors and specified shapes of length zero (excluding the 'inner-most' prefix) are both treated as having a single dimension of size one. Args: shapes: A list of (`Tensor`, `shape`) tuples, wherein `shape` is the expected shape of `Tensor`. See the example code above. The `shape` must be an iterable. Each element of the iterable can be either a concrete integer value or a string that abstractly represents the dimension. For example, - `('N', 'Q')` specifies a 2D shape wherein the first and second dimensions of shape may or may not be equal. - `('N', 'N', 'Q')` specifies a 3D shape wherein the first and second dimensions are equal. - `(1, 'N')` specifies a 2D shape wherein the first dimension is exactly 1 and the second dimension can be any value. Note that the abstract dimension letters take effect across different tuple elements of the list. For example, `tf.debugging.assert_shapes([(x, ('N', 'A')), (y, ('N', 'B'))])` asserts that both `x` and `y` are rank-2 tensors and their first dimensions are equal (`N`). `shape` can also be a `tf.TensorShape`. data: The tensors to print out if the condition is False. Defaults to error message and first few entries of the violating tensor. summarize: Print this many entries of the tensor. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to ""assert_shapes"".
Returns: Op raising `InvalidArgumentError` unless all shape constraints are satisfied. If static checks determine all constraints are satisfied, a `no_op` is returned. Raises: ValueError: If static checks determine any shape constraint is violated." 8366,_get_diff_for_monotonic_comparison,tensorflow/tensorflow/python/ops/check_ops.py,1949,function,Gets the difference x[1:] - x[:-1]. 8367,is_numeric_tensor,tensorflow/tensorflow/python/ops/check_ops.py,1969,function,"Returns `True` if the elements of `tensor` are numbers. Specifically, returns `True` if the dtype of `tensor` is one of the following: * `tf.float32` * `tf.float64` * `tf.int8` * `tf.int16` * `tf.int32` * `tf.int64` * `tf.uint8` * `tf.qint8` * `tf.qint32` * `tf.quint8` * `tf.complex64` Returns `False` if `tensor` is of a non-numeric type or if `tensor` is not a `tf.Tensor` object." 8368,is_non_decreasing,tensorflow/tensorflow/python/ops/check_ops.py,2001,function,"Returns `True` if `x` is non-decreasing. Elements of `x` are compared in row-major order. The tensor `[x[0],...]` is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`. If `x` has fewer than two elements, it is trivially non-decreasing. See also: `is_strictly_increasing` >>> x1 = tf.constant([1.0, 1.0, 3.0]) >>> tf.math.is_non_decreasing(x1) <tf.Tensor: shape=(), dtype=bool, numpy=True> >>> x2 = tf.constant([3.0, 1.0, 2.0]) >>> tf.math.is_non_decreasing(x2) <tf.Tensor: shape=(), dtype=bool, numpy=False> Args: x: Numeric `Tensor`. name: A name for this operation (optional). Defaults to ""is_non_decreasing"" Returns: Boolean `Tensor`, equal to `True` iff `x` is non-decreasing. Raises: TypeError: if `x` is not a numeric tensor." 8369,is_strictly_increasing,tensorflow/tensorflow/python/ops/check_ops.py,2043,function,"Returns `True` if `x` is strictly increasing. Elements of `x` are compared in row-major order. The tensor `[x[0],...]` is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`. If `x` has fewer than two elements, it is trivially strictly increasing. See also: `is_non_decreasing` >>> x1 = tf.constant([1.0, 2.0, 3.0]) >>> tf.math.is_strictly_increasing(x1) <tf.Tensor: shape=(), dtype=bool, numpy=True> >>> x2 = tf.constant([3.0, 1.0, 2.0]) >>> tf.math.is_strictly_increasing(x2) <tf.Tensor: shape=(), dtype=bool, numpy=False> Args: x: Numeric `Tensor`. name: A name for this operation (optional). Defaults to ""is_strictly_increasing"" Returns: Boolean `Tensor`, equal to `True` iff `x` is strictly increasing. Raises: TypeError: if `x` is not a numeric tensor." 8370,_assert_same_base_type,tensorflow/tensorflow/python/ops/check_ops.py,2077,function,"Asserts all items are of the same base type. Args: items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`, `Operation`, or `IndexedSlices`). Can include `None` elements, which will be ignored. expected_type: Expected type. If not specified, assert all items are of the same base type. Returns: Validated type, or None if neither expected_type nor items provided. Raises: ValueError: If any types do not match." 8371,assert_same_float_dtype,tensorflow/tensorflow/python/ops/check_ops.py,2129,function,"Validate and return float type based on `tensors` and `dtype`. For ops such as matrix multiplication, inputs and weights must be of the same float type. This function validates that all `tensors` are the same type, validates that type is `dtype` (if supplied), and returns the type. Type must be a floating point type. If neither `tensors` nor `dtype` is supplied, the function will return `dtypes.float32`. Args: tensors: Tensors of input values. Can include `None` elements, which will be ignored. dtype: Expected type. Returns: Validated type. 
Raises: ValueError: if neither `tensors` nor `dtype` is supplied, or result is not float, or the common type of the inputs is not a floating point type." 8372,assert_scalar_v2,tensorflow/tensorflow/python/ops/check_ops.py,2161,function,"Asserts that the given `tensor` is a scalar. This function raises `ValueError` unless it can be certain that the given `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is unknown. This is always checked statically, so this method returns nothing. Args: tensor: A `Tensor`. message: A string to prefix to the default message. name: A name for this operation. Defaults to ""assert_scalar"" Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown." 8373,assert_scalar,tensorflow/tensorflow/python/ops/check_ops.py,2185,function,"Asserts that the given `tensor` is a scalar (i.e. zero-dimensional). This function raises `ValueError` unless it can be certain that the given `tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is unknown. Args: tensor: A `Tensor`. name: A name for this operation. Defaults to ""assert_scalar"" message: A string to prefix to the default message. Returns: The input tensor (potentially converted to a `Tensor`). Raises: ValueError: If the tensor is not scalar (rank 0), or if its shape is unknown." 8374,ensure_shape,tensorflow/tensorflow/python/ops/check_ops.py,2219,function,"Updates the shape of a tensor and checks at runtime that the shape holds. For example: >>> @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) ... def f(tensor): ... return tf.ensure_shape(tensor, [3, 3]) >>> >>> f(tf.zeros([3, 3])) # Passes >>> f([1, 2, 3]) # fails Traceback (most recent call last): ... InvalidArgumentError: Shape of tensor x [3] is not compatible with expected shape [3,3]. The above example raises `tf.errors.InvalidArgumentError`, because the shape (3,) is not compatible with the shape (3, 3). With eager execution this is a shape assertion that returns the input: >>> x = tf.constant([1,2,3]) >>> print(x.shape) (3,) >>> x = tf.ensure_shape(x, [3]) >>> x = tf.ensure_shape(x, [5]) Traceback (most recent call last): ... tf.errors.InvalidArgumentError: Shape of tensor dummy_input [3] is not compatible with expected shape [5]. [Op:EnsureShape] Inside a `tf.function` or `v1.Graph` context it checks both the buildtime and runtime shapes. This is stricter than `tf.Tensor.set_shape` which only checks the buildtime shape. Note: This differs from `tf.Tensor.set_shape` in that it sets the static shape of the resulting tensor and enforces it at runtime, raising an error if the tensor's runtime shape is incompatible with the specified shape. `tf.Tensor.set_shape` sets the static shape of the tensor without enforcing it at runtime, which may result in inconsistencies between the statically-known shape of tensors and the runtime value of tensors. For example, when loading images of a known size: >>> @tf.function ... def decode_image(png): ... image = tf.image.decode_png(png, channels=3) ... # the `print` executes during tracing. ... print(""Initial shape: "", image.shape) ... image = tf.ensure_shape(image,[28, 28, 3]) ... print(""Final shape: "", image.shape) ... return image When tracing a function, no ops are being executed, so shapes may be unknown. See the [Concrete Functions Guide](https://www.tensorflow.org/guide/concrete_function) for details. >>> concrete_decode = decode_image.get_concrete_function( ... 
tf.TensorSpec([], dtype=tf.string)) Initial shape: (None, None, 3) Final shape: (28, 28, 3) >>> image = tf.random.uniform(maxval=255, shape=[28, 28, 3], dtype=tf.int32) >>> image = tf.cast(image,tf.uint8) >>> png = tf.image.encode_png(image) >>> image2 = concrete_decode(png) >>> print(image2.shape) (28, 28, 3) >>> image = tf.concat([image,image], axis=0) >>> print(image.shape) (56, 28, 3) >>> png = tf.image.encode_png(image) >>> image2 = concrete_decode(png) Traceback (most recent call last): ... tf.errors.InvalidArgumentError: Shape of tensor DecodePng [56,28,3] is not compatible with expected shape [28,28,3]. Caution: if you don't use the result of `tf.ensure_shape` the check may not run. >>> @tf.function ... def bad_decode_image(png): ... image = tf.image.decode_png(png, channels=3) ... # the `print` executes during tracing. ... print(""Initial shape: "", image.shape) ... # BAD: forgot to use the returned tensor. ... tf.ensure_shape(image,[28, 28, 3]) ... print(""Final shape: "", image.shape) ... return image >>> image = bad_decode_image(png) Initial shape: (None, None, 3) Final shape: (None, None, 3) >>> print(image.shape) (56, 28, 3) Args: x: A `Tensor`. shape: A `TensorShape` representing the shape of this tensor, a `TensorShapeProto`, a list, a tuple, or None. name: A name for this operation (optional). Defaults to ""EnsureShape"". Returns: A `Tensor`. Has the same type and contents as `x`. Raises: tf.errors.InvalidArgumentError: If `shape` is incompatible with the shape of `x`." 8375,_ensure_shape_grad,tensorflow/tensorflow/python/ops/check_ops.py,2340,function, 8376,clip_by_value,tensorflow/tensorflow/python/ops/clip_ops.py,38,function,"Clips tensor values to a specified min and max. Given a tensor `t`, this operation returns a tensor of the same type and shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. Any values less than `clip_value_min` are set to `clip_value_min`. Any values greater than `clip_value_max` are set to `clip_value_max`. Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for correct results. For example: Basic usage passes a scalar as the min and max value. >>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]]) >>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1) >>> t2.numpy() array([[-1., -1., 0.], [ 0., 1., 1.]], dtype=float32) The min and max can be the same size as `t`, or broadcastable to that size. >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]]) >>> clip_min = [[2],[1]] >>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100) >>> t3.numpy() array([[ 2., 2., 10.], [ 1., 1., 10.]], dtype=float32) Broadcasting fails, intentionally, if you would expand the dimensions of `t` >>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]]) >>> clip_min = [[[2, 1]]] # Has a third axis >>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100) Traceback (most recent call last): ... InvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2] It throws a `TypeError` if you try to clip an `int` to a `float` value (`tf.cast` the input to `float` first). >>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32) >>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1) Traceback (most recent call last): ... TypeError: Cannot convert ... Args: t: A `Tensor` or `IndexedSlices`. clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that is broadcastable to the shape of `t`. clip_value_max: The maximum value to clip to. 
A scalar `Tensor` or one that is broadcastable to the shape of `t`. name: A name for the operation (optional). Returns: A clipped `Tensor` or `IndexedSlices`. Raises: `tf.errors.InvalidArgumentError`: If the clip tensors would trigger array broadcasting that would make the returned tensor larger than the input. TypeError: If dtype of the input is `int32` and dtype of the `clip_value_min` or `clip_value_max` is `float32`" 8377,_clip_by_value_grad,tensorflow/tensorflow/python/ops/clip_ops.py,130,function,Returns grad of clip_by_value. 8378,clip_by_norm,tensorflow/tensorflow/python/ops/clip_ops.py,156,function,"Clips tensor values to a maximum L2-norm. Given a tensor `t`, and a maximum clip value `clip_norm`, this operation normalizes `t` so that its L2-norm is less than or equal to `clip_norm`, along the dimensions given in `axes`. Specifically, in the default case where all dimensions are used for calculation, if the L2-norm of `t` is already less than or equal to `clip_norm`, then `t` is not modified. If the L2-norm is greater than `clip_norm`, then this operation returns a tensor of the same type and shape as `t` with its values set to: `t * clip_norm / l2norm(t)` In this case, the L2-norm of the output tensor is `clip_norm`. As another example, if `t` is a matrix and `axes == [1]`, then each row of the output will have L2-norm less than or equal to `clip_norm`. If `axes == [0]` instead, each column of the output will be clipped. Code example: >>> some_nums = tf.constant([[1, 2, 3, 4, 5]], dtype=tf.float32) >>> tf.clip_by_norm(some_nums, 2.0).numpy() array([[0.26967996, 0.5393599 , 0.80903983, 1.0787199 , 1.3483998 ]], dtype=float32) This operation is typically used to clip gradients before applying them with an optimizer. Most gradient data is a collection of different shaped tensors for different parts of the model. Thus, this is a common usage: ``` # Get your gradients after training loss_value, grads = grad(model, features, labels) # Apply some clipping grads = [tf.clip_by_norm(g, norm) for g in grads] # Continue on with training optimizer.apply_gradients(grads) ``` Args: t: A `Tensor` or `IndexedSlices`. This must be a floating point type. clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value, also floating point axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions to use for computing the L2-norm. If `None` (the default), uses all dimensions. name: A name for the operation (optional). Returns: A clipped `Tensor` or `IndexedSlices`. Raises: ValueError: If the clip_norm tensor is not a 0-D scalar tensor. TypeError: If dtype of the input is not a floating point or complex type." 8379,global_norm,tensorflow/tensorflow/python/ops/clip_ops.py,241,function,"Computes the global norm of multiple tensors. Given a tuple or list of tensors `t_list`, this operation returns the global norm of the elements in all tensors in `t_list`. The global norm is computed as: `global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))` Any entries in `t_list` that are of type None are ignored. Args: t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None. name: A name for the operation (optional). Returns: A 0-D (scalar) `Tensor` of type `float`. Raises: TypeError: If `t_list` is not a sequence." 8380,clip_by_global_norm,tensorflow/tensorflow/python/ops/clip_ops.py,291,function,"Clips values of multiple tensors by the ratio of the sum of their norms. 
Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`, this operation returns a list of clipped tensors `list_clipped` and the global norm (`global_norm`) of all tensors in `t_list`. Optionally, if you've already computed the global norm for `t_list`, you can specify the global norm with `use_norm`. To perform the clipping, the values `t_list[i]` are set to: t_list[i] * clip_norm / max(global_norm, clip_norm) where: global_norm = sqrt(sum([l2norm(t)**2 for t in t_list])) If `clip_norm > global_norm` then the entries in `t_list` remain as they are, otherwise they're all shrunk by the global ratio. If `global_norm == infinity` then the entries in `t_list` are all set to `NaN` to signal that an error occurred. Any of the entries of `t_list` that are of type `None` are ignored. This is the correct way to perform gradient clipping (Pascanu et al., 2012). However, it is slower than `clip_by_norm()` because all the parameters must be ready before the clipping operation can be performed. Args: t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None. clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio. use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global norm to use. If not provided, `global_norm()` is used to compute the norm. name: A name for the operation (optional). Returns: list_clipped: A list of `Tensors` of the same type as `list_t`. global_norm: A 0-D (scalar) `Tensor` representing the global norm. Raises: TypeError: If `t_list` is not a sequence. References: On the difficulty of training Recurrent Neural Networks: [Pascanu et al., 2012](http://proceedings.mlr.press/v28/pascanu13.html) ([pdf](http://proceedings.mlr.press/v28/pascanu13.pdf))" 8381,clip_by_average_norm,tensorflow/tensorflow/python/ops/clip_ops.py,389,function,"Clips tensor values to a maximum average L2-norm. Given a tensor `t`, and a maximum clip value `clip_norm`, this operation normalizes `t` so that its average L2-norm is less than or equal to `clip_norm`. Specifically, if the average L2-norm is already less than or equal to `clip_norm`, then `t` is not modified. If the average L2-norm is greater than `clip_norm`, then this operation returns a tensor of the same type and shape as `t` with its values set to: `t * clip_norm / l2norm_avg(t)` In this case, the average L2-norm of the output tensor is `clip_norm`. This operation is typically used to clip gradients before applying them with an optimizer. Args: t: A `Tensor`. clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value. name: A name for the operation (optional). Returns: A clipped `Tensor`." 8382,ClipOpsTest,tensorflow/tensorflow/python/ops/clip_ops_test.py,31,class, 8383,KMeans,tensorflow/tensorflow/python/ops/clustering_ops.py,56,class,Creates the graph for k-means clustering. 8384,_InitializeClustersOpFactory,tensorflow/tensorflow/python/ops/clustering_ops.py,538,class,"Internal class to create the op to initialize the clusters. 
The op performs this algorithm (see constructor args): num_remaining = num_clusters - length(cluster_centers) if num_remaining == 0: assert that cluster_centers_initialized is true else: assert that num_remaining > 0 new_centers = choose up to num_remaining initial centers l2-normalize new_centers if using cosine distance all_centers = concat(cluster_centers, new_centers) cluster_centers := all_centers if there is a cluster_centers_updated variable: cluster_centers_updated := cluster_centers num_now_remaining = num_clusters - length(cluster_centers) if num_now_remaining == 0: cluster_centers_initialized := true" 8385,KmeansPlusPlusInitializationTest,tensorflow/tensorflow/python/ops/clustering_ops_test.py,29,class, 8386,KMC2InitializationTest,tensorflow/tensorflow/python/ops/clustering_ops_test.py,60,class, 8387,KMC2InitializationLargeTest,tensorflow/tensorflow/python/ops/clustering_ops_test.py,80,class, 8388,KMC2InitializationCornercaseTest,tensorflow/tensorflow/python/ops/clustering_ops_test.py,103,class, 8389,NearestCentersTest,tensorflow/tensorflow/python/ops/clustering_ops_test.py,121,class, 8390,NearestCentersLargeTest,tensorflow/tensorflow/python/ops/clustering_ops_test.py,152,class, 8391,all_reduce,tensorflow/tensorflow/python/ops/collective_ops.py,23,function,"Reduces tensors collectively, across devices. Args: t: the tensor to be reduced. group_size: the total number of tensors to be collectively reduced. Each must reside on a different device. Should be a positive integer. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. merge_op: string naming the binary Op to be applied to compute each partial reduction. final_op: string naming the unary Op to be applied to each fully reduced value. Can be 'Id' for no operation. subdiv_offsets: a list of integer offsets into the tensor at which each independent subdivision should begin. Use [0] if no subdivision should be done. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include `auto`, `ring`, and `nccl`. timeout: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the distributed reduction. Raises: ValueError: if any of the input parameter constraints are not met." 8392,all_gather,tensorflow/tensorflow/python/ops/collective_ops.py,74,function,"Accumulates tensors collectively, across devices, along first dimension. Args: t: the tensor to participate in the accumulation. group_size: the total number of tensors to be collectively accumulated. Each must reside on a different device. Should be a positive integer. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include `auto`, `ring`, and `nccl`. timeout: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the distributed operation. Raises: ValueError: if any of the input parameter constraints are not met." 
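Before moving past the clipping entries above (`clip_by_norm`, 8378; `clip_by_global_norm`, 8380), here is a minimal runnable sketch of the gradient-clipping workflow those docstrings describe in outline. The model, optimizer, and data below are illustrative placeholders, not part of the indexed API: ```python
import tensorflow as tf

# Hypothetical tiny model and optimizer, used only to illustrate the
# clip_by_global_norm pattern from entry 8380 above.
model = tf.keras.Sequential([tf.keras.layers.Dense(16, activation="relu"),
                             tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
x = tf.random.normal([8, 4])
y = tf.random.normal([8, 1])

with tf.GradientTape() as tape:
  loss = tf.reduce_mean(tf.square(model(x) - y))
grads = tape.gradient(loss, model.trainable_variables)

# All gradients are shrunk by a common ratio so that their joint
# (global) L2-norm is at most 1.0; the pre-clip norm is also returned.
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)
optimizer.apply_gradients(zip(clipped, model.trainable_variables))
``` As the `clip_by_global_norm` entry notes, clipping by a shared ratio preserves the relative scale of the gradients, which per-tensor `clip_by_norm` does not.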
8393,broadcast_send,tensorflow/tensorflow/python/ops/collective_ops.py,113,function,"Broadcasts one tensor to a group of others, across devices. Args: t: the tensor to be sent. shape: the shape of the tensor being sent, which must agree with t. dtype: the type of the tensor being sent, which must agree with t. group_size: one plus the number of receiving tensors, i.e. the total number of devices participating. Each tensor must reside on a different device. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include `auto`, `ring`, and `nccl`. timeout: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the distributed broadcast send. Raises: ValueError: if any of the input parameter constraints are not met. Note that the shape and dtype arguments appear redundant since they should be obtainable from t. There are two reasons for including them. First, the shape and type of tensors passed via broadcast must be known ahead of time in their most specific form so that the receive side can allocate memory for the operation and shape/type inference can carry forward from there. Including the same declarations on the send side clarifies a commitment already made. Secondly, having nearly identical use syntax for send and receive sides may simplify tool-driven generation of broadcast." 8394,broadcast_recv,tensorflow/tensorflow/python/ops/collective_ops.py,174,function,"Receives a broadcast tensor, across devices. Args: shape: Shape of the tensor to be received. dtype: Type of the tensor to be received. group_size: one plus the number of receiving tensors, i.e. the total number of devices participating. Each tensor must reside on a different device. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include `auto`, `ring`, and `nccl`. timeout: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the broadcast receive. Raises: ValueError: if any of the input parameter constraints are not met." 8395,CollectiveOpBenchmark,tensorflow/tensorflow/python/ops/collective_ops_benchmark.py,33,class,Benchmarks for local CPU collective op execution. 8396,CollectiveOpGPUTest,tensorflow/tensorflow/python/ops/collective_ops_gpu_test.py,37,class, 8397,CollectiveOpTest,tensorflow/tensorflow/python/ops/collective_ops_test.py,46,class, 8398,CollectiveOpXlaTest,tensorflow/tensorflow/python/ops/collective_ops_xla_test.py,31,class, 8399,build_graph,tensorflow/tensorflow/python/ops/concat_benchmark.py,35,function,"Build a graph containing a sequence of concat operations. Args: device: string, the device to run on. input_shape: shape of the input tensors. 
variable: whether or not to randomize the input shape num_inputs: the number of inputs to concat axis: axis to be concat'ed grad: if True compute the gradient Returns: An array of tensors to run()" 8400,ConcatBenchmark,tensorflow/tensorflow/python/ops/concat_benchmark.py,78,class,Benchmark concat. 8401,cond_v2,tensorflow/tensorflow/python/ops/cond_v2.py,59,function,"Like tf.cond, except emits a single If op." 8402,_IfGrad,tensorflow/tensorflow/python/ops/cond_v2.py,107,function,The gradient of an If op produced by cond_v2. 8403,_build_cond,tensorflow/tensorflow/python/ops/cond_v2.py,192,function,"Creates an If op from the specified predicate, branch functions and inputs. Note that this modifies true_graph and false_graph to make the inputs match, and to output all intermediate values so they're available for the gradient computation. true_graph and false_graph need not have the same input types, but they must have the same output types. Args: pred: boolean Tensor true_graph: FuncGraph false_graph: FuncGraph true_inputs: a list of Tensors to be passed to true_graph as input. false_inputs: a list of Tensors to be passed to false_graph as input. building_gradient: Whether this is a gradient If op. name: the name for the If op. Returns: A list of Tensors which are the outputs of the If op. Does not include added intermediate outputs." 8404,get_func_graphs,tensorflow/tensorflow/python/ops/cond_v2.py,301,function,"Returns `FuncGraph`s for the input op branches. Args: op: The If or Case Operation. Returns: A tuple of the `FuncGraph`s of the then_branch and else_branch (all branches for Case)." 8405,_grad_fn,tensorflow/tensorflow/python/ops/cond_v2.py,341,function,"The gradient function for each conditional branch. This function builds the gradient graph of the corresponding forward-pass conditional branch in `func_graph`. This is done by differentiating func_graph's outputs w.r.t. its inputs. Args: func_graph: FuncGraph. The corresponding forward-pass function. grads: The list of input gradient Tensors. Returns: The output gradient Tensors." 8406,_create_grad_func,tensorflow/tensorflow/python/ops/cond_v2.py,379,function,Returns the FuncGraph representation of _grad_fn. 8407,_resolve_grad_inputs,tensorflow/tensorflow/python/ops/cond_v2.py,387,function,"Returns the tensors to pass as inputs to `grad_graph`. The `grad_graph` may have external references to 1. Its outer graph containing the input gradients. These references are kept as is. 2. Tensors in the forward pass graph. These tensors may not be ""live"" when the gradient is being computed. We replace such references by their corresponding tensor in `cond_graph.outer_graph`. In the case of nested control flow or functions, the gradient logic handling `grad_graph.outer_graph` will make sure the tensor from `cond_graph.outer_graph` is also correctly captured. Args: cond_graph: FuncGraph. The forward-pass function. grad_graph: FuncGraph. The gradients function. Returns: A list of input tensors to be passed to grad_graph." 8408,_get_intermediates,tensorflow/tensorflow/python/ops/cond_v2.py,439,function,Returns intermediate tensors of `func_graph` for gradient computation. 8409,_make_intermediates_match,tensorflow/tensorflow/python/ops/cond_v2.py,455,function,"Returns new optionals lists that have matching signatures. This is done by mirroring each list in the other using None optionals. There is no merging of like optionals. Args: branch_graphs: `list` of `FuncGraph`. 
branch_optionals: `list` of `list`s of optional `Tensor`s from other branch_graphs Returns: A `list` of `list`s of `Tensor`s for each branch_graph. Each list has the same number of `Tensor`s, all of which will be optionals of the same shape/type." 8410,_make_intermediates_match_xla,tensorflow/tensorflow/python/ops/cond_v2.py,482,function,Like _make_intermediates_match but for the XLA case. 8411,_make_inputs_match,tensorflow/tensorflow/python/ops/cond_v2.py,497,function,"Modifies branch_graphs so they have the same input signature. This method reorders and/or adds parameters to each graph in branch_graphs so they have the same input signature, and updates the 'inputs' and 'captured' fields of each graph accordingly. It uses the input tensors from the outer graph to avoid duplicating shared arguments. Args: branch_graphs: a `list` of `FuncGraph` branch_inputs: a `list` of `list`s of `Tensor`s in the outer graph. The inputs for the corresponding graph in `branch_graphs`. Returns: A new list of Tensors from the outer graph that are the new inputs for each branch_graph. This is a deduped version of `sum(branch_inputs)`." 8412,_create_zeros_for_none_grads,tensorflow/tensorflow/python/ops/cond_v2.py,542,function,"Creates zeros for None out grads if at least one branch has non-None grad. Args: forward_graphs: List of forward FuncGraphs. grad_graphs: List of grad FuncGraphs." 8413,_make_output_composite_tensors_match,tensorflow/tensorflow/python/ops/cond_v2.py,570,function,"Modifies each branch_graph's outputs to have the same output signature. Currently the only transformation implemented is turning a Tensor into an equivalent IndexedSlices if the other branch returns an IndexedSlices. Updates branch_graph.{outputs,structured_outputs} for each branch_graph in branch_graphs. Args: op_type: _COND or _CASE branch_graphs: `list` of `FuncGraph` Raises: TypeError: if a set of outputs cannot be rewritten." 8414,_make_indexed_slices_indices_types_match,tensorflow/tensorflow/python/ops/cond_v2.py,620,function,Match dtype of IndexedSlices.indices in outputs of branch_graphs. 8415,_get_op_and_outputs,tensorflow/tensorflow/python/ops/cond_v2.py,684,function, 8416,_pack_sequence_as,tensorflow/tensorflow/python/ops/cond_v2.py,693,function,"Packs the outputs of the gradient If/Case op. The branch functions may contain None's in the list of `structured_outputs`. `op_outputs` has those outputs missing. So we need to add those Nones to the list of `op_outputs` and then pack it in the same structure as `structured_outputs`. Args: structured_outputs: structured_outputs from one of the branch functions. op_outputs: List of output tensors of the op. Returns: `op_outputs` packed like `structured_outputs`." 8417,_wrap_intermediates,tensorflow/tensorflow/python/ops/cond_v2.py,720,function, 8418,_create_dummy_input,tensorflow/tensorflow/python/ops/cond_v2.py,725,function,"Creates tensors in func_graph to represent template_tensors. Args: func_graph: FuncGraph. template_tensor: a tensor in the outer graph. Returns: A tensor in func_graph." 8419,_create_none_optionals,tensorflow/tensorflow/python/ops/cond_v2.py,740,function,"Creates `n` `None` optionals in func_graph. Args: func_graph: FuncGraph. n: `int` the number of `None` optionals to make. Returns: A list of tensors in func_graph." 8420,_create_fakeparams,tensorflow/tensorflow/python/ops/cond_v2.py,754,function,Create FakeParams for the XLA case. 
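The `cond_v2` entries above (8401-8420) describe internal machinery that traces each branch into a `FuncGraph` and emits one functional `If` op. A small sketch of the user-facing behavior, assuming TF2 defaults where `tf.cond` routes through `cond_v2`: ```python
import tensorflow as tf

@tf.function
def select(x):
  # Under TF2 defaults this lowers to a single functional `If` op
  # (cond_v2); each branch is traced exactly once into its own FuncGraph.
  return tf.cond(x > 0, lambda: x * 2.0, lambda: -x)

print(select(tf.constant(3.0)).numpy())   # 6.0
print(select(tf.constant(-1.5)).numpy())  # 1.5
```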
8421,_check_same_outputs,tensorflow/tensorflow/python/ops/cond_v2.py,761,function,Raises an error if `graphs` have different outputs. 8422,_get_output_shapes,tensorflow/tensorflow/python/ops/cond_v2.py,805,function, 8423,verify_captures,tensorflow/tensorflow/python/ops/cond_v2.py,815,function,Verify that a branch's tensor is not accessed in another branch fn. 8424,_CondGradFuncGraph,tensorflow/tensorflow/python/ops/cond_v2.py,834,class,"FuncGraph for the gradient function of the branch of an If op. Handles wrapping and unwrapping intermediate values that are captured by the gradient computation in optionals. Attributes: op_needs_rewrite: True if any intermediates were captured, meaning the forward If op needs to be rewritten to output the wrapped intermediates." 8425,indexed_case,tensorflow/tensorflow/python/ops/cond_v2.py,946,function,"Like cond_v2, except emits a Case op instead of an If." 8426,_CaseGrad,tensorflow/tensorflow/python/ops/cond_v2.py,988,function,The gradient of a Case op produced by tf.switch_case. 8427,_build_case,tensorflow/tensorflow/python/ops/cond_v2.py,1081,function,"Creates a `Case` op from `branch_index`, branch graphs and inputs. Note that this modifies `branch_graphs` to make the inputs match, and to output all intermediate values so they're available for the gradient computation. `branch_graphs` need not have the same input types, but they must have the same output types. Args: branch_index: integer Tensor branch_graphs: List of FuncGraph branch_inputs: List of lists of Tensors to be passed to corresponding branch_graph as input. name: the name for the Case op. lower_using_switch_merge: Lower this op using switch merge ops (optional). Returns: A list of Tensors which are the outputs of the Case op. Does not include added intermediate outputs." 8428,_set_read_only_resource_inputs_attr,tensorflow/tensorflow/python/ops/cond_v2.py,1146,function,"Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: If or Case Operation. branch_graphs: List of branch FuncGraphs." 8429,remove_squeezable_dimensions,tensorflow/tensorflow/python/ops/confusion_matrix.py,34,function,"Squeeze last dim if ranks differ from expected by exactly 1. In the common case where we expect shapes to match, `expected_rank_diff` defaults to 0, and we squeeze the last dimension of the larger rank if they differ by 1. But, for example, if `labels` contains class IDs and `predictions` contains 1 probability per class, we expect `predictions` to have 1 more dimension than `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze `labels` if `rank(predictions) - rank(labels) == 0`, and `predictions` if `rank(predictions) - rank(labels) == 2`. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: labels: Label values, a `Tensor` whose dimensions match `predictions`. predictions: Predicted values, a `Tensor` of arbitrary dimensions. expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`. name: Name of the op. Returns: Tuple of `labels` and `predictions`, possibly with last dim squeezed." 8430,confusion_matrix,tensorflow/tensorflow/python/ops/confusion_matrix.py,98,function,"Computes the confusion matrix from predictions and labels. The matrix columns represent the prediction labels and the rows represent the real labels. 
The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid labels for a given classification task. Both prediction and labels must be 1-D arrays of the same shape in order for this function to work. If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in either predictions or labels. Class labels are expected to start at 0. For example, if `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. If `weights` is not `None`, then each prediction contributes its corresponding weight to the total value of the confusion matrix cell. For example: ```python tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==> [[0 0 0 0 0] [0 0 1 0 0] [0 0 1 0 0] [0 0 0 0 0] [0 0 0 0 1]] ``` Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`, resulting in a 5x5 confusion matrix. Args: labels: 1-D `Tensor` of real labels for the classification task. predictions: 1-D `Tensor` of predictions for a given classification. num_classes: The possible number of labels the classification task can have. If this value is not provided, it will be calculated using both predictions and labels array. weights: An optional `Tensor` whose shape matches `predictions`. dtype: Data type of the confusion matrix. name: Scope name. Returns: A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion matrix, where `n` is the number of possible labels in the classification task. Raises: ValueError: If both predictions and labels are not 1-D vectors and have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`." 8431,confusion_matrix_v1,tensorflow/tensorflow/python/ops/confusion_matrix.py,209,function,"Computes the confusion matrix from predictions and labels. The matrix columns represent the prediction labels and the rows represent the real labels. The confusion matrix is always a 2-D array of shape `[n, n]`, where `n` is the number of valid labels for a given classification task. Both prediction and labels must be 1-D arrays of the same shape in order for this function to work. If `num_classes` is `None`, then `num_classes` will be set to one plus the maximum value in either predictions or labels. Class labels are expected to start at 0. For example, if `num_classes` is 3, then the possible labels would be `[0, 1, 2]`. If `weights` is not `None`, then each prediction contributes its corresponding weight to the total value of the confusion matrix cell. For example: ```python tf.math.confusion_matrix([1, 2, 4], [2, 2, 4]) ==> [[0 0 0 0 0] [0 0 1 0 0] [0 0 1 0 0] [0 0 0 0 0] [0 0 0 0 1]] ``` Note that the possible labels are assumed to be `[0, 1, 2, 3, 4]`, resulting in a 5x5 confusion matrix. Args: labels: 1-D `Tensor` of real labels for the classification task. predictions: 1-D `Tensor` of predictions for a given classification. num_classes: The possible number of labels the classification task can have. If this value is not provided, it will be calculated using both predictions and labels array. dtype: Data type of the confusion matrix. name: Scope name. weights: An optional `Tensor` whose shape matches `predictions`. Returns: A `Tensor` of type `dtype` with shape `[n, n]` representing the confusion matrix, where `n` is the number of possible labels in the classification task. Raises: ValueError: If both predictions and labels are not 1-D vectors and have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`." 
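A short runnable sketch of the weighted behavior described in the two `confusion_matrix` entries above (8430, 8431): each prediction contributes its weight, rather than 1, to its cell. ```python
import tensorflow as tf

labels = tf.constant([1, 2, 4])
predictions = tf.constant([2, 2, 4])
weights = tf.constant([10, 1, 1])

# Rows are real labels, columns are predictions; num_classes is
# inferred as one plus the maximum value seen, i.e. 5 here.
cm = tf.math.confusion_matrix(labels, predictions, weights=weights)
print(cm.numpy())
# [[ 0  0  0  0  0]
#  [ 0  0 10  0  0]
#  [ 0  0  1  0  0]
#  [ 0  0  0  0  0]
#  [ 0  0  0  0  1]]
```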
8432,_SwitchGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,35,function,"Gradients for a Switch op are calculated using a Merge op. If the switch is a loop switch, it will be visited twice. We create the merge on the first visit, and update the other input of the merge on the second visit. A next_iteration is also added on second visit." 8433,_MergeGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,96,function,Gradients for a Merge op are calculated using a Switch op. 8434,_RefMergeGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,142,function, 8435,_ExitGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,147,function,Gradients for an exit op are calculated using an Enter op. 8436,_NextIterationGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,189,function,"A forward next_iteration is translated into a backprop identity. Note that the backprop next_iteration is added in switch grad." 8437,_RefNextIterationGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,198,function, 8438,_EnterGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,203,function,"Gradients for an Enter are calculated using an Exit op. For loop variables, grad is the gradient so just add an exit. For loop invariants, we need to add an accumulator loop." 8439,_RefEnterGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,238,function, 8440,_LoopCondGrad,tensorflow/tensorflow/python/ops/control_flow_grad.py,243,function,Stop backprop for the predicate of a while loop. 8441,_summarize_eager,tensorflow/tensorflow/python/ops/control_flow_ops.py,80,function,"Returns a summarized string representation of eager `tensor`. Args: tensor: EagerTensor to summarize summarize: Include this many leading elements of `array`" 8442,Assert,tensorflow/tensorflow/python/ops/control_flow_ops.py,117,function,"Asserts that the given condition is true. If `condition` evaluates to false, print the list of tensors in `data`. `summarize` determines how many entries of the tensors to print. Args: condition: The condition to evaluate. data: The tensors to print out when condition is false. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Returns: assert_op: An `Operation` that, when executed, raises a `tf.errors.InvalidArgumentError` if `condition` is not true. @compatibility(eager) returns None @end_compatibility Raises: @compatibility(TF1) When in TF V1 mode (that is, outside `tf.function`) Assert needs a control dependency on the output to ensure the assertion executes: ```python # Ensure maximum element of x is smaller or equal to 1 assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x]) with tf.control_dependencies([assert_op]): ... code using x ... ``` @end_compatibility" 8443,_Identity,tensorflow/tensorflow/python/ops/control_flow_ops.py,181,function,"Return a tensor with the same shape and contents as the input tensor. Args: data: A Tensor. name: A name for this operation (optional). Returns: A Tensor with the same type and value as the input Tensor." 8444,_NextIteration,tensorflow/tensorflow/python/ops/control_flow_ops.py,203,function, 8445,_Enter,tensorflow/tensorflow/python/ops/control_flow_ops.py,216,function,"Creates or finds a child frame, and makes `data` available to it. The unique `frame_name` is used by the `Executor` to identify frames. If `is_constant` is true, `data` is a constant in the child frame; otherwise it may be changed in the child frame. 
At most `parallel_iterations` iterations are run in parallel in the child frame. Args: data: The tensor to be made available to the child frame. frame_name: The name of the child frame. is_constant: If true, the output is constant within the child frame. parallel_iterations: The number of iterations allowed to run in parallel. use_ref: If true, use ref_enter if data is of ref type. use_input_shape: If true, set the result's shape based on data's shape. name: A name for this operation (optional). Returns: The same tensor as `data`." 8446,exit,tensorflow/tensorflow/python/ops/control_flow_ops.py,264,function,"Exits the current frame to its parent frame. Exit makes its input `data` available to the parent frame. Args: data: The tensor to be made available to the parent frame. name: A name for this operation (optional). Returns: The same tensor as `data`." 8447,switch,tensorflow/tensorflow/python/ops/control_flow_ops.py,288,function,"Forwards `data` to an output determined by `pred`. If `pred` is false, the `data` input is forwarded to the first output. Otherwise, the data goes to the second output. This op handles `Tensor`s and `IndexedSlices`. Args: data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: A name for this operation (optional). Returns: `(output_false, output_true)`: If `pred` is true, data will be forwarded to `output_true`, otherwise it goes to `output_false`." 8448,_SwitchRefOrTensor,tensorflow/tensorflow/python/ops/control_flow_ops.py,323,function,"Forwards `data` to an output determined by `pred`. If `pred` is false, the `data` input is forwarded to the first output. Otherwise, the data goes to the second output. This op handles `Tensor`s and `IndexedSlices`. Args: data: The tensor to be forwarded to the appropriate output. pred: A scalar that specifies which output port will receive data. name: A name for this operation (optional). Returns: `(output_false, output_true)`: If `pred` is true, data will be forwarded to `output_true`, otherwise it goes to `output_false`. Raises: TypeError: if data is not a Tensor or IndexedSlices" 8449,merge,tensorflow/tensorflow/python/ops/control_flow_ops.py,367,function,"Returns the value of an available element of `inputs`. This op tests each of the tensors in `inputs` in turn to determine if any of them is available. If it finds an available tensor, it returns it and its index in `inputs`. It is an error if more than one tensor in `inputs` is available. If no tensor in `inputs` is available, the returned tensor and index are not set. This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of `Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices before merging. Args: inputs: The input tensors, at most one of which is available. name: A name for this operation (optional). Returns: A tuple containing the chosen input tensor and its index in `inputs`. Raises: ValueError: If any of the inputs is None, or inputs are IndexedSlices and some but not all have a dense_shape property." 
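Complementing the graph-mode example in the `Assert` entry above (8442), a brief sketch of the eager behavior, where the check runs when the call executes and no control dependency is needed; this is an assumed illustration, not text from the indexed docstring: ```python
import tensorflow as tf

x = tf.constant([0.2, 0.8])
# Passes silently: max(x) <= 1.0.
tf.debugging.Assert(tf.less_equal(tf.reduce_max(x), 1.0), [x])

# A false condition raises immediately in eager mode and reports `data`.
try:
  tf.debugging.Assert(tf.less_equal(tf.reduce_max(x), 0.5), [x])
except tf.errors.InvalidArgumentError as e:
  print("assertion fired:", e.message)
```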
8450,_convert_tensorarray_to_flow,tensorflow/tensorflow/python/ops/control_flow_ops.py,432,function, 8451,_convert_flows_to_tensorarrays,tensorflow/tensorflow/python/ops/control_flow_ops.py,439,function, 8452,_ShapeLessThanOrEqual,tensorflow/tensorflow/python/ops/control_flow_ops.py,451,function, 8453,_get_shape_invariant,tensorflow/tensorflow/python/ops/control_flow_ops.py,462,function,"Returns shape invariant(s) for the given variable. Args: var: The tensor whose shape is described. shape: The shape invariant for the tensor. If not specified, then a default shape invariant for `var` is returned. Returns: `TensorShape` or `list` of `TensorShape`: The shape invariant for `var` (if it is a `Tensor`), or the shape invariants for the components that comprise `var` (if it is a `CompositeTensor`)." 8454,_shape_invariant_to_type_spec,tensorflow/tensorflow/python/ops/control_flow_ops.py,497,function,"Converts a shape invariant to a TypeSpec. Args: var: The tensor whose shape is described by the shape invariant. shape: A `TypeSpec` or `TensorShape`. If `shape` is already a `TypeSpec`, then it is simply returned as-is. Returns: A `TypeSpec` for `var`, consistent with the given shape." 8455,_SetShapeInvariants,tensorflow/tensorflow/python/ops/control_flow_ops.py,535,function,"Set the shapes of the tensors in `enter_vars` to `shapes`. Args: input_vars: A list of tensors that are inputs to `enter_vars`. enter_vars: A list of tensors whose shapes will be set. shapes: A (possibly nested) list of shapes. Raises: ValueError: If any tensor in `enter_vars` has a less specific shape than its corresponding shape in `shapes`." 8456,_EnforceShapeInvariant,tensorflow/tensorflow/python/ops/control_flow_ops.py,567,function,"Check if the shapes of the loop variables are invariants. Args: merge_var: The list of tensors representing the initial values of the loop variables. next_var: The list of tensors representing the values of the loop variables after one loop iteration. Raises: ValueError: If any tensor in `merge_var` has a more specific shape than its corresponding tensor in `next_var`." 8457,_AddNextAndBackEdge,tensorflow/tensorflow/python/ops/control_flow_ops.py,596,function,Add NextIteration and back edge from v to m. 8458,ControlFlowContext,tensorflow/tensorflow/python/ops/control_flow_ops.py,624,class,"The base class for control flow context. The usage pattern is a sequence of (Enter, Exit) followed by a final ExitResult. We maintain the following state for control flow contexts during graph construction: 1. graph has _control_flow_context: the current context used to construct new nodes. Changed by ctxt.Enter() and ctxt.Exit() 2. op has _control_flow_context: the context to which the op belongs. Set at the time the op is created. Immutable. 3. A ControlFlowContext has _outer_context: the context in which this context is created. Set at the time a context is created. Immutable. 4. A ControlFlowContext has _context_stack. Pushed and popped by ctxt.Enter() and ctxt.Exit()" 8459,CondContext,tensorflow/tensorflow/python/ops/control_flow_ops.py,813,class,The context for the conditional construct. 8460,_UnpackIfSingleton,tensorflow/tensorflow/python/ops/control_flow_ops.py,1091,function, 8461,cond,tensorflow/tensorflow/python/ops/control_flow_ops.py,1105,function,"Return `true_fn()` if the predicate `pred` is true else `false_fn()`. `true_fn` and `false_fn` both return lists of output tensors. `true_fn` and `false_fn` must have the same non-zero number and type of outputs. 
**WARNING**: Any Tensors or Operations created outside of `true_fn` and `false_fn` will be executed regardless of which branch is selected at runtime. Although this behavior is consistent with the dataflow model of TensorFlow, it has frequently surprised users who expected a lazier semantics. Consider the following simple program: ```python z = tf.multiply(a, b) result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y)) ``` If `x < y`, the `tf.add` operation will be executed and `tf.square` operation will not be executed. Since `z` is needed for at least one branch of the `cond`, the `tf.multiply` operation is always executed, unconditionally. Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the call to `cond`, and not at all during `Session.run()`). `cond` stitches together the graph fragments created during the `true_fn` and `false_fn` calls with some additional graph nodes to ensure that the right branch gets executed depending on the value of `pred`. `tf.cond` supports nested structures as implemented in `tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. Singleton lists and tuples form the only exceptions to this: when returned by `true_fn` and/or `false_fn`, they are implicitly unpacked to single values. This behavior is disabled by passing `strict=True`. Args: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. strict: A boolean that enables/disables 'strict' mode; see above. name: Optional name prefix for the returned tensors. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. If the callables return a singleton list, the element is extracted from the list. Raises: TypeError: if `true_fn` or `false_fn` is not callable. ValueError: if `true_fn` and `false_fn` do not return the same number of tensors, or return tensors of different types. Example: ```python x = tf.constant(2) y = tf.constant(5) def f1(): return tf.multiply(x, 17) def f2(): return tf.add(y, 23) r = tf.cond(tf.less(x, y), f1, f2) # r is set to f1(). # Operations in f2 (e.g., tf.add) are not executed. ```" 8462,_cast_indexed_slice_indices,tensorflow/tensorflow/python/ops/control_flow_ops.py,1302,function,"Cast IndexedSlice.indices from int32 to int64 where necessary. If `a` and `b` are both IndexedSlices, and their indices have different dtypes, then cast both their dtypes to `int64` (modifies `a` and `b` in-place). Otherwise, does nothing. Args: a: A value, which may be an IndexedSlices. b: A value, which may be an IndexedSlices." 8463,cond_for_tf_v2,tensorflow/tensorflow/python/ops/control_flow_ops.py,1326,function,"Return `true_fn()` if the predicate `pred` is true else `false_fn()`. `true_fn` and `false_fn` both return lists of output tensors. `true_fn` and `false_fn` must have the same non-zero number and type of outputs. **WARNING**: Any Tensors or Operations created outside of `true_fn` and `false_fn` will be executed regardless of which branch is selected at runtime. Although this behavior is consistent with the dataflow model of TensorFlow, it has frequently surprised users who expected a lazier semantics. 
Consider the following simple program: ```python z = tf.multiply(a, b) result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y)) ``` If `x < y`, the `tf.add` operation will be executed and `tf.square` operation will not be executed. Since `z` is needed for at least one branch of the `cond`, the `tf.multiply` operation is always executed, unconditionally. Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the call to `cond`, and not at all during `Session.run()`). `cond` stitches together the graph fragments created during the `true_fn` and `false_fn` calls with some additional graph nodes to ensure that the right branch gets executed depending on the value of `pred`. `tf.cond` supports nested structures as implemented in `tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. Singleton lists and tuples form the only exceptions to this: when returned by `true_fn` and/or `false_fn`, they are implicitly unpacked to single values. Note: It is illegal to ""directly"" use tensors created inside a cond branch outside it, e.g. by storing a reference to a branch tensor in the python state. If you need to use a tensor created in a branch function you should return it as an output of the branch function and use the output from `tf.cond` instead. Args: pred: A scalar determining whether to return the result of `true_fn` or `false_fn`. true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix for the returned tensors. Returns: Tensors returned by the call to either `true_fn` or `false_fn`. If the callables return a singleton list, the element is extracted from the list. Raises: TypeError: if `true_fn` or `false_fn` is not callable. ValueError: if `true_fn` and `false_fn` do not return the same number of tensors, or return tensors of different types. Example: ```python x = tf.constant(2) y = tf.constant(5) def f1(): return tf.multiply(x, 17) def f2(): return tf.add(y, 23) r = tf.cond(tf.less(x, y), f1, f2) # r is set to f1(). # Operations in f2 (e.g., tf.add) are not executed. ```" 8464,_resource_safe_shape,tensorflow/tensorflow/python/ops/control_flow_ops.py,1399,function,Returns the shape of t or the variable it points to. 8465,WhileContext,tensorflow/tensorflow/python/ops/control_flow_ops.py,1411,class,The context for the loop construct. 8466,while_loop_v2,tensorflow/tensorflow/python/ops/control_flow_ops.py,2323,function,"Repeat `body` while the condition `cond` is true. `cond` is a callable returning a boolean scalar tensor. `body` is a callable returning a (possibly nested) tuple, namedtuple or list of tensors of the same arity (length and structure) and types as `loop_vars`. `loop_vars` is a (possibly nested) tuple, namedtuple or list of tensors that is passed to both `cond` and `body`. `cond` and `body` both take as many arguments as there are `loop_vars`. In addition to regular Tensors or IndexedSlices, the body may accept and return TensorArray objects. The flows of the TensorArray objects will be appropriately forwarded between loops and during gradient calculations. Note that `while_loop` calls `cond` and `body` *exactly once* (inside the call to `while_loop`, and not at all during `Session.run()`). 
`while_loop` stitches together the graph fragments created during the `cond` and `body` calls with some additional graph nodes to create the graph flow that repeats `body` until `cond` returns false. For correctness, `tf.while_loop()` strictly enforces shape invariants for the loop variables. A shape invariant is a (possibly partial) shape that is unchanged across the iterations of the loop. An error will be raised if the shape of a loop variable after an iteration is determined to be more general than or incompatible with its shape invariant. For example, a shape of [11, None] is more general than a shape of [11, 17], and [11, 21] is not compatible with [11, 17]. By default (if the argument `shape_invariants` is not specified), it is assumed that the initial shape of each tensor in `loop_vars` is the same in every iteration. The `shape_invariants` argument allows the caller to specify a less specific shape invariant for each loop variable, which is needed if the shape varies between iterations. The `tf.Tensor.set_shape` function may also be used in the `body` function to indicate that the output loop variable has a particular shape. The shape invariants for SparseTensor and IndexedSlices are treated specially as follows: a) If a loop variable is a SparseTensor, the shape invariant must be TensorShape([r]) where r is the rank of the dense tensor represented by the sparse tensor. It means the shapes of the three tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here is the shape of the SparseTensor.dense_shape property. It must be the shape of a vector. b) If a loop variable is an IndexedSlices, the shape invariant must be a shape invariant of the values tensor of the IndexedSlices. It means the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]], [shape.ndims]). `while_loop` implements non-strict semantics, enabling multiple iterations to run in parallel. The maximum number of parallel iterations can be controlled by `parallel_iterations`, which gives users some control over memory consumption and execution order. For correct programs, `while_loop` should return the same result for any parallel_iterations > 0. For training, TensorFlow stores the tensors that are produced in the forward inference and are needed in back propagation. These tensors are a main source of memory consumption and often cause OOM errors when training on GPUs. When the flag swap_memory is true, we swap out these tensors from GPU to CPU. This, for example, allows us to train RNN models with very long sequences and large batches. Args: cond: A callable that represents the termination condition of the loop. body: A callable that represents the loop body. loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array, `Tensor`, and `TensorArray` objects. shape_invariants: The shape invariants for the loop variables. parallel_iterations: The number of iterations allowed to run in parallel. It must be a positive integer. back_prop: (optional) Deprecated. False disables support for back propagation. Prefer using `tf.stop_gradient` instead. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. maximum_iterations: Optional maximum number of iterations of the while loop to run. If provided, the `cond` output is AND-ed with an additional condition ensuring the number of iterations executed is no greater than `maximum_iterations`. name: Optional name prefix for the returned tensors. Returns: The output tensors for the loop variables after the loop. 
The return value has the same structure as `loop_vars`. Raises: TypeError: if `cond` or `body` is not callable. ValueError: if `loop_vars` is empty. Example: ```python i = tf.constant(0) c = lambda i: tf.less(i, 10) b = lambda i: (tf.add(i, 1), ) r = tf.while_loop(c, b, [i]) ``` Example with nesting and a namedtuple: ```python import collections Pair = collections.namedtuple('Pair', 'j, k') ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2))) c = lambda i, p: i < 10 b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k))) ijk_final = tf.while_loop(c, b, ijk_0) ``` Example using shape_invariants: ```python i0 = tf.constant(0) m0 = tf.ones([2, 2]) c = lambda i, m: i < 10 b = lambda i, m: [i+1, tf.concat([m, m], axis=0)] tf.while_loop( c, b, loop_vars=[i0, m0], shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])]) ``` Example which demonstrates non-strict semantics: In the following example, the final value of the counter `i` does not depend on `x`. So the `while_loop` can increment the counter in parallel with updates of `x`. However, because the loop counter at one loop iteration depends on the value at the previous iteration, the loop counter itself cannot be incremented in parallel. Hence, if we just want the final value of the counter (which we print on the line `print(sess.run(i))`), then `x` will never be incremented, but the counter will be updated on a single thread. Conversely, if we want the value of the output (which we print on the line `print(sess.run(out).shape)`), then the counter may be incremented on its own thread, while `x` can be incremented in parallel on a separate thread. In the extreme case, it is conceivable that the thread incrementing the counter runs until completion before `x` is incremented even a single time. The only thing that can never happen is for the thread updating `x` to get ahead of the counter thread, because the thread incrementing `x` depends on the value of the counter. ```python import tensorflow as tf n = 10000 x = tf.constant(list(range(n))) c = lambda i, x: i < n b = lambda i, x: (tf.compat.v1.Print(i + 1, [i]), tf.compat.v1.Print(x + 1, [i], ""x:"")) i, out = tf.while_loop(c, b, (0, x)) with tf.compat.v1.Session() as sess: print(sess.run(i)) # prints [0] ... [9999] # The following line may increment the counter and x in parallel. # The counter thread may get ahead of the other thread, but not the # other way around. So you may see things like # [9996] x:[9987] # meaning that the counter thread is on iteration 9996, # while the other thread is on iteration 9987 print(sess.run(out).shape) ```" 8467,while_loop,tensorflow/tensorflow/python/ops/control_flow_ops.py,2504,function,"Repeat `body` while the condition `cond` is true. `cond` is a callable returning a boolean scalar tensor. `body` is a callable returning a (possibly nested) tuple, namedtuple or list of tensors of the same arity (length and structure) and types as `loop_vars`. `loop_vars` is a (possibly nested) tuple, namedtuple or list of tensors that is passed to both `cond` and `body`. `cond` and `body` both take as many arguments as there are `loop_vars`. In addition to regular Tensors or IndexedSlices, the body may accept and return TensorArray objects. The flows of the TensorArray objects will be appropriately forwarded between loops and during gradient calculations. Note that `while_loop` calls `cond` and `body` *exactly once* (inside the call to `while_loop`, and not at all during `Session.run()`).
`while_loop` stitches together the graph fragments created during the `cond` and `body` calls with some additional graph nodes to create the graph flow that repeats `body` until `cond` returns false. For correctness, `tf.while_loop()` strictly enforces shape invariants for the loop variables. A shape invariant is a (possibly partial) shape that is unchanged across the iterations of the loop. An error will be raised if the shape of a loop variable after an iteration is determined to be more general than or incompatible with its shape invariant. For example, a shape of [11, None] is more general than a shape of [11, 17], and [11, 21] is not compatible with [11, 17]. By default (if the argument `shape_invariants` is not specified), it is assumed that the initial shape of each tensor in `loop_vars` is the same in every iteration. The `shape_invariants` argument allows the caller to specify a less specific shape invariant for each loop variable, which is needed if the shape varies between iterations. The `tf.Tensor.set_shape` function may also be used in the `body` function to indicate that the output loop variable has a particular shape. The shape invariants for SparseTensor and IndexedSlices are treated specially as follows: a) If a loop variable is a SparseTensor, the shape invariant must be TensorShape([r]) where r is the rank of the dense tensor represented by the sparse tensor. It means the shapes of the three tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here is the shape of the SparseTensor.dense_shape property. It must be the shape of a vector. b) If a loop variable is an IndexedSlices, the shape invariant must be a shape invariant of the values tensor of the IndexedSlices. It means the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]], [shape.ndims]). `while_loop` implements non-strict semantics, enabling multiple iterations to run in parallel. The maximum number of parallel iterations can be controlled by `parallel_iterations`, which gives users some control over memory consumption and execution order. For correct programs, `while_loop` should return the same result for any parallel_iterations > 0. For training, TensorFlow stores the tensors that are produced in the forward inference and are needed in back propagation. These tensors are a main source of memory consumption and often cause OOM errors when training on GPUs. When the flag swap_memory is true, we swap out these tensors from GPU to CPU. This, for example, allows us to train RNN models with very long sequences and large batches. Args: cond: A callable that represents the termination condition of the loop. body: A callable that represents the loop body. loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array, `Tensor`, and `TensorArray` objects. shape_invariants: The shape invariants for the loop variables. parallel_iterations: The number of iterations allowed to run in parallel. It must be a positive integer. back_prop: Whether backprop is enabled for this while loop. swap_memory: Whether GPU-CPU memory swap is enabled for this loop. name: Optional name prefix for the returned tensors. maximum_iterations: Optional maximum number of iterations of the while loop to run. If provided, the `cond` output is AND-ed with an additional condition ensuring the number of iterations executed is no greater than `maximum_iterations`. return_same_structure: If True, output has same structure as `loop_vars`.
If eager execution is enabled, this is ignored (and always treated as True). Returns: The output tensors for the loop variables after the loop. If `return_same_structure` is True, the return value has the same structure as `loop_vars`. If `return_same_structure` is False, the return value is a Tensor, TensorArray or IndexedSlice if the length of `loop_vars` is 1, or a list otherwise. Raises: TypeError: if `cond` or `body` is not callable. ValueError: if `loop_vars` is empty. Example: ```python i = tf.constant(0) c = lambda i: tf.less(i, 10) b = lambda i: tf.add(i, 1) r = tf.while_loop(c, b, [i]) ``` Example with nesting and a namedtuple: ```python import collections Pair = collections.namedtuple('Pair', 'j, k') ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2))) c = lambda i, p: i < 10 b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k))) ijk_final = tf.while_loop(c, b, ijk_0) ``` Example using shape_invariants: ```python i0 = tf.constant(0) m0 = tf.ones([2, 2]) c = lambda i, m: i < 10 b = lambda i, m: [i+1, tf.concat([m, m], axis=0)] tf.while_loop( c, b, loop_vars=[i0, m0], shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])]) ``` Example which demonstrates non-strict semantics: In the following example, the final value of the counter `i` does not depend on `x`. So the `while_loop` can increment the counter in parallel with updates of `x`. However, because the loop counter at one loop iteration depends on the value at the previous iteration, the loop counter itself cannot be incremented in parallel. Hence, if we just want the final value of the counter (which we print on the line `print(sess.run(i))`), then `x` will never be incremented, but the counter will be updated on a single thread. Conversely, if we want the value of the output (which we print on the line `print(sess.run(out).shape)`), then the counter may be incremented on its own thread, while `x` can be incremented in parallel on a separate thread. In the extreme case, it is conceivable that the thread incrementing the counter runs until completion before `x` is incremented even a single time. The only thing that can never happen is for the thread updating `x` to get ahead of the counter thread, because the thread incrementing `x` depends on the value of the counter. ```python import tensorflow as tf n = 10000 x = tf.constant(list(range(n))) c = lambda i, x: i < n b = lambda i, x: (tf.compat.v1.Print(i + 1, [i]), tf.compat.v1.Print(x + 1, [i], ""x:"")) i, out = tf.while_loop(c, b, (0, x)) with tf.compat.v1.Session() as sess: print(sess.run(i)) # prints [0] ... [9999] # The following line may increment the counter and x in parallel. # The counter thread may get ahead of the other thread, but not the # other way around. So you may see things like # [9996] x:[9987] # meaning that the counter thread is on iteration 9996, # while the other thread is on iteration 9987 print(sess.run(out).shape) ```" 8468,_AsTensorList,tensorflow/tensorflow/python/ops/control_flow_ops.py,2784,function,"Return x as a list of Tensors or IndexedSlices. For entries of `x` that are Operations, this returns an Identity of `p` with a dependency on the operation. Args: x: A Tensor/IndexedSlices/Operation or a list or tuple of them. p: A Tensor to return for entries in `x` that are Operations. Returns: A list of Tensors or IndexedSlices."
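To make the `shape_invariants` rule above concrete, here is a minimal runnable sketch (TF 2.x eager mode; the names `i0`/`m0` are illustrative, not part of the API):

```python
import tensorflow as tf

# The accumulated matrix grows along axis 0 each iteration, so its
# invariant must leave that dimension unspecified (None).
i0 = tf.constant(0)
m0 = tf.ones([1, 3])

_, m = tf.while_loop(
    cond=lambda i, m: i < 3,
    body=lambda i, m: (i + 1, tf.concat([m, m], axis=0)),
    loop_vars=[i0, m0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None, 3])],
    maximum_iterations=10)

print(m.shape)  # (8, 3): the row count doubled on each of the 3 iterations
```

Without the relaxed invariant, the changing first dimension would violate the default assumption that each loop variable keeps its initial shape.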
8469,_CheckResults,tensorflow/tensorflow/python/ops/control_flow_ops.py,2814,function, 8470,with_dependencies,tensorflow/tensorflow/python/ops/control_flow_ops.py,2823,function,"Produces the content of `output_tensor` only after `dependencies`. In some cases, a user may want the output of an operation to be consumed externally only after some other dependencies have run first. This function ensures that `output_tensor` is returned only after all operations in `dependencies` have run. Note that this does not guarantee that the op producing `output_tensor` itself is evaluated after the `dependencies` have run; only the returned value is gated on them. See also `tf.tuple` and `tf.group`. Args: dependencies: Iterable of operations to run before this op finishes. output_tensor: A `Tensor` or `IndexedSlices` that will be returned. name: (Optional) A name for this operation. Returns: Same as `output_tensor`. Raises: TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`." 8471,_GroupControlDeps,tensorflow/tensorflow/python/ops/control_flow_ops.py,2861,function, 8472,group,tensorflow/tensorflow/python/ops/control_flow_ops.py,2872,function,"Create an op that groups multiple operations. When this op finishes, all ops in `inputs` have finished. This op has no output. Note: *In TensorFlow 2 with eager and/or Autograph, you should not require this method, as code executes in your expected order.* Only use tf.group when working with v1-style code or in a graph context such as inside `Dataset.map`. When operating in a v1-style graph context, ops are not executed in the same order as specified in the code; TensorFlow will attempt to execute ops in parallel or in an order convenient to the result it is computing. `tf.group` allows you to request that one or more results finish before execution continues. `tf.group` creates a single op (of type `NoOp`), and then adds appropriate control dependencies. Thus, `c = tf.group(a, b)` will compute the same graph as this: with tf.control_dependencies([a, b]): c = tf.no_op() See also `tf.tuple` and `tf.control_dependencies`. Args: *inputs: Zero or more tensors to group. name: A name for this operation (optional). Returns: An Operation that executes all its inputs. Raises: ValueError: If an unknown keyword argument is provided." 8473,tuple_v2,tensorflow/tensorflow/python/ops/control_flow_ops.py,2951,function,"Group tensors together. This creates a tuple of tensors with the same values as the `tensors` argument, except that the value of each tensor is only returned after the values of all tensors have been computed. `control_inputs` contains additional ops that have to finish before this op finishes, but whose outputs are not returned. This can be used as a ""join"" mechanism for parallel computations: all the argument tensors can be computed in parallel, but the values of any tensor returned by `tuple` are only available after all the parallel computations are done. See also `tf.group` and `tf.control_dependencies`. Args: tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`. control_inputs: List of additional ops to finish before returning. name: (optional) A name to use as a `name_scope` for the operation. Returns: Same as `tensors`. Raises: ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`. TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` objects." 8474,tuple,tensorflow/tensorflow/python/ops/control_flow_ops.py,2988,function,"Group tensors together.
This creates a tuple of tensors with the same values as the `tensors` argument, except that the value of each tensor is only returned after the values of all tensors have been computed. `control_inputs` contains additional ops that have to finish before this op finishes, but whose outputs are not returned. This can be used as a ""join"" mechanism for parallel computations: all the argument tensors can be computed in parallel, but the values of any tensor returned by `tuple` are only available after all the parallel computations are done. See also `tf.group` and `tf.control_dependencies`. Args: tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`. name: (optional) A name to use as a `name_scope` for the operation. control_inputs: List of additional ops to finish before returning. Returns: Same as `tensors`. Raises: ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`. TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` objects." 8475,_assert_at_most_n_true,tensorflow/tensorflow/python/ops/control_flow_ops.py,3057,function,"Returns an Assert op that checks that at most n predicates are True. Args: predicates: list of bool scalar tensors. n: maximum number of true predicates allowed. msg: Error message." 8476,_case_create_default_action,tensorflow/tensorflow/python/ops/control_flow_ops.py,3078,function,"Creates default action for a list of actions and their predicates. It uses the input actions to select an arbitrary one as the default and makes sure that corresponding predicates have valid values. Args: predicates: a list of bool scalar tensors actions: a list of callable objects which return tensors. Returns: a callable" 8477,_case_verify_and_canonicalize_args,tensorflow/tensorflow/python/ops/control_flow_ops.py,3111,function,"Verifies input arguments for the case function. Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. name: A name for the case operation. allow_python_preds: if true, pred_fn_pairs may contain Python bools in addition to boolean Tensors. Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. Returns: a tuple (list of scalar bool tensors, list of callables)." 8478,_case_helper,tensorflow/tensorflow/python/ops/control_flow_ops.py,3174,function,"Implementation of case that allows for different cond functions. Args: cond_fn: method that has signature and semantics of `cond` above. pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. name: A name for this operation (optional). allow_python_preds: if true, pred_fn_pairs may contain Python bools in addition to boolean Tensors **cond_kwargs: keyword arguments that will be passed to `cond_fn`. Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by `default` if none does. Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable."
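The `tf.group` docstring above states its `control_dependencies` equivalence in prose; the following is a small v1-graph sketch of the same idea (the variable names are illustrative, not from the API):

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

v = tf.Variable(0.0)
w = tf.Variable(0.0)
a = v.assign_add(1.0)
b = w.assign_add(2.0)

# `tf.group(a, b)` builds the same graph as the explicit form below:
# a single NoOp carrying control dependencies on both update ops.
c = tf.group(a, b)
with tf.control_dependencies([a, b]):
  c_equivalent = tf.no_op()

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  sess.run(c)              # both assignments have finished once this returns
  print(sess.run([v, w]))  # [1.0, 2.0]
```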
8479,_indexed_case_verify_and_canonicalize_args,tensorflow/tensorflow/python/ops/control_flow_ops.py,3226,function,"Verifies input arguments for the case function. Args: branch_fns: Dict or list of pairs of an `int` and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. branch_index: Optional int `Tensor`, which selects for the corresponding pred_fn_pair. Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable. Returns: branch_fns: validated list of callables for each branch (default last)." 8480,_indexed_case_helper,tensorflow/tensorflow/python/ops/control_flow_ops.py,3287,function,"Implementation of case that emits the n-way indexed Case op. Args: branch_fns: Dict or list of pairs of an `int` and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. branch_index: Optional int `Tensor`, which selects for the corresponding pred_fn_pair. name: A name for this operation (optional). lower_using_switch_merge: Lower this op using switch merge ops (optional). Returns: The tensors returned by the pair whose key matched branch_index, or those returned by `default` if none does. Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable." 8481,case_v2,tensorflow/tensorflow/python/ops/control_flow_ops.py,3331,function,"Create a case operation. See also `tf.switch_case`. The `pred_fn_pairs` parameter is a list of N pairs. Each pair contains a boolean scalar tensor and a python callable that creates the tensors to be returned if the boolean evaluates to True. `default` is a callable generating a list of tensors. All the callables in `pred_fn_pairs` as well as `default` (if provided) should return the same number and types of tensors. If `exclusive==True`, all predicates are evaluated, and an exception is thrown if more than one of the predicates evaluates to `True`. If `exclusive==False`, execution stops at the first predicate which evaluates to True, and the tensors generated by the corresponding function are returned immediately. If none of the predicates evaluate to True, this operation returns the tensors generated by `default`. `tf.case` supports nested structures as implemented in `tf.contrib.framework.nest`. All of the callables must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. Singleton lists and tuples form the only exceptions to this: when returned by a callable, they are implicitly unpacked to single values. This behavior is disabled by passing `strict=True`. @compatibility(v2) `pred_fn_pairs` could be a dictionary in v1. However, tf.Tensor and tf.Variable are no longer hashable in v2, so cannot be used as a key for a dictionary. Please use a list or a tuple instead.
@end_compatibility **Example 1:** Pseudocode: ``` if (x < y) return 17; else return 23; ``` Expressions: ```python f1 = lambda: tf.constant(17) f2 = lambda: tf.constant(23) r = tf.case([(tf.less(x, y), f1)], default=f2) ``` **Example 2:** Pseudocode: ``` if (x < y && x > z) raise OpError(""Only one predicate may evaluate to True""); if (x < y) return 17; else if (x > z) return 23; else return -1; ``` Expressions: ```python def f1(): return tf.constant(17) def f2(): return tf.constant(23) def f3(): return tf.constant(-1) r = tf.case([(tf.less(x, y), f1), (tf.greater(x, z), f2)], default=f3, exclusive=True) ``` Args: pred_fn_pairs: List of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. strict: A boolean that enables/disables 'strict' mode; see above. name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by `default` if none does. Raises: TypeError: If `pred_fn_pairs` is not a list/tuple. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable." 8482,case,tensorflow/tensorflow/python/ops/control_flow_ops.py,3436,function,"Create a case operation. See also `tf.switch_case`. The `pred_fn_pairs` parameter is a dict or list of N pairs. Each pair contains a boolean scalar tensor and a python callable that creates the tensors to be returned if the boolean evaluates to True. `default` is a callable generating a list of tensors. All the callables in `pred_fn_pairs` as well as `default` (if provided) should return the same number and types of tensors. If `exclusive==True`, all predicates are evaluated, and an exception is thrown if more than one of the predicates evaluates to `True`. If `exclusive==False`, execution stops at the first predicate which evaluates to True, and the tensors generated by the corresponding function are returned immediately. If none of the predicates evaluate to True, this operation returns the tensors generated by `default`. `tf.case` supports nested structures as implemented in `tf.contrib.framework.nest`. All of the callables must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. Singleton lists and tuples form the only exceptions to this: when returned by a callable, they are implicitly unpacked to single values. This behavior is disabled by passing `strict=True`. If an unordered dictionary is used for `pred_fn_pairs`, the order of the conditional tests is not guaranteed. However, the order is guaranteed to be deterministic, so that variables created in conditional branches are created in fixed order across runs. @compatibility(eager) Unordered dictionaries are not supported in eager mode when `exclusive=False`. Use a list of tuples instead.
@end_compatibility **Example 1:** Pseudocode: ``` if (x < y) return 17; else return 23; ``` Expressions: ```python f1 = lambda: tf.constant(17) f2 = lambda: tf.constant(23) r = tf.case([(tf.less(x, y), f1)], default=f2) ``` **Example 2:** Pseudocode: ``` if (x < y && x > z) raise OpError(""Only one predicate may evaluate to True""); if (x < y) return 17; else if (x > z) return 23; else return -1; ``` Expressions: ```python def f1(): return tf.constant(17) def f2(): return tf.constant(23) def f3(): return tf.constant(-1) r = tf.case({tf.less(x, y): f1, tf.greater(x, z): f2}, default=f3, exclusive=True) ``` Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to `True`. strict: A boolean that enables/disables 'strict' mode; see above. name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by `default` if none does. Raises: TypeError: If `pred_fn_pairs` is not a list/dictionary. TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable." 8483,switch_case,tensorflow/tensorflow/python/ops/control_flow_ops.py,3544,function,"Create a switch/case operation, i.e. an integer-indexed conditional. See also `tf.case`. This op can be substantially more efficient than `tf.case` when exactly one branch will be selected. `tf.switch_case` is more like a C++ switch/case statement than `tf.case`, which is more like an if/elif/elif/else chain. The `branch_fns` parameter is either a dict from `int` to callables, or a list of (`int`, callable) pairs, or simply a list of callables (in which case the index is implicitly the key). The `branch_index` `Tensor` is used to select an element in `branch_fns` with matching `int` key, falling back to `default` if none match, or `max(keys)` if no `default` is provided. The keys must form a contiguous set from `0` to `len(branch_fns) - 1`. `tf.switch_case` supports nested structures as implemented in `tf.nest`. All callables must return the same (possibly nested) value structure of lists, tuples, and/or named tuples. **Example:** Pseudocode: ```c++ switch (branch_index) { // c-style switch case 0: return 17; case 1: return 31; default: return -1; } ``` or ```python branches = {0: lambda: 17, 1: lambda: 31} branches.get(branch_index, lambda: -1)() ``` Expressions: ```python def f1(): return tf.constant(17) def f2(): return tf.constant(31) def f3(): return tf.constant(-1) r = tf.switch_case(branch_index, branch_fns={0: f1, 1: f2}, default=f3) # Equivalent: tf.switch_case(branch_index, branch_fns={0: f1, 1: f2, 2: f3}) ``` Args: branch_index: An int Tensor specifying which of `branch_fns` should be executed. branch_fns: A `dict` mapping `int`s to callables, or a `list` of (`int`, callable) pairs, or simply a list of callables (in which case the index serves as the key). Each callable must return a matching structure of tensors. default: Optional callable that returns a structure of tensors. name: A name for this operation (optional). Returns: The tensors returned by the callable identified by `branch_index`, or those returned by `default` if no key matches and `default` was provided, or those returned by the max-keyed `branch_fn` if no `default` is provided.
Raises: TypeError: If `branch_fns` is not a list/dictionary. TypeError: If `branch_fns` is a list but does not contain 2-tuples or callables. TypeError: If `fns[i]` is not callable for any i, or `default` is not callable." 8484,execute_fn_for_device,tensorflow/tensorflow/python/ops/control_flow_ops.py,3619,function,"Executes one of the provided callables based on the device placement. This API is used when the implementation of a high-level function depends on the underlying device placement. It takes a dictionary mapping device types to callables. The device types include ""CPU"", ""GPU"", ""TPU"", etc. When the type of the device on which this op runs matches a key in 'device_branch_fns', the corresponding callable is executed, falling back to 'default_fn' if none matches. **Example:** ```python def f1(): return tf.constant(1) def f2(): return tf.constant(2) r = tf.execute_fn_for_device({""CPU"": f1, ""GPU"": f2}, default_fn=f1) ``` 'r' evaluates to 1 when it runs on a CPU, 2 when it runs on a GPU, and 1 on any other device type. Args: device_branch_fns: a dictionary of device types to the callables. Each callable must return a matching structure of tensors. default_fn: fallback callable when the underlying device does not match any key in the 'device_branch_fns'. name: A name for this operation (optional). Returns: The tensors returned by the callable identified by device type during execution, or those returned by 'default_fn' if no key matches." 8485,XLAControlFlowContext,tensorflow/tensorflow/python/ops/control_flow_ops.py,3663,class,Base class for XLA and TPU control flow contexts. 8486,from_control_flow_context_def,tensorflow/tensorflow/python/ops/control_flow_ops.py,3691,function,"Deserializes `context_def` into the appropriate ControlFlowContext. Args: context_def: ControlFlowContextDef proto import_scope: Optional `string`. Name scope to add. Returns: A ControlFlowContext subclass" 8487,CondWithManyIntermediatesBenchmark,tensorflow/tensorflow/python/ops/control_flow_ops_benchmark.py,36,class,Checks the runtime performance of outputting all intermediates.
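As a quick illustration of the `tf.switch_case` fallback rule quoted above (falling back to the max-keyed branch when no `default` is given), a minimal sketch:

```python
import tensorflow as tf

# With a plain list of callables, the position is the key. With no
# `default`, an out-of-range index falls back to the max-keyed branch.
branches = [lambda: tf.constant(17),
            lambda: tf.constant(31),
            lambda: tf.constant(-1)]

print(tf.switch_case(tf.constant(1), branches).numpy())   # 31
print(tf.switch_case(tf.constant(99), branches).numpy())  # -1 (fallback)
```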
8488,GroupTestCase,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,69,class, 8489,ShapeTestCase,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,150,class, 8490,WithDependenciesTestCase,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,160,class, 8491,SwitchTestCase,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,191,class, 8492,CondTest,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,361,class, 8493,ContextTest,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,469,class, 8494,_get_nested_shape,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,546,function, 8495,_create_tensor_array,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,559,function, 8496,_raw_nested_shape,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,567,function, 8497,DataTypesTest,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,579,class, 8498,IndexedCaseTest,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,959,class, 8499,ExecuteFnForDeviceTest,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,1225,class, 8500,CaseTest,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,1336,class, 8501,WhileLoopTestCase,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,1410,class, 8502,AssertTest,tensorflow/tensorflow/python/ops/control_flow_ops_test.py,1519,class, 8503,_GetMaxSizeFromNestedMaximumIterations,tensorflow/tensorflow/python/ops/control_flow_state.py,37,function,"Calculate a max_size for use by stack ops inside an XLA while_loop. Args: value: The value inside the while_loop forward context. Used for printing error messages. while_ctxt: The forward context inside which value resides. This does not always match the value's immediate context, as `value` may be inside e.g. a cond context inside the while_loop. Returns: A tensor containing the `max_size` to feed to a Stack initializer. Raises: ValueError: If `value` is nested inside a `while_loop` that either lacks a `maximum_iterations` parameter, or the `maximum_iterations` parameter: - is inside a `while_loop` that is a parent of the calling context, and - cannot be evaluated at graph build time to a constant." 8504,_GradLoopState,tensorflow/tensorflow/python/ops/control_flow_state.py,108,class,"The state used for constructing the gradient graph for a while loop. We create a _GradLoopState for each while loop in forward and its corresponding while loop in backprop. This gives us access to both the forward and the backprop WhileContexts. During the construction of the gradient graph, whenever we detect a forward value that is needed for backprop, we create a history accumulator and add it to `history_map`. Whenever we backprop a loop switch op (in _SwitchGrad), we add the grad merge op in `switch_map`." 8505,_ControlFlowState,tensorflow/tensorflow/python/ops/control_flow_state.py,494,class,Maintain the mapping from the loops to their grad states. 8506,MaybeCreateControlFlowState,tensorflow/tensorflow/python/ops/control_flow_state.py,761,function,"Create the state for all the while loops involved in one gradients(). We create a _ControlFlowState when there are while loops involved in gradients(). In gradients(), control flow logic is only invoked when the _ControlFlowState is not None. Note that this method modifies `between_op_list` and `between_ops`." 8507,_ZerosLikeV1,tensorflow/tensorflow/python/ops/control_flow_state.py,784,function,Branch of ZerosLike for TF1.
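The `_GradLoopState`/`_ControlFlowState` machinery above is internal, but its user-visible effect is that gradients flow through while loops. A minimal sketch of that behavior (TF 2.x eager mode; the names are illustrative):

```python
import tensorflow as tf

x = tf.constant(2.0)

with tf.GradientTape() as tape:
  tape.watch(x)
  # y = x**5, computed one factor per loop iteration; the backward pass
  # is constructed by the control-flow gradient state described above.
  _, y = tf.while_loop(lambda i, y: i < 5,
                       lambda i, y: (i + 1, y * x),
                       [tf.constant(0), tf.constant(1.0)])

print(tape.gradient(y, x).numpy())  # d(x**5)/dx at x=2 -> 5 * 2**4 = 80.0
```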
8508,_ZerosLikeV2,tensorflow/tensorflow/python/ops/control_flow_state.py,810,function,Branch of ZerosLike for TF2. 8509,ZerosLike,tensorflow/tensorflow/python/ops/control_flow_state.py,834,function,Create zeros_like for the specified output of an op. 8510,enable_control_flow_v2,tensorflow/tensorflow/python/ops/control_flow_util.py,41,function,"Use control flow v2. Do not use this symbol. This will be removed." 8511,EnableControlFlowV2,tensorflow/tensorflow/python/ops/control_flow_util.py,50,function,Returns whether control flow v2 should be used in `graph`. 8512,IsInXLAContext,tensorflow/tensorflow/python/ops/control_flow_util.py,58,function, 8513,InXlaContext,tensorflow/tensorflow/python/ops/control_flow_util.py,68,function, 8514,GraphOrParentsInXlaContext,tensorflow/tensorflow/python/ops/control_flow_util.py,73,function, 8515,IsInWhileLoop,tensorflow/tensorflow/python/ops/control_flow_util.py,82,function, 8516,IsInCond,tensorflow/tensorflow/python/ops/control_flow_util.py,87,function, 8517,IsSwitch,tensorflow/tensorflow/python/ops/control_flow_util.py,92,function,Return true if `op` is a Switch. 8518,IsMerge,tensorflow/tensorflow/python/ops/control_flow_util.py,97,function,Return true if `op` is a Merge. 8519,IsLoopEnter,tensorflow/tensorflow/python/ops/control_flow_util.py,102,function,Returns true if `op` is an Enter. 8520,IsLoopExit,tensorflow/tensorflow/python/ops/control_flow_util.py,107,function,Return true if `op` is an Exit. 8521,IsCondSwitch,tensorflow/tensorflow/python/ops/control_flow_util.py,112,function,Return true if `op` is the Switch for a conditional. 8522,IsCondMerge,tensorflow/tensorflow/python/ops/control_flow_util.py,133,function,Return true if `op` is the Merge for a conditional. 8523,IsLoopSwitch,tensorflow/tensorflow/python/ops/control_flow_util.py,150,function,Return true if `op` is the Switch for a while loop. 8524,IsLoopMerge,tensorflow/tensorflow/python/ops/control_flow_util.py,158,function,Return true if `op` is the Merge for a while loop. 8525,IsLoopConstantEnter,tensorflow/tensorflow/python/ops/control_flow_util.py,166,function,Return true iff op is a loop invariant. 8526,GetLoopConstantEnter,tensorflow/tensorflow/python/ops/control_flow_util.py,171,function,Return the enter op if we can infer `value` to be a loop invariant. 8527,GetOutputContext,tensorflow/tensorflow/python/ops/control_flow_util.py,180,function,Return the control flow context for the output of an op. 8528,GetContainingWhileContext,tensorflow/tensorflow/python/ops/control_flow_util.py,191,function,"Returns the first ancestor WhileContext of `ctxt`. Returns `ctxt` if `ctxt` is a WhileContext, or None if `ctxt` is not in a while loop. Args: ctxt: ControlFlowContext stop_ctxt: ControlFlowContext, optional. If provided, the search will end if it sees stop_ctxt. Returns: `ctxt` if `ctxt` is a WhileContext, the most nested WhileContext containing `ctxt`, or None if `ctxt` is not in a while loop. If `stop_ctxt` is not `None`, this returns `ctxt` if it matches `stop_ctxt` in its traversal." 8529,GetContainingXLAContext,tensorflow/tensorflow/python/ops/control_flow_util.py,213,function,"Returns the first ancestor XLAContext of `ctxt`. Returns `ctxt` if `ctxt` is an XLAContext, or None if `ctxt` is not in an XLA context. Args: ctxt: ControlFlowContext Returns: `ctxt` if `ctxt` is an XLAContext, the most nested XLAContext containing `ctxt`, or None if `ctxt` is not in an XLA context."
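A hedged sketch of how these `control_flow_util` predicates can inspect a legacy loop graph; this assumes a TF 2.x install where `tf.compat.v1.disable_control_flow_v2()` is available to force the low-level `Switch`/`Merge` representation:

```python
import tensorflow as tf
from tensorflow.python.ops import control_flow_util

tf.compat.v1.disable_eager_execution()
tf.compat.v1.disable_control_flow_v2()  # emit Switch/Merge/Enter/Exit ops

i = tf.constant(0)
r = tf.while_loop(lambda i: i < 10, lambda i: i + 1, [i])

# The v1 loop shows up as raw dataflow primitives in the graph.
graph_ops = r.graph.get_operations()
print([op.name for op in graph_ops if control_flow_util.IsLoopSwitch(op)])
print([op.name for op in graph_ops if control_flow_util.IsLoopExit(op)])
```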
8530,GetContainingCondContext,tensorflow/tensorflow/python/ops/control_flow_util.py,232,function,"Returns the first ancestor CondContext of `ctxt`. Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond. Args: ctxt: ControlFlowContext Returns: `ctxt` if `ctxt` is a CondContext, the most nested CondContext containing `ctxt`, or None if `ctxt` is not in a cond." 8531,IsContainingContext,tensorflow/tensorflow/python/ops/control_flow_util.py,250,function,Returns true if `maybe_containing_ctxt` is or contains `ctxt`. 8532,OpInContext,tensorflow/tensorflow/python/ops/control_flow_util.py,258,function, 8533,TensorInContext,tensorflow/tensorflow/python/ops/control_flow_util.py,262,function, 8534,CheckInputFromValidContext,tensorflow/tensorflow/python/ops/control_flow_util.py,266,function,"Returns whether `input_op` can be used from `op`'s context. Conceptually, only inputs from op's while context or any ancestor while context (including outside of any context) are valid. In practice, there are many other edge cases as well. Args: op: Operation input_op: Operation Raises: ValueError: if input_op is from an invalid context." 8535,GetWhileContext,tensorflow/tensorflow/python/ops/control_flow_util.py,366,function,Get the WhileContext to which this op belongs. 8536,in_defun,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,44,function,"Returns whether the current graph is, or is nested in, a defun." 8537,in_while_loop_defun,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,56,function,Returns whether the graph is a while loop FuncGraph. 8538,create_new_tf_function,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,63,function,"Converts func_graph to a TF_Function and adds it to the current graph. Args: func_graph: FuncGraph Returns: The name of the new TF_Function." 8539,unique_fn_name,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,78,function,"Returns a unique name to use for a control flow function. Args: scope: A name scope string. name: An identifier for this function (e.g. ""true"", ""body""). Returns: A string, the name to use for the function." 8540,unique_grad_fn_name,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,91,function, 8541,maybe_set_lowering_attr,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,95,function,"Sets the flag to enable lowering on `op` if necessary. Lowering allows cond_v2 and while_v2 to avoid some of the limitations of Functions, allowing users to specify devices & colocation inside of cond_v2 and while_v2 input functions, and enabling non-strict evaluation & partial pruning. This brings v2 control flow closer to feature parity with v1 control flow. However, we do not lower in the following cases: - When the `If` or `While` ops are in the XLA context. Because it is easier for XLA to apply its own optimizations when dealing with un-lowered control flow operators than with low-level control flow primitives. - When the eager execution context specifies the executor of functions to be the single threaded executor (see context.function_executor_type()). Because the single threaded executor does not support v1 control flow ops. - When 'lower_using_switch_merge' is explicitly set to False. Args: op: An `If` or `While` Operation. lower_using_switch_merge: Explicit value to lower or not (optional)." 8542,maybe_propagate_compile_time_consts_in_xla,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,131,function,"Tells XLA whether to propagate compile-time consts in the loop body.
This is needed to make compile time constants available to ops, for example `max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this would always be turned on, but that doesn't work with legacy functionalized while_loops. Args: op: A `While` Operation." 8543,resource_input_index,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,149,function,"Returns the index of the input corresponding to `tensor_name`. This method is used to find the corresponding index of an arbitrary resource tensor in a function (the function could be a loop body). We assume that resource handles are never created in functions, so that every resource tensor can be traced back to a function input. The awkward signature of this method is to make it work with both FuncGraphs and FunctionDefs. This is so we can recurse on function call ops without building the corresponding FuncGraph (note that even if a FuncGraph for a FunctionDef already exists, the input/output/node names may have been changed when the FuncGraph was serialized to the FunctionDef, which makes it unusable with this algorithm). Args: tensor_name: the name of the resource tensor to be resolved to an input. input_names: a list of the names of all inputs to the function. node_defs: a dict mapping op name -> NodeDef for every op in the function. functions: a dict mapping function name -> _EagerDefinedFunction. Returns: The index into input_names corresponding to `tensor_name`." 8544,clear_control_inputs,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,221,function,"Clears the control inputs but preserves the ControlFlowContext. This is needed to preserve the XLAControlFlowControl when clearing control inputs for the gradient accumulators in while_v2. `ops.control_dependencies` does not allow that. Yields: A context manager in which the ops created will not have any control inputs by default but the control flow context is the same." 8545,_is_tpu_strategy,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,240,function, 8546,_register_keras_layer_context_function,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,245,function, 8547,_is_building_keras_layer,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,251,function, 8548,output_all_intermediates,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,260,function,"Whether to output all intermediates of a functional control flow op. The default behavior is to output intermediates only when building a Keras Layer in graph mode, and only when certain other conditions are met: 1. We do not output intermediates if the functional control flow op is being built inside a FuncGraph which is not an If/While graph. This guards against outputting intermediates in eager mode since keras adds tensors to a FuncGraph named ""keras_graph"" in that case. Also, because we do not output intermediates of tf.function (since this feature is only for backwards compatibility), outputting intermediates of functional control flow ops built inside tf.function is of no value. 2. We do not output intermediates when the compilation is using XLA or for a TPU. 3. We do not output intermediates when a single threaded executor is used since that does not perform inlining and pruning. Returns: A bool telling whether to output all intermediates." 8549,get_func_graph,tensorflow/tensorflow/python/ops/control_flow_util_v2.py,293,function,Generates and returns a FuncGraph for the given op and input_shapes.
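A small sketch of the functional `While` op that these v2 utilities produce inside a `tf.function` FuncGraph (the substring filter is illustrative; depending on the version, the emitted op type may be `While` or `StatelessWhile`):

```python
import tensorflow as tf

@tf.function
def count_to(n):
  return tf.while_loop(lambda i: i < n, lambda i: i + 1, [tf.constant(0)])

# Under v2 control flow the loop appears as one functional While op whose
# cond and body live in separate FuncGraphs (see create_new_tf_function).
graph = count_to.get_concrete_function(tf.constant(10)).graph
print([op.type for op in graph.get_operations() if "While" in op.type])
```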
8550,ControlFlowV2DisableTest,tensorflow/tensorflow/python/ops/control_flow_v2_disable_test.py,31,class, 8551,ControlFlowV2EnableTest,tensorflow/tensorflow/python/ops/control_flow_v2_enable_test.py,30,class, 8552,CondBranchFuncGraph,tensorflow/tensorflow/python/ops/control_flow_v2_func_graphs.py,25,class,"FuncGraph for branches of tf.cond(). This is used to distinguish cond branches from other functions." 8553,WhileCondFuncGraph,tensorflow/tensorflow/python/ops/control_flow_v2_func_graphs.py,38,class,"FuncGraph for the condition of tf.while_loop(). This is used to distinguish while conditions from other functions." 8554,WhileBodyFuncGraph,tensorflow/tensorflow/python/ops/control_flow_v2_func_graphs.py,51,class,"FuncGraph for the body of tf.while_loop(). This is used to distinguish while bodies from other functions." 8555,enable_control_flow_v2,tensorflow/tensorflow/python/ops/control_flow_v2_toggles.py,29,function,"Use control flow v2. control flow v2 (cfv2) is an improved version of control flow in TensorFlow with support for higher order derivatives. Enabling cfv2 will change the graph/function representation of control flow, e.g., `tf.while_loop` and `tf.cond` will generate functional `While` and `If` ops instead of low-level `Switch`, `Merge` etc. ops. Note: Importing and running graphs exported with old control flow will still be supported. Calling tf.enable_control_flow_v2() lets you opt-in to this TensorFlow 2.0 feature. Note: v2 control flow is always enabled inside of tf.function. Calling this function is not required." 8556,disable_control_flow_v2,tensorflow/tensorflow/python/ops/control_flow_v2_toggles.py,51,function,"Opts out of control flow v2. Note: v2 control flow is always enabled inside of tf.function. Calling this function has no effect in that case. If your code needs tf.disable_control_flow_v2() to be called to work properly, please file a bug." 8557,control_flow_v2_enabled,tensorflow/tensorflow/python/ops/control_flow_v2_toggles.py,66,function,"Returns `True` if v2 control flow is enabled. Note: v2 control flow is always enabled inside of tf.function." 8558,output_all_intermediates,tensorflow/tensorflow/python/ops/control_flow_v2_toggles.py,75,function,"Whether to output all intermediates from functional control flow ops. The ""default"" behavior is to output all intermediates when using v2 control flow inside Keras models in graph mode (possibly inside Estimators). This is needed to support taking gradients of v2 control flow. In graph mode, Keras can sometimes freeze the forward graph before the gradient computation which does not work for v2 control flow since it requires updating the forward ops to output the needed intermediates. We work around this by proactively outputting the needed intermediates when building the forward pass itself. Ideally any such extra tensors should be pruned out at runtime. However, if for any reason this doesn't work for you or if you have an inference-only model you can turn this behavior off using `tf.compat.v1.experimental.output_all_intermediates(False)`. If with the default behavior you are still seeing errors of the form ""Connecting to invalid output X of source node Y which has Z outputs"", try setting `tf.compat.v1.experimental.output_all_intermediates(True)` and please file an issue at https://github.com/tensorflow/tensorflow/issues. Args: state: True, False or None. None restores the default behavior."
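The toggles above are exposed under `tf.compat.v1`; a quick sketch of their use (the call sequence is illustrative):

```python
import tensorflow as tf

tf.compat.v1.disable_control_flow_v2()
print(tf.compat.v1.control_flow_v2_enabled())  # False

tf.compat.v1.enable_control_flow_v2()
print(tf.compat.v1.control_flow_v2_enabled())  # True

# Output-all-intermediates workaround for legacy Keras-in-graph-mode code;
# passing None restores the default behavior.
tf.compat.v1.experimental.output_all_intermediates(True)
tf.compat.v1.experimental.output_all_intermediates(None)
```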
8559,ControlFlowV2TogglesTest,tensorflow/tensorflow/python/ops/control_flow_v2_toggles_test.py,27,class, 8560,build_graph,tensorflow/tensorflow/python/ops/conv2d_benchmark.py,44,function,"Builds a graph containing a sequence of conv2d operations. Args: device: String, the device to run on. dtype: Data type for the convolution. data_format: A string from: ""NHWC"" or ""NCHW"". Data format for input and output data. input_shape: Shape of the input tensor. filter_shape: Shape of the filter tensor. strides: A list of ints. 1-D of length 4. The stride of sliding window for each dimension of input. padding: A string from: ""SAME"", ""VALID"". The type of padding algorithm to use. num_iters: number of iterations to run conv2d. warmup_iters: number of iterations for warmup runs. Returns: An array of tensors to run()" 8561,Conv2DBenchmark,tensorflow/tensorflow/python/ops/conv2d_benchmark.py,94,class,Benchmark conv2d! 8562,_ExecutionSignature,tensorflow/tensorflow/python/ops/critical_section_ops.py,45,class,A class storing an `ExecuteInCriticalResource` op and associated attrs. 8563,_identity,tensorflow/tensorflow/python/ops/critical_section_ops.py,53,function,"Identity op that recognizes `TensorArray`, `Operation`, and `Tensor`." 8564,_get_device_or_colocation,tensorflow/tensorflow/python/ops/critical_section_ops.py,65,function, 8565,_get_colocation,tensorflow/tensorflow/python/ops/critical_section_ops.py,69,function,"Get colocation symbol from op, if any." 8566,_get_critical_section_stack,tensorflow/tensorflow/python/ops/critical_section_ops.py,80,function, 8567,_push_critical_section_stack,tensorflow/tensorflow/python/ops/critical_section_ops.py,89,function,"Push a CriticalSection._signature to the thread-local stack. If the signature is already on the stack, raise an error because it means we're trying to execute inside the same locked CriticalSection, which will create a deadlock. Args: signature: Tuple of the type `CriticalSection._signature`. Uniquely identifies a CriticalSection by its `shared_name`, `container`, and device. Yields: An empty value. The context is guaranteed to run without deadlock. Raises: ValueError: If the signature is already on the stack. RuntimeError: If another thread or function modifies the current stack entry during the yield." 8568,CriticalSection,tensorflow/tensorflow/python/ops/critical_section_ops.py,126,class,"Critical section. A `CriticalSection` object is a resource in the graph which executes subgraphs in **serial** order. A common example of a subgraph one may wish to run exclusively is the one given by the following function: ```python v = resource_variable_ops.ResourceVariable(0.0, name=""v"") def count(): value = v.read_value() with tf.control_dependencies([value]): with tf.control_dependencies([v.assign_add(1)]): return tf.identity(value) ``` Here, a snapshot of `v` is captured in `value`; and then `v` is updated. The snapshot value is returned. If multiple workers or threads all execute `count` in parallel, there is no guarantee that access to the variable `v` is atomic at any point within any thread's calculation of `count`. In fact, even implementing an atomic counter that guarantees that the user will see each value `0, 1, ...,` is currently impossible.
The solution is to ensure any access to the underlying resource `v` is only processed through a critical section: ```python cs = CriticalSection() f1 = cs.execute(count) f2 = cs.execute(count) output = f1 + f2 session.run(output) ``` The functions `f1` and `f2` will be executed serially, and updates to `v` will be atomic. **NOTES** All resource objects, including the critical section and any captured variables of functions executed on that critical section, will be colocated to the same device (host and cpu/gpu). When using multiple critical sections on the same resources, there is no guarantee of exclusive access to those resources. This behavior is disallowed by default (but see the kwarg `exclusive_resource_access`). For example, running the same function in two separate critical sections will not ensure serial execution: ```python v = tf.compat.v1.get_variable(""v"", initializer=0.0, use_resource=True) def accumulate(up): x = v.read_value() with tf.control_dependencies([x]): with tf.control_dependencies([v.assign_add(up)]): return tf.identity(x) ex1 = CriticalSection().execute( accumulate, 1.0, exclusive_resource_access=False) ex2 = CriticalSection().execute( accumulate, 1.0, exclusive_resource_access=False) bad_sum = ex1 + ex2 sess.run(v.initializer) sess.run(bad_sum) # May return 0.0 ```" 8569,_get_context_device_type,tensorflow/tensorflow/python/ops/ctc_ops.py,56,function,"Parse the current context and return the device type, e.g. CPU/GPU." 8570,_generate_defun_backend,tensorflow/tensorflow/python/ops/ctc_ops.py,64,function, 8571,ctc_loss,tensorflow/tensorflow/python/ops/ctc_ops.py,75,function,"Computes the CTC (Connectionist Temporal Classification) Loss. This op implements the CTC loss as presented in (Graves et al., 2006). Input requirements: ``` sequence_length(b) <= time for all b max(labels.indices(labels.indices[:, 1] == b, 2)) <= sequence_length(b) for all b. ``` Notes: This op performs the softmax operation for you, so inputs should be e.g. linear projections of outputs by an LSTM. The `inputs` Tensor's innermost dimension size, `num_classes`, represents `num_labels + 1` classes, where num_labels is the number of true labels, and the largest value `(num_classes - 1)` is reserved for the blank label. For example, for a vocabulary containing 3 labels `[a, b, c]`, `num_classes = 4` and the labels indexing is `{a: 0, b: 1, c: 2, blank: 3}`. Regarding the arguments `preprocess_collapse_repeated` and `ctc_merge_repeated`: If `preprocess_collapse_repeated` is True, then a preprocessing step runs before loss calculation, wherein repeated labels passed to the loss are merged into single labels. This is useful if the training labels come from, e.g., forced alignments and therefore have unnecessary repetitions. If `ctc_merge_repeated` is set to False, then deep within the CTC calculation, repeated non-blank labels will not be merged and are interpreted as individual labels. This is a simplified (non-standard) version of CTC. Here is a table of the (roughly) expected first-order behavior: * `preprocess_collapse_repeated=False`, `ctc_merge_repeated=True` Classical CTC behavior: Outputs true repeated classes with blanks in between, and can also output repeated classes with no blanks in between that need to be collapsed by the decoder. * `preprocess_collapse_repeated=True`, `ctc_merge_repeated=False` Never learns to output repeated classes, as they are collapsed in the input labels before training.
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=False` Outputs repeated classes with blanks in between, but generally does not require the decoder to collapse/merge repeated classes. * `preprocess_collapse_repeated=True`, `ctc_merge_repeated=True` Untested. Very likely will not learn to output repeated classes. The `ignore_longer_outputs_than_inputs` option allows specifying the behavior of the CTCLoss when dealing with sequences that have longer outputs than inputs. If true, the CTCLoss will simply return zero gradient for those items; otherwise, an InvalidArgument error is returned, stopping training. Args: labels: An `int32` `SparseTensor`. `labels.indices[i, :] == [b, t]` means `labels.values[i]` stores the id for (batch b, time t). `labels.values[i]` must take on values in `[0, num_labels)`. See `core/ops/ctc_ops.cc` for more details. inputs: 3-D `float` `Tensor`. If time_major == False, this will be a `Tensor` shaped: `[batch_size, max_time, num_classes]`. If time_major == True (default), this will be a `Tensor` shaped: `[max_time, batch_size, num_classes]`. The logits. sequence_length: 1-D `int32` vector, size `[batch_size]`. The sequence lengths. preprocess_collapse_repeated: Boolean. Default: False. If True, repeated labels are collapsed prior to the CTC calculation. ctc_merge_repeated: Boolean. Default: True. ignore_longer_outputs_than_inputs: Boolean. Default: False. If True, sequences with longer outputs than inputs will be ignored. time_major: The shape format of the `inputs` Tensors. If True, these `Tensors` must be shaped `[max_time, batch_size, num_classes]`. If False, these `Tensors` must be shaped `[batch_size, max_time, num_classes]`. Using `time_major = True` (default) is a bit more efficient because it avoids transposes at the beginning of the ctc_loss calculation. However, most TensorFlow data is batch-major, so this function also accepts inputs in batch-major form. logits: Alias for inputs. Returns: A 1-D `float` `Tensor`, size `[batch]`, containing the negative log probabilities. Raises: TypeError: if labels is not a `SparseTensor`. References: Connectionist Temporal Classification - Labeling Unsegmented Sequence Data with Recurrent Neural Networks: [Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891) ([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))" 8572,_ctc_loss_impl,tensorflow/tensorflow/python/ops/ctc_ops.py,198,function, 8573,_CTCLossGradImpl,tensorflow/tensorflow/python/ops/ctc_ops.py,241,function, 8574,_CTCLossGrad,tensorflow/tensorflow/python/ops/ctc_ops.py,260,function,"The derivative provided by CTC Loss. Args: op: the CTCLoss op. grad_loss: The backprop for cost. Returns: The CTC Loss gradient." 8575,_CTCLossV2Grad,tensorflow/tensorflow/python/ops/ctc_ops.py,275,function,"The derivative provided by CTC Loss V2. Args: op: the CTCLossV2 op. grad_loss: The backprop for cost. Returns: The CTC Loss V2 gradient." 8576,ctc_greedy_decoder,tensorflow/tensorflow/python/ops/ctc_ops.py,290,function,"Performs greedy decoding on the logits given in input (best path). Note: Regardless of the value of merge_repeated, if the maximum index of a given time and batch corresponds to the blank index `(num_classes - 1)`, no new element is emitted. If `merge_repeated` is `True`, merge repeated classes in output. This means that if consecutive logits' maximum indices are the same, only the first of these is emitted. The sequence `A B B * B * B` (where '*' is the blank label) becomes * `A B B B` if `merge_repeated=True`.
* `A B B B B` if `merge_repeated=False`. Args: inputs: 3-D `float` `Tensor` sized `[max_time, batch_size, num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. merge_repeated: Boolean. Default: True. Returns: A tuple `(decoded, neg_sum_logits)` where decoded: A single-element list. `decoded[0]` is a `SparseTensor` containing the decoded outputs s.t.: `decoded.indices`: Indices matrix `(total_decoded_outputs, 2)`. The rows store: `[batch, time]`. `decoded.values`: Values vector, size `(total_decoded_outputs)`. The vector stores the decoded classes. `decoded.dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length]` neg_sum_logits: A `float` matrix `(batch_size x 1)` containing, for the sequence found, the negative of the sum of the greatest logit at each timeframe." 8577,ctc_beam_search_decoder,tensorflow/tensorflow/python/ops/ctc_ops.py,340,function,"Performs beam search decoding on the logits given in input. **Note** The `ctc_greedy_decoder` is a special case of the `ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but that decoder is faster for this special case). If `merge_repeated` is `True`, merge repeated classes in the output beams. This means that if consecutive entries in a beam are the same, only the first of these is emitted. That is, when the sequence is `A B B * B * B` (where '*' is the blank label), the return value is: * `A B` if `merge_repeated = True`. * `A B B B` if `merge_repeated = False`. Args: inputs: 3-D `float` `Tensor`, size `[max_time x batch_size x num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. beam_width: An int scalar >= 0 (beam search beam width). top_paths: An int scalar >= 0, <= beam_width (controls output size). merge_repeated: Boolean. Default: True. Returns: A tuple `(decoded, log_probabilities)` where decoded: A list of length top_paths, where `decoded[j]` is a `SparseTensor` containing the decoded outputs: `decoded[j].indices`: Indices matrix `(total_decoded_outputs[j] x 2)` The rows store: [batch, time]. `decoded[j].values`: Values vector, size `(total_decoded_outputs[j])`. The vector stores the decoded classes for beam j. `decoded[j].dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length[j]]`. log_probability: A `float` matrix `(batch_size x top_paths)` containing sequence log-probabilities." 8578,ctc_beam_search_decoder_v2,tensorflow/tensorflow/python/ops/ctc_ops.py,403,function,"Performs beam search decoding on the logits given in input. **Note** The `ctc_greedy_decoder` is a special case of the `ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but that decoder is faster for this special case). Args: inputs: 3-D `float` `Tensor`, size `[max_time, batch_size, num_classes]`. The logits. sequence_length: 1-D `int32` vector containing sequence lengths, having size `[batch_size]`. beam_width: An int scalar >= 0 (beam search beam width). top_paths: An int scalar >= 0, <= beam_width (controls output size). Returns: A tuple `(decoded, log_probabilities)` where decoded: A list of length top_paths, where `decoded[j]` is a `SparseTensor` containing the decoded outputs: `decoded[j].indices`: Indices matrix `[total_decoded_outputs[j], 2]`; The rows store: `[batch, time]`. `decoded[j].values`: Values vector, size `[total_decoded_outputs[j]]`. The vector stores the decoded classes for beam `j`.
`decoded[j].dense_shape`: Shape vector, size `(2)`. The shape values are: `[batch_size, max_decoded_length[j]]`. log_probabilities: A `float` matrix `[batch_size, top_paths]` containing sequence log-probabilities." 8579,_ctc_state_trans,tensorflow/tensorflow/python/ops/ctc_ops.py,454,function,"Compute CTC alignment model transition matrix. Args: label_seq: tensor of shape [batch_size, max_seq_length] Returns: tensor of shape [batch_size, states, states] with a state transition matrix computed for each sequence of the batch." 8580,ctc_state_log_probs,tensorflow/tensorflow/python/ops/ctc_ops.py,510,function,"Computes CTC alignment initial and final state log probabilities. Create the initial/final state values directly as log values to avoid having to take a float64 log on tpu (which does not exist). Args: seq_lengths: int tensor of shape [batch_size], seq lengths in the batch. max_seq_length: int, max sequence length possible. Returns: initial_state_log_probs, final_state_log_probs" 8581,_ilabel_to_state,tensorflow/tensorflow/python/ops/ctc_ops.py,550,function,Project ilabel log probs to state log probs. 8582,_state_to_olabel,tensorflow/tensorflow/python/ops/ctc_ops.py,566,function,Sum state log probs to ilabel log probs. 8583,_state_to_olabel_unique,tensorflow/tensorflow/python/ops/ctc_ops.py,585,function,Sum state log probs to ilabel log probs using unique label indices. 8584,ctc_loss_and_grad,tensorflow/tensorflow/python/ops/ctc_ops.py,629,function,"Computes the CTC loss and gradients. Most users will want fwd_bwd.ctc_loss. This function returns the computed gradient; it does not have a gradient of its own defined. Args: logits: tensor of shape [frames, batch_size, num_labels] labels: tensor of shape [batch_size, max_label_seq_length] label_length: tensor of shape [batch_size] Length of reference label sequence in labels. logit_length: tensor of shape [batch_size] Length of input sequence in logits. unique: (optional) unique label indices as computed by unique(labels). If supplied, enables an implementation that is faster and more memory efficient on TPU. Returns: loss: tensor of shape [batch_size] gradient: tensor of shape [frames, batch_size, num_labels]" 8585,_ctc_loss_grad,tensorflow/tensorflow/python/ops/ctc_ops.py,690,function, 8586,_ctc_loss_op_standard,tensorflow/tensorflow/python/ops/ctc_ops.py,697,function, 8587,_ctc_loss_op_cudnn,tensorflow/tensorflow/python/ops/ctc_ops.py,715,function, 8588,_ctc_loss_shape,tensorflow/tensorflow/python/ops/ctc_ops.py,733,function, 8589,ctc_loss_v2,tensorflow/tensorflow/python/ops/ctc_ops.py,740,function,"Computes CTC (Connectionist Temporal Classification) loss. This op implements the CTC loss as presented in (Graves et al., 2006). Notes: - Same as the ""Classic CTC"" in TensorFlow 1.x's tf.compat.v1.nn.ctc_loss setting of preprocess_collapse_repeated=False, ctc_merge_repeated=True - Labels may be supplied as either a dense, zero-padded tensor with a vector of label sequence lengths OR as a SparseTensor. - On TPU and GPU: Only dense padded labels are supported. - On CPU: Caller may use SparseTensor or dense padded labels but calling with a SparseTensor will be significantly faster. - Default blank label is 0 rather than num_classes - 1, unless overridden by blank_index. Args: labels: tensor of shape [batch_size, max_label_seq_length] or SparseTensor logits: tensor of shape [frames, batch_size, num_labels], if logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size], None if labels is SparseTensor. Length of reference label sequence in labels. logit_length: tensor of shape [batch_size] Length of input sequence in logits. logits_time_major: (optional) If True (default), logits is shaped [time, batch, logits]. If False, shape is [batch, time, logits]. unique: (optional) Unique label indices as computed by ctc_unique_labels(labels). If supplied, enables a faster, memory efficient implementation on TPU. blank_index: (optional) Set the class index to use for the blank label. Negative values will start from num_classes, i.e., -1 will reproduce the ctc_loss behavior of using num_classes - 1 for the blank symbol. There is some memory/performance overhead to switching from the default of 0 as an additional shifted copy of the logits may be created. name: A name for this `Op`. Defaults to ""ctc_loss_dense"". Returns: loss: tensor of shape [batch_size], negative log probabilities. References: Connectionist Temporal Classification - Labeling Unsegmented Sequence Data with Recurrent Neural Networks: [Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891) ([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))" 8590,ctc_loss_v3,tensorflow/tensorflow/python/ops/ctc_ops.py,835,function,"Computes CTC (Connectionist Temporal Classification) loss. This op implements the CTC loss as presented in (Graves et al., 2006). Notes: - Same as the ""Classic CTC"" in TensorFlow 1.x's tf.compat.v1.nn.ctc_loss setting of preprocess_collapse_repeated=False, ctc_merge_repeated=True - Labels may be supplied as either a dense, zero-padded tensor with a vector of label sequence lengths OR as a SparseTensor. - On TPU and GPU: Only dense padded labels are supported. - On CPU: Caller may use SparseTensor or dense padded labels but calling with a SparseTensor will be significantly faster. - Default blank label is 0 rather than num_classes - 1, unless overridden by blank_index. Args: labels: tensor of shape [batch_size, max_label_seq_length] or SparseTensor logits: tensor of shape [frames, batch_size, num_labels], if logits_time_major == False, shape is [batch_size, frames, num_labels]. label_length: tensor of shape [batch_size], None if labels is SparseTensor. Length of reference label sequence in labels. logit_length: tensor of shape [batch_size] Length of input sequence in logits. logits_time_major: (optional) If True (default), logits is shaped [time, batch, logits]. If False, shape is [batch, time, logits]. unique: (optional) Unique label indices as computed by ctc_unique_labels(labels). If supplied, enables a faster, memory efficient implementation on TPU. blank_index: (optional) Set the class index to use for the blank label. Negative values will start from num_classes, i.e., -1 will reproduce the ctc_loss behavior of using num_classes - 1 for the blank symbol. There is some memory/performance overhead to switching from the default of 0 as an additional shifted copy of the logits may be created. name: A name for this `Op`. Defaults to ""ctc_loss_dense"". Returns: loss: tensor of shape [batch_size], negative log probabilities. References: Connectionist Temporal Classification - Labeling Unsegmented Sequence Data with Recurrent Neural Networks: [Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891) ([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))" 8591,ctc_loss_dense,tensorflow/tensorflow/python/ops/ctc_ops.py,939,function,"Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006), using the batched forward backward algorithm described in (Sim et al., 2017). Notes: Significant differences from tf.compat.v1.nn.ctc_loss: Supports GPU and TPU (tf.compat.v1.nn.ctc_loss supports CPU only): For batched operations, GPU and TPU are significantly faster than using ctc_loss on CPU. This implementation runs on CPU, but is significantly slower than ctc_loss. Blank label is 0 rather than num_classes - 1, unless overridden by blank_index. Logits and labels are dense arrays with padding rather than SparseTensor. The only mode supported is the same as: preprocess_collapse_repeated=False, ctc_merge_repeated=True. To collapse labels, the caller can preprocess the label sequence first. The dense implementation supports CPU, GPU and TPU. A fast path is provided that significantly improves memory use for large vocabulary if the caller preprocesses label sequences to get unique label indices on the CPU (e.g. in the data input pipeline) using ctc_ops.unique and supplies this in the optional ""unique"" kwarg. This is especially useful for TPU and GPU but also works on CPU. Args: labels: tensor of shape [batch_size, max_label_seq_length] logits: tensor of shape [frames, batch_size, num_labels], if logits_time_major == False, shape is [batch_size, frames, num_labels]. label_length: tensor of shape [batch_size] Length of reference label sequence in labels. logit_length: tensor of shape [batch_size] Length of input sequence in logits. logits_time_major: (optional) If True (default), logits is shaped [time, batch, logits]. If False, shape is [batch, time, logits]. unique: (optional) Unique label indices as computed by unique(labels). If supplied, enables a faster, memory efficient implementation on TPU. blank_index: (optional) Set the class index to use for the blank label. Negative values will start from num_classes, i.e., -1 will reproduce the ctc_loss behavior of using num_classes - 1 for the blank symbol. There is some memory/performance overhead to switching from the default of 0 as an additional shifted copy of the logits may be created. name: A name for this `Op`. Defaults to ""ctc_loss_dense"". Returns: loss: tensor of shape [batch_size], negative log probabilities. References: Connectionist Temporal Classification - Labeling Unsegmented Sequence Data with Recurrent Neural Networks: [Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891) ([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf)) Improving the efficiency of forward-backward algorithm using batched computation in TensorFlow: [Sim et al., 2017](https://ieeexplore.ieee.org/document/8268944) ([pdf](http://bacchiani.net/resume/papers/ASRU2017.pdf))" 8592,collapse_repeated,tensorflow/tensorflow/python/ops/ctc_ops.py,1067,function,"Merge repeated labels into single labels. Args: labels: Tensor of shape [batch, max value in seq_length] seq_length: Tensor of shape [batch], sequence length of each batch element. name: A name for this `Op`. Defaults to ""collapse_repeated_labels"". Returns: A tuple `(collapsed_labels, new_seq_length)` where collapsed_labels: Tensor of shape [batch, max_seq_length] with repeated labels collapsed and padded to max_seq_length, e.g.: `[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]` new_seq_length: int tensor of shape [batch] with new sequence lengths." 8593,dense_labels_to_sparse,tensorflow/tensorflow/python/ops/ctc_ops.py,1129,function,"Convert dense labels with sequence lengths to sparse tensor.
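For illustration, a minimal sketch of this conversion built from public ops (`tf.sparse.from_dense` would drop valid zero labels, so the indices are built explicitly; the tensors here are arbitrary):

```python
import tensorflow as tf

# Zero-padded dense labels, shape [batch, max_length], with per-row lengths.
dense = tf.constant([[1, 2, 0], [3, 0, 0]])
length = tf.constant([2, 1])
mask = tf.sequence_mask(length, maxlen=tf.shape(dense)[1])
indices = tf.where(mask)  # positions of the valid labels, int64
sparse = tf.sparse.SparseTensor(
    indices=indices,
    values=tf.gather_nd(dense, indices),
    dense_shape=tf.shape(dense, out_type=tf.int64))
# sparse holds values [1, 2, 3] at [[0, 0], [0, 1], [1, 0]].
```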
Args: dense: tensor of shape [batch, max_length] length: int tensor of shape [batch] The length of each sequence in dense. Returns: tf.sparse.SparseTensor with values only for the valid elements of sequences." 8594,ctc_unique_labels,tensorflow/tensorflow/python/ops/ctc_ops.py,1165,function,"Get unique labels and indices for batched labels for `tf.nn.ctc_loss`. For use with `tf.nn.ctc_loss` optional argument `unique`: This op can be used to preprocess labels in the input pipeline for better speed/memory use when computing the ctc loss on TPU. Example: ctc_unique_labels([[3, 4, 4, 3]]) -> unique labels padded with 0: [[3, 4, 0, 0]] indices of original labels in unique: [0, 1, 1, 0] Args: labels: tensor of shape [batch_size, max_label_length] padded with 0. name: A name for this `Op`. Defaults to ""ctc_unique_labels"". Returns: tuple of - unique labels, tensor of shape `[batch_size, max_label_length]` - indices into unique labels, shape `[batch_size, max_label_length]`" 8595,_sum_states,tensorflow/tensorflow/python/ops/ctc_ops.py,1199,function,"Take logsumexp for each unique state out of all label states. Args: idx: tensor of shape [batch, label_length] For each sequence, indices into a set of unique labels as computed by calling unique. states: tensor of shape [frames, batch, label_length] Log probabilities for each label state. Returns: tensor of shape [frames, batch_size, label_length], log probabilities summed for each unique label of the sequence." 8596,_forward_backward_log,tensorflow/tensorflow/python/ops/ctc_ops.py,1226,function,"Forward-backward algorithm computed in log domain. Args: state_trans_log_probs: tensor of shape [states, states] or if different transition matrix per batch [batch_size, states, states] initial_state_log_probs: tensor of shape [batch_size, states] final_state_log_probs: tensor of shape [batch_size, states] observed_log_probs: tensor of shape [frames, batch_size, states] sequence_length: tensor of shape [batch_size] Returns: forward backward log probabilities: tensor of shape [frames, batch, states] log_likelihood: tensor of shape [batch_size] Raises: ValueError: If state_trans_log_probs has unknown or incorrect rank."
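For illustration, a minimal log-domain forward recursion over such a transition matrix (an illustrative sketch only, not the library implementation; shapes follow the docstring above, and the number of frames is assumed static):

```python
import tensorflow as tf

def forward_log_probs(state_trans_log_probs, initial_state_log_probs,
                      observed_log_probs):
  """Illustrative log-domain forward pass (not the library code).

  state_trans_log_probs: [states, states] with entry [i, j] = log P(j | i).
  initial_state_log_probs: [batch, states].
  observed_log_probs: [frames, batch, states].
  """
  alpha = initial_state_log_probs  # [batch, states]
  for t in range(observed_log_probs.shape[0]):
    # logsumexp over predecessor states replaces a matmul in probability space.
    alpha = tf.reduce_logsumexp(
        alpha[:, :, None] + state_trans_log_probs[None, :, :], axis=1)
    alpha += observed_log_probs[t]
  return alpha
```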
8597,_scan,tensorflow/tensorflow/python/ops/ctc_ops.py,1316,function,"Repeatedly applies callable `fn` to a sequence of elements. Implemented by functional_ops.While, tpu friendly, no gradient. This is similar to functional_ops.scan but significantly faster on tpu/gpu for the forward backward use case. Examples: scan(lambda a, e: a + e, [1.0, 2.0, 3.0], 1.0) => [2.0, 4.0, 7.0] Multiple accumulators: scan(lambda a, e: (a[0] + e, a[1] * e), [1.0, 2.0, 3.0], (0.0, 1.0)) Multiple inputs: scan(lambda a, e: a + (e[0] * e[1]), (elems1, elems2), 0.0) Args: fn: callable, fn(accumulators, element) returns new accumulator values. The (possibly nested) sequence of accumulators is the same as `initial` and the return value must have the same structure. elems: A (possibly nested) tensor which will be unpacked along the first dimension. The resulting slices will be the second argument to fn. The first dimension of all nested input tensors must be the same. initial: A tensor or (possibly nested) sequence of tensors with initial values for the accumulators. reverse: (optional) True enables scan and output elems in reverse order. inclusive: (optional) True includes the initial accumulator values in the output. Length of output will be len(elem sequence) + 1. Not meaningful if final_only is True. final_only: (optional) When True, return only the final accumulated values, not the concatenation of accumulated values for each input. Returns: A (possibly nested) sequence of tensors with the results of applying fn to tensors unpacked from elems and previous accumulator values." 8598,_get_dim,tensorflow/tensorflow/python/ops/ctc_ops.py,1429,function,Get value of tensor shape[i] preferring static value if available. 8599,_cudnn_rnn_backward,tensorflow/tensorflow/python/ops/cudnn_rnn_grad.py,25,function,Gradients for the CudnnRNN op. 8600,_cudnn_rnn_backward_v2,tensorflow/tensorflow/python/ops/cudnn_rnn_grad.py,51,function, 8601,_cudnn_rnn_backwardv3,tensorflow/tensorflow/python/ops/cudnn_rnn_grad.py,77,function,Gradients for the CudnnRNNV3 op. 8602,copy_handle_data,tensorflow/tensorflow/python/ops/custom_gradient.py,45,function,"Copies HandleData for variant and resource type tensors if available. The CppShapeInferenceResult::HandleData proto contains information about the shapes and types of the element tensors of resource/variant type tensors. We need to copy this across function boundaries, i.e., when capturing a placeholder or when returning a function tensor as output. If we don't do this the element tensors will have unknown shapes, e.g., if a TensorList variant tensor is captured as a placeholder, elements popped from that list would have unknown shape. Args: source_t: The tensor to copy HandleData from. target_t: The tensor to copy HandleData to." 8603,custom_gradient,tensorflow/tensorflow/python/ops/custom_gradient.py,89,function,"Decorator to define a function with a custom gradient. This decorator allows fine grained control over the gradients of a sequence of operations. This may be useful for multiple reasons, including providing a more efficient or numerically stable gradient for a sequence of operations. For example, consider the following function that commonly occurs in the computation of cross entropy and log likelihoods: ```python def log1pexp(x): return tf.math.log(1 + tf.exp(x)) ``` Due to numerical instability, the gradient of this function evaluated at x=100 is NaN. For example: ```python x = tf.constant(100.) y = log1pexp(x) dy = tf.gradients(y, x) # Will be NaN when evaluated. ``` The gradient expression can be analytically simplified to provide numerical stability: ```python @tf.custom_gradient def log1pexp(x): e = tf.exp(x) def grad(dy): return dy * (1 - 1 / (1 + e)) return tf.math.log(1 + e), grad ``` With this definition, the gradient at x=100 will be correctly evaluated as 1.0. Nesting custom gradients can lead to unintuitive results. The default behavior does not correspond to n-th order derivatives. For example ```python @tf.custom_gradient def op(x): y = op1(x) @tf.custom_gradient def grad_fn(dy): gdy = op2(x, y, dy) def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t. x. return op3(x, y, dy, ddy) return gdy, grad_grad_fn return y, grad_fn ``` The function `grad_grad_fn` will be calculating the first order gradient of `grad_fn` with respect to `dy`, which is used to generate forward-mode gradient graphs from backward-mode gradient graphs, but is not the same as the second order gradient of `op` with respect to `x`. Instead, wrap nested `@tf.custom_gradients` in another function: ```python @tf.custom_gradient def op_with_fused_backprop(x): y, x_grad = fused_op(x) def first_order_gradient(dy): @tf.custom_gradient def first_order_custom(unused_x): def second_order_and_transpose(ddy): return second_order_for_x(...), gradient_wrt_dy(...)
return x_grad, second_order_and_transpose return dy * first_order_custom(x) return y, first_order_gradient ``` Additional arguments to the inner `@tf.custom_gradient`-decorated function control the expected return values of the innermost function. See also `tf.RegisterGradient` which registers a gradient function for a primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows for fine grained control over the gradient computation of a sequence of operations. Note that if the decorated function uses `Variable`s, the enclosing variable scope must be using `ResourceVariable`s. Args: f: function `f(*x)` that returns a tuple `(y, grad_fn)` where: - `x` is a sequence of (nested structures of) `Tensor` inputs to the function. - `y` is a (nested structure of) `Tensor` outputs of applying TensorFlow operations in `f` to `x`. - `grad_fn` is a function with the signature `g(*grad_ys)` which returns a list of `Tensor`s the same size as (flattened) `x` - the derivatives of `Tensor`s in `y` with respect to the `Tensor`s in `x`. `grad_ys` is a sequence of `Tensor`s the same size as (flattened) `y` holding the initial value gradients for each `Tensor` in `y`. In a pure mathematical sense, a vector-argument vector-valued function `f`'s derivatives should be its Jacobian matrix `J`. Here we are expressing the Jacobian `J` as a function `grad_fn` which defines how `J` will transform a vector `grad_ys` when left-multiplied with it (`grad_ys * J`, the vector-Jacobian product, or VJP). This functional representation of a matrix is convenient to use for chain-rule calculation (in e.g. the back-propagation algorithm). If `f` uses `Variable`s (that are not part of the inputs), i.e. through `get_variable`, then `grad_fn` should have signature `g(*grad_ys, variables=None)`, where `variables` is a list of the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where `grad_xs` is the same as above, and `grad_vars` is a `list` with the derivatives of `Tensor`s in `y` with respect to the variables (that is, grad_vars has one Tensor per variable in variables). Returns: A function `h(x)` which returns the same value as `f(x)[0]` and whose gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`." 8604,Bind,tensorflow/tensorflow/python/ops/custom_gradient.py,225,class,"When called evaluates `d(f, args, kwargs)` but supports binding `f`. >>> @Bind.decorator ... def my_decorator(f, args, kwargs): ... print(""my_decorator called with"", args, kwargs) ... return f(*args, **kwargs) >>> class Foo(object): ... @my_decorator ... def bar(self, a, b, c): ... return a * b * c >>> Foo.bar(None, 1, 2, c=3) my_decorator called with (None, 1, 2) {'c': 3} 6 >>> foo = Foo() >>> foo.bar(1, 2, c=3) my_decorator called with (1, 2) {'c': 3} 6" 8605,get_variable_by_name,tensorflow/tensorflow/python/ops/custom_gradient.py,267,function,"Given a variable name, retrieves a handle on the tensorflow Variable." 8606,_get_dependent_variables,tensorflow/tensorflow/python/ops/custom_gradient.py,290,function,"Finds variables involved in the subgraph between input_ops and output_ops. Args: input_ops: Flattened list of input ops output_ops: Flattened list of output ops Returns: A list of variables" 8607,_graph_mode_decorator,tensorflow/tensorflow/python/ops/custom_gradient.py,315,function,Implement custom gradient decorator for graph mode. 8608,_eager_mode_decorator,tensorflow/tensorflow/python/ops/custom_gradient.py,438,function,Implement custom gradient decorator for eager mode. 
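For illustration, the `log1pexp` pattern from the `custom_gradient` docstring above, assembled into a runnable eager-mode sketch:

```python
import tensorflow as tf

@tf.custom_gradient
def log1pexp(x):
  e = tf.exp(x)
  def grad(dy):
    # Analytically simplified gradient: sigmoid(x), stable for large x.
    return dy * (1 - 1 / (1 + e))
  return tf.math.log(1 + e), grad

x = tf.constant(100.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  y = log1pexp(x)
print(tape.gradient(y, x).numpy())  # 1.0, rather than NaN
```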
8609,recompute_grad,tensorflow/tensorflow/python/ops/custom_gradient.py,492,function,"An eager-compatible version of recompute_grad. For f(*args, **kwargs), this supports gradients with respect to args or kwargs, but kwargs are currently only supported in eager-mode. Note that for keras layer and model objects, this is handled automatically. Warning: If `f` was originally a tf.keras Model or Layer object, `g` will not be able to access the member variables of that object, because `g` returns through the wrapper function `inner`. When recomputing gradients through objects that inherit from keras, we suggest keeping a reference to the underlying object around for the purpose of accessing these variables. Args: f: function `f(*x)` that returns a `Tensor` or sequence of `Tensor` outputs. Returns: A function `g` that wraps `f`, but which recomputes `f` on the backwards pass of a gradient call." 8610,grad_pass_through,tensorflow/tensorflow/python/ops/custom_gradient.py,565,function,"Creates a grad-pass-through op with the forward behavior provided in f. Use this function to wrap any op, maintaining its behavior in the forward pass, but replacing the original op in the backward graph with an identity. For example: ```python x = tf.Variable(1.0, name=""x"") z = tf.Variable(3.0, name=""z"") with tf.GradientTape() as tape: # y will evaluate to 9.0 y = tf.grad_pass_through(x.assign)(z**2) # grads will evaluate to 6.0 grads = tape.gradient(y, z) ``` Another example is a 'differentiable' moving average approximation, where gradients are allowed to flow into the last value fed to the moving average, but the moving average is still used for the forward pass: ```python x = ... # Some scalar value # A moving average object, we don't need to know how this is implemented moving_average = MovingAverage() with backprop.GradientTape() as tape: # mavg_x will evaluate to the current running average value mavg_x = tf.grad_pass_through(moving_average)(x) grads = tape.gradient(mavg_x, x) # grads will evaluate to 1.0 ``` Args: f: function `f(*x)` that returns a `Tensor` or nested structure of `Tensor` outputs. Returns: A function `h(x)` which returns the same values as `f(x)` and whose gradients are the same as those of an identity function." 8611,_DynamicPartitionGrads,tensorflow/tensorflow/python/ops/data_flow_grad.py,31,function,Gradients for DynamicPartition. 8612,_DynamicStitchGrads,tensorflow/tensorflow/python/ops/data_flow_grad.py,50,function,Gradients for DynamicStitch and ParallelDynamicStitch. 8613,_as_type_list,tensorflow/tensorflow/python/ops/data_flow_ops.py,48,function,Convert dtypes to a list of types. 8614,_as_shape_list,tensorflow/tensorflow/python/ops/data_flow_ops.py,59,function,Convert shapes to a list of tuples of int (or None). 8615,_as_name_list,tensorflow/tensorflow/python/ops/data_flow_ops.py,91,function, 8616,_shape_common,tensorflow/tensorflow/python/ops/data_flow_ops.py,102,function,The greatest lower bound (ordered by specificity) TensorShape. 8617,QueueBase,tensorflow/tensorflow/python/ops/data_flow_ops.py,119,class,"Base class for queue implementations. A queue is a TensorFlow data structure that stores tensors across multiple steps, and exposes operations that enqueue and dequeue tensors. Each queue element is a tuple of one or more tensors, where each tuple component has a static dtype, and may have a static shape. The queue implementations support versions of enqueue and dequeue that handle single elements, and versions that support enqueuing and dequeuing a batch of elements at once.
See `tf.queue.FIFOQueue` and `tf.queue.RandomShuffleQueue` for concrete implementations of this class, and instructions on how to create them." 8618,_shared_name,tensorflow/tensorflow/python/ops/data_flow_ops.py,613,function, 8619,RandomShuffleQueue,tensorflow/tensorflow/python/ops/data_flow_ops.py,625,class,"A queue implementation that dequeues elements in a random order. See `tf.queue.QueueBase` for a description of the methods on this class." 8620,FIFOQueue,tensorflow/tensorflow/python/ops/data_flow_ops.py,711,class,"A queue implementation that dequeues elements in first-in first-out order. See `tf.queue.QueueBase` for a description of the methods on this class." 8621,GPUCompatibleFIFOQueue,tensorflow/tensorflow/python/ops/data_flow_ops.py,771,class,"A queue implementation that dequeues elements in first-in first-out order. GPUCompatibleFIFOQueue is like FIFOQueue, but the queue resource may be placed either on a CPU or on a GPU. It is not cross-device: enqueues and dequeues will be colocated with the queue resource. GPUCompatibleFIFOQueue only supports enqueue and dequeue at the moment, not enqueue_many or dequeue_many. See `tf.queue.QueueBase` for a description of the methods on this class." 8622,PaddingFIFOQueue,tensorflow/tensorflow/python/ops/data_flow_ops.py,849,class,"A FIFOQueue that supports batching variable-sized tensors by padding. A `PaddingFIFOQueue` may contain components with dynamic shape, while also supporting `dequeue_many`. See the constructor for more details. See `tf.queue.QueueBase` for a description of the methods on this class." 8623,PriorityQueue,tensorflow/tensorflow/python/ops/data_flow_ops.py,925,class,"A queue implementation that dequeues elements in prioritized order. See `tf.queue.QueueBase` for a description of the methods on this class." 8624,Barrier,tensorflow/tensorflow/python/ops/data_flow_ops.py,994,class,Represents a key-value map that persists across graph executions. 8625,ConditionalAccumulatorBase,tensorflow/tensorflow/python/ops/data_flow_ops.py,1240,class,"A conditional accumulator for aggregating gradients. Up-to-date gradients (i.e., time step at which gradient was computed is equal to the accumulator's time step) are added to the accumulator. Extraction of the average gradient is blocked until the required number of gradients has been accumulated." 8626,ConditionalAccumulator,tensorflow/tensorflow/python/ops/data_flow_ops.py,1320,class,"A conditional accumulator for aggregating gradients. Up-to-date gradients (i.e., time step at which gradient was computed is equal to the accumulator's time step) are added to the accumulator. Extraction of the average gradient is blocked until the required number of gradients has been accumulated." 8627,SparseConditionalAccumulator,tensorflow/tensorflow/python/ops/data_flow_ops.py,1412,class,"A conditional accumulator for aggregating sparse gradients. Sparse gradients are represented by `IndexedSlices`. Up-to-date gradients (i.e., time step at which gradient was computed is equal to the accumulator's time step) are added to the accumulator. Extraction of the average gradient is blocked until the required number of gradients has been accumulated. Args: dtype: Datatype of the accumulated gradients. shape: Shape of the accumulated gradients. shared_name: Optional. If non-empty, this accumulator will be shared under the given name across multiple sessions. name: Optional name for the accumulator. reduction_type: Reduction type to use when taking the gradient." 
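For illustration, a minimal eager-mode `tf.queue.FIFOQueue` sketch (values are arbitrary):

```python
import tensorflow as tf

# A queue of scalar int32 elements with capacity 3.
q = tf.queue.FIFOQueue(capacity=3, dtypes=tf.int32, shapes=[[]])
q.enqueue(1)
q.enqueue(2)
q.enqueue(3)
print(q.dequeue().numpy())        # 1 (first-in, first-out)
print(q.dequeue_many(2).numpy())  # [2 3]
```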
8628,BaseStagingArea,tensorflow/tensorflow/python/ops/data_flow_ops.py,1606,class,Base class for Staging Areas. 8629,StagingArea,tensorflow/tensorflow/python/ops/data_flow_ops.py,1819,class,"Class for staging inputs. No ordering guarantees. A `StagingArea` is a TensorFlow data structure that stores tensors across multiple steps, and exposes operations that can put and get tensors. Each `StagingArea` element is a tuple of one or more tensors, where each tuple component has a static dtype, and may have a static shape. The capacity of a `StagingArea` may be bounded or unbounded. It supports multiple concurrent producers and consumers; and provides exactly-once delivery. Each element of a `StagingArea` is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a staging area element must have the respective fixed shape. If it is unspecified, different elements may have different shapes. It can be configured with a capacity in which case put(values) will block until space becomes available. Similarly, it can be configured with a memory limit which will block put(values) until space is available. This is mostly useful for limiting the number of tensors on devices such as GPUs. All get() and peek() commands block if the requested data is not present in the Staging Area." 8630,MapStagingArea,tensorflow/tensorflow/python/ops/data_flow_ops.py,2045,class,"A `MapStagingArea` is a TensorFlow data structure that stores tensors across multiple steps, and exposes operations that can put and get tensors. Each `MapStagingArea` element is a (key, value) pair. Only int64 keys are supported, other types should be hashed to produce a key. Values are a tuple of one or more tensors. Each tuple component has a static dtype, and may have a static shape. The capacity of a `MapStagingArea` may be bounded or unbounded. It supports multiple concurrent producers and consumers; and provides exactly-once delivery. Each value tuple of a `MapStagingArea` is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument. If the `shapes` argument is specified, each component of a staging area element must have the respective fixed shape. If it is unspecified, different elements may have different shapes. It behaves like an associative container with support for: - put(key, values) - peek(key) like dict.get(key) - get(key) like dict.pop(key) - get(key=None) like dict.popitem() - size() - clear() If ordered, a tree structure ordered by key will be used and get(key=None) will remove (key, value) pairs in increasing key order; otherwise a hashtable will be used. It can be configured with a capacity in which case put(key, values) will block until space becomes available. Similarly, it can be configured with a memory limit which will block put(key, values) until space is available. This is mostly useful for limiting the number of tensors on devices such as GPUs. All get() and peek() commands block if the requested (key, value) pair is not present in the staging area. Partial puts are supported and will be placed in an incomplete map until such time as all values associated with the key have been inserted. Once completed, this (key, value) pair will be inserted into the map. Data in the incomplete map counts towards the memory limit, but not towards capacity limit. Partial gets from the map are also supported.
This removes the partially requested tensors from the entry, but the entry is only removed from the map once all tensors associated with it are removed." 8631,RecordInput,tensorflow/tensorflow/python/ops/data_flow_ops.py,2430,class,"RecordInput asynchronously reads and randomly yields TFRecords. A RecordInput Op will continuously read a batch of records asynchronously into a buffer of some fixed capacity. It can also asynchronously yield random records from this buffer. It will not start yielding until at least `buffer_size / 2` elements have been placed into the buffer so that sufficient randomization can take place. The order the files are read will be shifted each epoch by `shift_amount` so that the data is presented in a different order every epoch." 8632,get_zeros_dtype,tensorflow/tensorflow/python/ops/default_gradient.py,26,function,Return the dtype for the default gradient for a Tensor. 8633,shape_and_dtype,tensorflow/tensorflow/python/ops/default_gradient.py,38,function,Return the shape and dtype for the default gradient for a Tensor. 8634,zeros_like,tensorflow/tensorflow/python/ops/default_gradient.py,52,function,"Like array_ops.zeros_like, but respects resource handles." 8635,ones_like,tensorflow/tensorflow/python/ops/default_gradient.py,60,function,"Like array_ops.ones_like, but respects resource handles." 8636,supports_default_grad,tensorflow/tensorflow/python/ops/default_gradient.py,68,function,"Whether tensor `t` supports creating a default gradient. This function assumes that `t` is of a trainable type. Args: t: Tensor Returns: Bool" 8637,DequantizeOpTest,tensorflow/tensorflow/python/ops/dequantize_op_test.py,29,class, 8638,_clip,tensorflow/tensorflow/python/ops/embedding_ops.py,43,function,"Helper function for _embedding_lookup_and_transform. This function optionally clips embeddings to an l2-norm of max_norm. Args: params: A `Tensor` of embeddings retrieved by `gather`. ids: The `ids` argument that was passed to `gather`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. Returns: A `Tensor` with the same type as `params`." 8639,_embedding_lookup_and_transform,tensorflow/tensorflow/python/ops/embedding_ops.py,86,function,"Helper function for embedding_lookup and _compute_sampled_logits. This function is a generalization of embedding_lookup that optionally applies a caller-specified transformation to each embedding. This is done through the `transform_fn` argument. If provided, the function is applied to each partitioned tensor of retrieved embeddings, colocated with the embeddings. This function will be called with a single `Tensor` argument of the same type as the `params` tensor and should return a `Tensor`. The shape of the argument will be the same as `params` except for the size of the first dimension. The first dimension of the result's shape must be the same size as the argument's. Args: params: See embedding_lookup. ids: See embedding_lookup. partition_strategy: See embedding_lookup. name: See embedding_lookup. max_norm: See embedding_lookup. transform_fn: An optional function to apply to each retrieved embedding. If max_norm is provided, transform_fn is applied to the norm-limited embeddings. Returns: See embedding_lookup for details. Raises: ValueError: If `params` is empty." 8640,embedding_lookup,tensorflow/tensorflow/python/ops/embedding_ops.py,255,function,"Looks up embeddings for the given `ids` from a list of tensors. This function is used to perform parallel lookups on the list of tensors in `params`. 
It is a generalization of `tf.gather`, where `params` is interpreted as a partitioning of a large embedding tensor. `params` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. If `len(params) > 1`, each element `id` of `ids` is partitioned between the elements of `params` according to the `partition_strategy`. In all strategies, if the id space does not evenly divide the number of partitions, each of the first `(max_id + 1) % len(params)` partitions will be assigned one more id. If `partition_strategy` is `""mod""`, we assign each id to partition `p = id % len(params)`. For instance, 13 ids are split across 5 partitions as: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]` If `partition_strategy` is `""div""`, we assign ids to partitions in a contiguous manner. In this case, 13 ids are split across 5 partitions as: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]` If the input ids are ragged tensors, partition variables are not supported and the partition strategy and the max_norm are ignored. The results of the lookup are concatenated into a dense tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`. Args: params: A single tensor representing the complete embedding tensor, or a list of P tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. ids: A `Tensor` or a 'RaggedTensor' with type `int32` or `int64` containing the ids to be looked up in `params`. partition_strategy: A string specifying the partitioning strategy, relevant if `len(params) > 1`. Currently `""div""` and `""mod""` are supported. Default is `""mod""`. name: A name for the operation (optional). validate_indices: DEPRECATED. If this operation is assigned to CPU, values in `indices` are always validated to be within range. If assigned to GPU, out-of-bound indices result in safe but unspecified behavior, which may include raising an error. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. Returns: A `Tensor` or a 'RaggedTensor', depending on the input, with the same type as the tensors in `params`. Raises: ValueError: If `params` is empty." 8641,embedding_lookup_v2,tensorflow/tensorflow/python/ops/embedding_ops.py,333,function,"Looks up embeddings for the given `ids` from a list of tensors. This function is used to perform parallel lookups on the list of tensors in `params`. It is a generalization of `tf.gather`, where `params` is interpreted as a partitioning of a large embedding tensor. If `len(params) > 1`, each element `id` of `ids` is partitioned between the elements of `params` according to the ""div"" partition strategy, which means we assign ids to partitions in a contiguous manner. For instance, 13 ids are split across 5 partitions as: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`. If the id space does not evenly divide the number of partitions, each of the first `(max_id + 1) % len(params)` partitions will be assigned one more id. The results of the lookup are concatenated into a dense tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`. Args: params: A single tensor representing the complete embedding tensor, or a list of tensors all of same shape except for the first dimension, representing sharded embedding tensors following ""div"" partition strategy. 
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked up in `params`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. name: A name for the operation (optional). Returns: A `Tensor` with the same type as the tensors in `params`. For instance, if `params` is a 5x2 matrix: ```python [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] ``` or a list of matrices: ```python params[0]: [[1, 2], [3, 4]] params[1]: [[5, 6], [7, 8]] params[2]: [[9, 10]] ``` and `ids` is: ```python [0, 3, 4] ``` The output will be a 3x2 matrix: ```python [[1, 2], [7, 8], [9, 10]] ``` Raises: ValueError: If `params` is empty." 8642,embedding_lookup_sparse,tensorflow/tensorflow/python/ops/embedding_ops.py,399,function,"Looks up embeddings for the given ids and weights from a list of tensors. This op assumes that there is at least one id for each row in the dense tensor represented by sp_ids (i.e. there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. `sp_ids` and `sp_weights` (if not None) are `SparseTensor`s with rank of 2. Embeddings are always aggregated along the last dimension. It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. Args: params: A single tensor representing the complete embedding tensor, or a list of tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size and M is arbitrary. sp_weights: either a `SparseTensor` of float / double weights, or `None` to indicate all weights should be taken to be 1. If specified, `sp_weights` must have exactly the same shape and indices as `sp_ids`. partition_strategy: A string specifying the partitioning strategy, relevant if `len(params) > 1`. Currently `""div""` and `""mod""` are supported. Default is `""mod""`. See `tf.nn.embedding_lookup` for more details. name: Optional name for the op. combiner: A string specifying the reduction op. Currently ""mean"", ""sqrtn"" and ""sum"" are supported. ""sum"" computes the weighted sum of the embedding results for each row. ""mean"" is the weighted sum divided by the total weight. ""sqrtn"" is the weighted sum divided by the square root of the sum of the squares of the weights. Defaults to `mean`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. Returns: A dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by `sp_ids`, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. In other words, if `shape(combined params) = [p0, p1, ..., pm]` and `shape(sp_ids) = shape(sp_weights) = [d0, d1]` then `shape(output) = [d0, p1, ..., pm]`. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are ```python [0, 0]: id 1, weight 2.0 [0, 1]: id 3, weight 0.5 [1, 0]: id 0, weight 1.0 [2, 3]: id 1, weight 3.0 ``` with `combiner`=""mean"", then the output will be a 3x20 matrix where ```python output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = (params[0, :] * 1.0) / 1.0 output[2, :] = (params[1, :] * 3.0) / 3.0 ``` Raises: TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is neither `None` nor `SparseTensor`. ValueError: If `combiner` is not one of {""mean"", ""sqrtn"", ""sum""}."
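For illustration, a minimal runnable sketch of a sparse lookup (a single weight table and hand-built sparse ids; values are arbitrary):

```python
import tensorflow as tf

params = tf.random.normal([10, 20])  # vocabulary of 10 ids, embedding dim 20
sp_ids = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 0], [2, 3]],
    values=tf.constant([1, 3, 0, 1], dtype=tf.int64),
    dense_shape=[3, 4])
# sp_weights=None means every id gets weight 1.0.
out = tf.nn.embedding_lookup_sparse(params, sp_ids, None, combiner="mean")
print(out.shape)  # (3, 20): one combined embedding per row of sp_ids
```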
8643,embedding_lookup_sparse_v2,tensorflow/tensorflow/python/ops/embedding_ops.py,582,function,"Looks up embeddings for the given ids and weights from a list of tensors. This op assumes that there is at least one id for each row in the dense tensor represented by sp_ids (i.e. there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. `sp_ids` and `sp_weights` (if not None) are `SparseTensor`s with rank of 2. Embeddings are always aggregated along the last dimension. It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. If `len(params) > 1`, each element of `sp_ids` is partitioned between the elements of `params` according to the ""div"" partition strategy, which means we assign ids to partitions in a contiguous manner. For instance, 13 ids are split across 5 partitions as: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`. If the id space does not evenly divide the number of partitions, each of the first `(max_id + 1) % len(params)` partitions will be assigned one more id. Args: params: A single tensor representing the complete embedding tensor, or a list of tensors all of same shape except for the first dimension, representing sharded embedding tensors following ""div"" partition strategy. sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size and M is arbitrary. sp_weights: either a `SparseTensor` of float / double weights, or `None` to indicate all weights should be taken to be 1. If specified, `sp_weights` must have exactly the same shape and indices as `sp_ids`. combiner: A string specifying the reduction op. Currently ""mean"", ""sqrtn"" and ""sum"" are supported. ""sum"" computes the weighted sum of the embedding results for each row. ""mean"" is the weighted sum divided by the total weight. ""sqrtn"" is the weighted sum divided by the square root of the sum of the squares of the weights. Defaults to `mean`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. name: Optional name for the op. Returns: A dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by `sp_ids`, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. In other words, if `shape(combined params) = [p0, p1, ..., pm]` and `shape(sp_ids) = shape(sp_weights) = [d0, d1]` then `shape(output) = [d0, p1, ..., pm]`.
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are ```python [0, 0]: id 1, weight 2.0 [0, 1]: id 3, weight 0.5 [1, 0]: id 0, weight 1.0 [2, 3]: id 1, weight 3.0 ``` with `combiner`=""mean"", then the output will be a 3x20 matrix where ```python output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = (params[0, :] * 1.0) / 1.0 output[2, :] = (params[1, :] * 3.0) / 3.0 ``` Raises: TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is neither `None` nor `SparseTensor`. ValueError: If `combiner` is not one of {""mean"", ""sqrtn"", ""sum""}." 8644,safe_embedding_lookup_sparse_v2,tensorflow/tensorflow/python/ops/embedding_ops.py,673,function,"Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of num of shards. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. If `len(embedding_weights) > 1`, each element `id` of `ids` is partitioned between the elements of `embedding_weights` according to the ""div"" partition strategy, which means we assign ids to partitions in a contiguous manner. For instance, 13 ids are split across 5 partitions as: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`. If the id space does not evenly divide the number of partitions, each of the first `(max_id + 1) % len(embedding_weights)` partitions will be assigned one more id. Args: embedding_weights: A single tensor representing the complete embedding tensor, or a list of tensors all of same shape except for the first dimension, representing sharded embedding tensors following ""div"" partition strategy. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently ""mean"", ""sqrtn"" and ""sum"" are supported, with ""mean"" the default. default_id: The id to use for an entry with no features. Defaults to 0-vector. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. name: A name for this operation (optional). Returns: A dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by `sparse_ids`, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. In other words, if `shape(combined embedding_weights) = [p0, p1, ..., pm]` and `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]` then `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are ```python [0, 0]: id 1, weight 2.0 [0, 1]: id 3, weight 0.5 [1, 0]: id -1, weight 1.0 [2, 3]: id 1, weight 3.0 ``` `default_id` is 0.
with `combiner`=""mean"", then the output will be a 3x20 matrix where ```python output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = (params[0, :] * 1.0) / 1.0 output[2, :] = (params[1, :] * 3.0) / 3.0 ``` Raises: ValueError: if `embedding_weights` is empty." 8645,safe_embedding_lookup_sparse,tensorflow/tensorflow/python/ops/embedding_ops.py,775,function,"Lookup embedding results, accounting for invalid IDs and empty features. The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the vocabulary size is not necessarily a multiple of `P`. `embedding_weights` may be a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` with a partitioner. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector for `default_id` is returned, or the 0-vector if `default_id` is not supplied. The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. Args: embedding_weights: A single tensor representing the complete embedding tensor, or a list of tensors all of same shape except for the first dimension, representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing float weights corresponding to `sparse_ids`, or `None` if all weights are assumed to be 1.0. combiner: A string specifying how to combine embedding results for each entry. Currently ""mean"", ""sqrtn"" and ""sum"" are supported, with ""mean"" the default. default_id: The id to use for an entry with no features. name: A name for this operation (optional). partition_strategy: A string specifying the partitioning strategy. Currently `""div""` and `""mod""` are supported. Default is `""div""`. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. Returns: A dense tensor representing the combined embeddings for the sparse ids. For each row in the dense tensor represented by `sp_ids`, the op looks up the embeddings for all ids in that row, multiplies them by the corresponding weight, and combines these embeddings as specified. In other words, if `shape(combined embedding_weights) = [p0, p1, ..., pm]` and `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]` then `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are ```python [0, 0]: id 1, weight 2.0 [0, 1]: id 3, weight 0.5 [1, 0]: id -1, weight 1.0 [2, 3]: id 1, weight 3.0 ``` `default_id` is 0. with `combiner`=""mean"", then the output will be a 3x20 matrix where ```python output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) output[1, :] = (params[0, :] * 1.0) / 1.0 output[2, :] = (params[1, :] * 3.0) / 3.0 ``` Raises: ValueError: if `embedding_weights` is empty." 8646,embedding_lookup_ragged,tensorflow/tensorflow/python/ops/embedding_ops.py,943,function,"Look up the ragged ids in a list of embedding tensors.
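For illustration, an equivalent ragged lookup via public ops (a minimal sketch using `tf.ragged.map_flat_values`; values are arbitrary):

```python
import tensorflow as tf

weights = tf.random.normal([5, 4])              # [vocab_size, embed_dim]
ragged_ids = tf.ragged.constant([[0, 3], [2]])  # ragged ids, shape [2, None]
# Look up the flat ids, then restore the ragged row structure.
out = tf.ragged.map_flat_values(tf.nn.embedding_lookup, weights, ragged_ids)
print(out.shape)  # (2, None, 4)
```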
Args: embedding_weights: A tensor representing the complete embedding tensor having the shape [e1, ...eM] ragged_ids: A 'RaggedTensor' with type 'int32' or 'int64' containing the ids to be looked up in 'embedding_weights' of shape [r0, ..rN]. Values must be in the range '[0, embedding_weights.shape[0]]'. partition_strategy: A string specifying the partitioning strategy. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value. name: A name for the operation (optional) Returns: A ragged tensor of shape [r0, r1, ...rN, e1, ...eM]. Raises: ValueError: if `embedding_weights` is empty or `ragged_ids` is not a `RaggedTensor`." 8647,_prune_invalid_ids,tensorflow/tensorflow/python/ops/embedding_ops.py,989,function,Prune invalid IDs (< 0) from the input ids and weights. 8648,_prune_invalid_weights,tensorflow/tensorflow/python/ops/embedding_ops.py,1002,function,Prune invalid weights (< 0) from the input ids and weights. 8649,foldl,tensorflow/tensorflow/python/ops/functional_ops.py,50,function,"foldl on the list of tensors unpacked from `elems` on dimension 0. This foldl operator repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems` on dimension 0. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `fn(initializer, values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Args: fn: The callable to be performed. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, as the initial value for the accumulator. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors, resulting from applying `fn` consecutively to the list of tensors unpacked from `elems`, from first to last. Raises: TypeError: if `fn` is not callable. Example: ```python elems = tf.constant([1, 2, 3, 4, 5, 6]) sum = foldl(lambda a, x: a + x, elems) # sum == 21 ```"
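For illustration, a minimal sketch of `foldl` with an explicit initializer and multi-arity `elems` (values are arbitrary):

```python
import tensorflow as tf

elems = (tf.constant([1., 2., 3.]), tf.constant([10., 20., 30.]))
# Accumulate a running dot product of the two sequences; the second argument
# of fn is a tuple of slices matching the structure of elems.
dot = tf.foldl(lambda acc, xy: acc + xy[0] * xy[1], elems,
               initializer=tf.constant(0.))
print(dot.numpy())  # 140.0
```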
8650,foldl_v2,tensorflow/tensorflow/python/ops/functional_ops.py,177,function,"foldl on the list of tensors unpacked from `elems` on dimension 0. This foldl operator repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems` on dimension 0. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `fn(initializer, values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Args: fn: The callable to be performed. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, as the initial value for the accumulator. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) Deprecated. False disables support for back propagation. Prefer using `tf.stop_gradient` instead. swap_memory: (optional) True enables GPU-CPU memory swapping. name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors, resulting from applying `fn` consecutively to the list of tensors unpacked from `elems`, from first to last. Raises: TypeError: if `fn` is not callable. Example: ```python elems = tf.constant([1, 2, 3, 4, 5, 6]) sum = foldl(lambda a, x: a + x, elems) # sum == 21 ```" 8651,foldr,tensorflow/tensorflow/python/ops/functional_ops.py,245,function,"foldr on the list of tensors unpacked from `elems` on dimension 0. This foldr operator repeatedly applies the callable `fn` to a sequence of elements from last to first. The elements are made of the tensors unpacked from `elems`. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `fn(initializer, values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Args: fn: The callable to be performed. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, as the initial value for the accumulator. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping.
name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors, resulting from applying `fn` consecutively to the list of tensors unpacked from `elems`, from last to first. Raises: TypeError: if `fn` is not callable. Example: ```python elems = [1, 2, 3, 4, 5, 6] sum = foldr(lambda a, x: a + x, elems) # sum == 21 ```" 8652,foldr_v2,tensorflow/tensorflow/python/ops/functional_ops.py,373,function,"foldr on the list of tensors unpacked from `elems` on dimension 0. This foldr operator repeatedly applies the callable `fn` to a sequence of elements from last to first. The elements are made of the tensors unpacked from `elems`. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `fn(initializer, values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Args: fn: The callable to be performed. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, as the initial value for the accumulator. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) Deprecated. False disables support for back propagation. Prefer using `tf.stop_gradient` instead. swap_memory: (optional) True enables GPU-CPU memory swapping. name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors, resulting from applying `fn` consecutively to the list of tensors unpacked from `elems`, from last to first. Raises: TypeError: if `fn` is not callable. Example: ```python elems = [1, 2, 3, 4, 5, 6] sum = foldr(lambda a, x: a + x, elems) # sum == 21 ```" 8653,scan,tensorflow/tensorflow/python/ops/functional_ops.py,441,function,"scan on the list of tensors unpacked from `elems` on dimension 0. See also `tf.map_fn`. The simplest version of `scan` repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems` on dimension 0. The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`. If `reverse=True`, it's `fn(initializer, values[-1]).shape`. This method also allows multi-arity `elems` and accumulator.
If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The second argument of `fn` must match the structure of `elems`. If no `initializer` is provided, the output structure and dtypes of `fn` are assumed to be the same as its input; and in this case, the first argument of `fn` must match the structure of `elems`. If an `initializer` is provided, then the output of `fn` must have the same structure as `initializer`; and the first argument of `fn` must match this structure. For example, if `elems` is `(t1, [t2, t3])` and `initializer` is `[i1, i2]` then an appropriate signature for `fn` in `python2` is: `fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list, `[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the one that works in `python3`, is: `fn = lambda a, t:`, where `a` and `t` correspond to the input tuples. Args: fn: The callable to be performed. It accepts two arguments. The first will have the same structure as `initializer` if one is provided, otherwise it will have the same structure as `elems`. The second will have the same (possibly nested) structure as `elems`. Its output must have the same structure as `initializer` if one is provided, otherwise it must have the same structure as `elems`. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, initial value for the accumulator, and the expected output type of `fn`. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. infer_shape: (optional) False disables tests for consistent output shapes. reverse: (optional) True scans the tensor last to first (instead of first to last). name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors. Each tensor packs the results of applying `fn` to tensors unpacked from `elems` along the first dimension, and the previous accumulator value(s), from first to last (or last to first, if `reverse=True`). Raises: TypeError: if `fn` is not callable or the structure of the output of `fn` and `initializer` do not match. ValueError: if the lengths of the output of `fn` and `initializer` do not match. Examples: ```python elems = np.array([1, 2, 3, 4, 5, 6]) sum = scan(lambda a, x: a + x, elems) # sum == [1, 3, 6, 10, 15, 21] sum = scan(lambda a, x: a + x, elems, reverse=True) # sum == [21, 20, 18, 15, 11, 6] ``` ```python elems = np.array([1, 2, 3, 4, 5, 6]) initializer = np.array(0) sum_one = scan( lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer) # sum_one == [1, 2, 3, 4, 5, 6] ``` ```python elems = np.array([1, 0, 0, 0, 0, 0]) initializer = (np.array(0), np.array(1)) fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer) # fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13]) ```" 8654,scan_v2,tensorflow/tensorflow/python/ops/functional_ops.py,705,function,"scan on the list of tensors unpacked from `elems` on dimension 0. The simplest version of `scan` repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems` on dimension 0. 
The callable fn takes two tensors as arguments. The first argument is the accumulated value computed from the preceding invocation of fn, and the second is the value at the current position of `elems`. If `initializer` is None, `elems` must contain at least one element, and its first element is used as the initializer. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `[len(values)] + fn(initializer, values[0]).shape`. If `reverse=True`, it's `fn(initializer, values[-1]).shape`. This method also allows multi-arity `elems` and accumulator. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The second argument of `fn` must match the structure of `elems`. If no `initializer` is provided, the output structure and dtypes of `fn` are assumed to be the same as its input; and in this case, the first argument of `fn` must match the structure of `elems`. If an `initializer` is provided, then the output of `fn` must have the same structure as `initializer`; and the first argument of `fn` must match this structure. For example, if `elems` is `(t1, [t2, t3])` and `initializer` is `[i1, i2]` then an appropriate signature for `fn` in `python2` is: `fn = lambda (acc_p1, acc_p2), (t1, [t2, t3]):` and `fn` must return a list, `[acc_n1, acc_n2]`. An alternative correct signature for `fn`, and the one that works in `python3`, is: `fn = lambda a, t:`, where `a` and `t` correspond to the input tuples. Args: fn: The callable to be performed. It accepts two arguments. The first will have the same structure as `initializer` if one is provided, otherwise it will have the same structure as `elems`. The second will have the same (possibly nested) structure as `elems`. Its output must have the same structure as `initializer` if one is provided, otherwise it must have the same structure as `elems`. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be the first argument to `fn`. initializer: (optional) A tensor or (possibly nested) sequence of tensors, initial value for the accumulator, and the expected output type of `fn`. parallel_iterations: (optional) The number of iterations allowed to run in parallel. back_prop: (optional) Deprecated. False disables support for back propagation. Prefer using `tf.stop_gradient` instead. swap_memory: (optional) True enables GPU-CPU memory swapping. infer_shape: (optional) False disables tests for consistent output shapes. reverse: (optional) True scans the tensor last to first (instead of first to last). name: (optional) Name prefix for the returned tensors. Returns: A tensor or (possibly nested) sequence of tensors. Each tensor packs the results of applying `fn` to tensors unpacked from `elems` along the first dimension, and the previous accumulator value(s), from first to last (or last to first, if `reverse=True`). Raises: TypeError: if `fn` is not callable or the structure of the output of `fn` and `initializer` do not match. ValueError: if the lengths of the output of `fn` and `initializer` do not match.
Examples: ```python elems = np.array([1, 2, 3, 4, 5, 6]) sum = scan(lambda a, x: a + x, elems) # sum == [1, 3, 6, 10, 15, 21] sum = scan(lambda a, x: a + x, elems, reverse=True) # sum == [21, 20, 18, 15, 11, 6] ``` ```python elems = np.array([1, 2, 3, 4, 5, 6]) initializer = np.array(0) sum_one = scan( lambda a, x: x[0] - x[1] + a, (elems + 1, elems), initializer) # sum_one == [1, 2, 3, 4, 5, 6] ``` ```python elems = np.array([1, 0, 0, 0, 0, 0]) initializer = (np.array(0), np.array(1)) fibonaccis = scan(lambda a, _: (a[1], a[0] + a[1]), elems, initializer) # fibonaccis == ([1, 1, 2, 3, 5, 8], [1, 2, 3, 5, 8, 13]) ```" 8655,If,tensorflow/tensorflow/python/ops/functional_ops.py,819,function,"output = Cond(inputs) ? then_branch(inputs) : else_branch(inputs). Args: cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. inputs: A list of input tensors. then_branch: A function that takes 'inputs' and returns a list of tensors whose types are the same as what else_branch returns. else_branch: A function that takes 'inputs' and returns a list of tensors whose types are the same as what then_branch returns. name: A name for the operation (optional). Returns: A list of tensors returned by either then_branch(inputs) or else_branch(inputs)." 8656,Gradient,tensorflow/tensorflow/python/ops/functional_ops.py,850,function,"Computes the gradient function for function f via backpropagation. Args: inputs: A list of tensors of size N + M. f: The function we want to compute the gradient for. The function 'f' must be a numerical function which takes N inputs and produces M outputs. Its gradient function 'g' is a function that takes N + M inputs and produces N outputs. I.e. if we have (y1, y2, ..., yM) = f(x1, x2, ..., xN), then g is (dL/dx1, dL/dx2, ..., dL/dxN) = g(x1, x2, ..., xN, dL/dy1, dL/dy2, ..., dL/dyM), where L is a scalar-valued function of (x1, x2, ..., xN) (e.g., the loss function). dL/dxi is the partial derivative of L with respect to xi. name: A name for the operation (optional). Returns: A list of tensors of size N." 8657,_GetInputDtypes,tensorflow/tensorflow/python/ops/functional_ops.py,874,function,"Returns the input dtypes of func, excluding dtypes for captured inputs." 8658,_LoopBodyCaptureWrapper,tensorflow/tensorflow/python/ops/functional_ops.py,888,function,Returns a wrapper for `func` that handles loop-carried captured inputs. 8659,While,tensorflow/tensorflow/python/ops/functional_ops.py,911,function,"output = input; While (Cond(output)) { output = Body(output) }. Args: input_: A list of `Tensor` objects. A list of input tensors whose types are T. cond: A function that takes 'input' and returns a tensor. If the tensor is a non-boolean scalar, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. If the tensor is not a scalar, non-empty means True and empty means False. body: A function that takes a list of tensors and returns another list of tensors. Both lists have the same types as specified by T. name: A name for the operation (optional). hostmem: A list of integers. If i is in the list, input[i] is a host memory tensor.
Raises: ValueError: if `cond` has implicitly captured inputs or if `cond` and `body` have different signatures. Returns: A list of `Tensor` objects. Has the same type as `input`. A list of output tensors whose types are T." 8660,_ForUsingWhile,tensorflow/tensorflow/python/ops/functional_ops.py,992,function,Helper to implement a For loop using a While. 8661,For,tensorflow/tensorflow/python/ops/functional_ops.py,1054,function,"out = input; for i in range(start, limit, delta) out = body(i, out). Args: start: A `Tensor` of type `int32`. limit: A `Tensor` of type `int32`. delta: A `Tensor` of type `int32`. inputs: A list of `Tensor` objects. A list of input tensors whose types are T. body: A function that takes a list of tensors and returns another list of tensors. Both lists have the same types as (int32, T...). name: A name for the operation (optional). hostmem: A list of integers. If i is in the list, inputs[i] is a host memory tensor. In other words, the (i+1)-th argument of the body function expects a host memory tensor. rewrite_with_while: If True, use the While op to implement the For. Returns: A list of `Tensor` objects. Has the same type as `input`. A list of output tensors whose types are T." 8662,partitioned_call,tensorflow/tensorflow/python/ops/functional_ops.py,1112,function,"Executes a function while respecting device annotations. Currently, only those functions that execute within the same address space can be executed. Args: args: The arguments of the function, including captured inputs. f: The function to execute; an instance of `_DefinedFunction` or `_EagerDefinedFunction`. tout: a list containing the output dtype enums; if `None`, inferred from the signature of `f`. executing_eagerly: (Optional) A boolean indicating whether the context is executing eagerly. If `None`, fetched from the global context. config: (Optional) A `tensorflow::ConfigProto` proto, serialized. If `None`, all optimizations are disabled. Currently only handled for eager defined functions. executor_type: (Optional) A string for the name of the executor to be used in the function call. If not set, or set to an empty string, the default tensorflow executor will be used. Returns: The list of `Tensor`s returned by invoking `f(args)`. If the function does not return anything, then returns `None` if eager execution is enabled, or the `Operation` if not." 8663,_set_read_only_resource_inputs_attr,tensorflow/tensorflow/python/ops/functional_ops.py,1217,function,"Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: PartitionedCall Operation. func_graph: FuncGraph." 8664,FunctionalOpsTest,tensorflow/tensorflow/python/ops/functional_ops_test.py,30,class, 8665,_product,tensorflow/tensorflow/python/ops/gradient_checker.py,38,function, 8666,_extra_feeds,tensorflow/tensorflow/python/ops/gradient_checker.py,48,function, 8667,_compute_theoretical_jacobian,tensorflow/tensorflow/python/ops/gradient_checker.py,57,function,"Computes the theoretical Jacobian for dy/dx. Computes the theoretical Jacobian using the ops generated by compute_gradient(). Args: x: the tensor ""x"". x_shape: the dimensions of x as a tuple or an array of ints. x_data: a numpy array as the input data for x dy: the tensor ""dy"". dy_shape: the dimensions of dy as a tuple or an array of ints. dx: Tensor or IndexedSlices representing dx extra_feed_dict: dict that allows fixing specified tensor values during the jacobian calculation. Returns: A 2-d numpy array representing the Jacobian for dy/dx.
It has ""x_size"" rows and ""dy_size"" columns where ""x_size"" is the number of elements in x and ""dy_size"" is the number of elements in dy. Raises: ValueError: If `dy` is empty but the gradient is nonzero." 8668,_compute_numeric_jacobian,tensorflow/tensorflow/python/ops/gradient_checker.py,135,function,"Computes the numeric Jacobian for dy/dx. Computes the numeric Jacobian by slightly perturbing the inputs and measuring the differences on the output. Args: x: the tensor ""x"". x_shape: the dimensions of x as a tuple or an array of ints. x_data: a numpy array as the input data for x y: the tensor ""y"". y_shape: the dimensions of y as a tuple or an array of ints. delta: the amount of perturbation we give to the input extra_feed_dict: dict that allows fixing specified tensor values during the jacobian calculation. Returns: A 2-d numpy array representing the Jacobian for dy/dx. It has ""x_size"" rows and ""y_size"" columns where ""x_size"" is the number of elements in x and ""y_size"" is the number of elements in y." 8669,_compute_dx_and_dy,tensorflow/tensorflow/python/ops/gradient_checker.py,196,function,Returns a node to compute gradient of y wrt x. 8670,_compute_gradient,tensorflow/tensorflow/python/ops/gradient_checker.py,211,function,Computes the theoretical and numerical jacobian. 8671,_compute_gradient_list,tensorflow/tensorflow/python/ops/gradient_checker.py,245,function,Compute gradients for a list of x values. 8672,compute_gradient,tensorflow/tensorflow/python/ops/gradient_checker.py,277,function,"Computes and returns the theoretical and numerical Jacobian. If `x` or `y` is complex, the Jacobian will still be real but the corresponding Jacobian dimension(s) will be twice as large. This is required even if both input and output is complex since TensorFlow graphs are not necessarily holomorphic, and may have gradients not expressible as complex numbers. For example, if `x` is complex with shape `[m]` and `y` is complex with shape `[n]`, each Jacobian `J` will have shape `[m * 2, n * 2]` with J[:m, :n] = d(Re y)/d(Re x) J[:m, n:] = d(Im y)/d(Re x) J[m:, :n] = d(Re y)/d(Im x) J[m:, n:] = d(Im y)/d(Im x) Args: x: a tensor or list of tensors x_shape: the dimensions of x as a tuple or an array of ints. If x is a list, then this is the list of shapes. y: a tensor y_shape: the dimensions of y as a tuple or an array of ints. x_init_value: (optional) a numpy array of the same shape as ""x"" representing the initial value of x. If x is a list, this should be a list of numpy arrays. If this is none, the function will pick a random tensor as the initial value. delta: (optional) the amount of perturbation. init_targets: list of targets to run to initialize model params. extra_feed_dict: dict that allows fixing specified tensor values during the Jacobian calculation. Returns: Two 2-d numpy arrays representing the theoretical and numerical Jacobian for dy/dx. Each has ""x_size"" rows and ""y_size"" columns where ""x_size"" is the number of elements in x and ""y_size"" is the number of elements in y. If x is a list, returns a list of two numpy arrays." 8673,_compute_error,tensorflow/tensorflow/python/ops/gradient_checker.py,338,function, 8674,compute_gradient_error,tensorflow/tensorflow/python/ops/gradient_checker.py,354,function,"Computes the gradient error. Computes the maximum error for dy/dx between the computed Jacobian and the numerically estimated Jacobian. 
This function will modify the tensors passed in as it adds more operations and hence changes the consumers of the operations of the input tensors. This function adds operations to the current session. To compute the error using a particular device, such as a GPU, use the standard methods for setting a device (e.g. using with sess.graph.device() or setting a device function in the session constructor). Args: x: a tensor or list of tensors x_shape: the dimensions of x as a tuple or an array of ints. If x is a list, then this is the list of shapes. y: a tensor y_shape: the dimensions of y as a tuple or an array of ints. x_init_value: (optional) a numpy array of the same shape as ""x"" representing the initial value of x. If x is a list, this should be a list of numpy arrays. If this is None, the function will pick a random tensor as the initial value. delta: (optional) the amount of perturbation. init_targets: list of targets to run to initialize model params. extra_feed_dict: dict that allows fixing specified tensor values during the Jacobian calculation. Returns: The maximum error between the two Jacobians." 8675,_bad_grad,tensorflow/tensorflow/python/ops/gradient_checker_test.py,37,function,A gradient that returns the wrong shape. 8676,_nan_grad,tensorflow/tensorflow/python/ops/gradient_checker_test.py,43,function,A gradient that returns NaN. 8677,GradientCheckerTest,tensorflow/tensorflow/python/ops/gradient_checker_test.py,48,class, 8678,MiniMNISTTest,tensorflow/tensorflow/python/ops/gradient_checker_test.py,203,class, 8679,_product,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,35,function, 8680,_eval_indexed_slices,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,45,function,"Converts IndexedSlices to IndexedSlicesValue with numpy indices/values. When eager execution is enabled, converts IndexedSlices to IndexedSlicesValue with numpy indices/values. Args: a: any value. Returns: If a is IndexedSlices and eager execution is enabled, calls numpy() on a's fields. Otherwise returns a unchanged." 8681,_to_numpy,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,66,function,"Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays. Args: a: any value. Returns: If a is EagerTensor or Tensor, returns the evaluation of a by calling numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding dense numpy array. Otherwise returns a unchanged." 8682,_prepare,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,95,function,"Return a function that executes 'f'. In TF 2.x, this is the same as `f`. In TF 1.x, returns a Python function that executes the graph defined by `f` in a Session. Args: f: the function. xs_dtypes: dtypes of f's arguments. xs_shapes: shapes of f's arguments. Returns: A function that executes `f`." 8683,_compute_theoretical_jacobian,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,129,function,"Computes the theoretical Jacobian for f with respect to xs[param]. One can think of the relation among f, xs and y as y = f(xs). Args: f: the function. y_shape: the shape of the result. y_dtype: the dtype of the result. xs: a list of tensors. param: the index of the target parameter. Returns: A 2-d numpy array representing the Jacobian. It has ""y_size"" rows and ""x_size"" columns where ""x_size"" is the number of elements in xs[param] and ""y_size"" is the number of elements in the result. Raises: ValueError: If result is empty but the gradient is nonzero."
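The gradient_checker_v2 helpers above produce the theoretical and numeric Jacobians that the `compute_gradient` and `max_error` entries below compare. A minimal sketch of that checking workflow via the public `tf.test.compute_gradient` wrapper; the function `f`, its input, and the `1e-3` tolerance are illustrative assumptions, not values taken from this index:

```python
import numpy as np
import tensorflow as tf

def f(x):
  # Illustrative differentiable function; any function of the inputs works.
  return tf.reduce_sum(x * x)

# Returns (theoretical_jacobians, numeric_jacobians): one 2-d numpy array
# per argument, with "y_size" rows and "x_size" columns.
theoretical, numerical = tf.test.compute_gradient(f, [tf.constant([1.0, 2.0, 3.0])])

# max_error-style check: the largest elementwise gap should be tiny.
err = np.max(np.abs(theoretical[0] - numerical[0]))
assert err < 1e-3  # illustrative tolerance
```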
8684,_compute_numeric_jacobian,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,201,function,"Computes the numeric Jacobian for f with respect to xs[param]. One can think of the relation among f, xs and y as y = f(xs). Args: f: the function. y_size: the number of elements of the result. y_dtype: the dtype of the result. xs: a list of tensors. param: the index of the target parameter. delta: the amount of perturbation we give to the input. Returns: A 2-d numpy array representing the Jacobian. It has ""y_size"" rows and ""x_size"" columns where ""x_size"" is the number of elements in xs[param] and ""y_size"" is the number of elements in the result." 8685,_compute_gradient,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,259,function,Computes the theoretical and numerical jacobian. 8686,_compute_gradient_list,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,279,function,Compute gradients for a list of x values. 8687,compute_gradient,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,295,function,"Computes the theoretical and numeric Jacobian of `f`. With y = f(x), computes the theoretical and numeric Jacobian dy/dx. Args: f: the function. x: the arguments for the function as a list or tuple of values convertible to a Tensor. delta: (optional) perturbation used to compute numeric Jacobian. Returns: A pair of lists, where the first is a list of 2-d numpy arrays representing the theoretical Jacobians for each argument, and the second list is the numerical ones. Each 2-d array has ""y_size"" rows and ""x_size"" columns where ""x_size"" is the number of elements in the corresponding argument and ""y_size"" is the number of elements in f(x). Raises: ValueError: If result is empty but the gradient is nonzero. ValueError: If x is not a list, but some other type. Example: ```python @tf.function def test_func(x): return x*x theoretical, numerical = tf.test.compute_gradient(test_func, [1.0]) theoretical, numerical # ((array([[2.]], dtype=float32),), (array([[2.000004]], dtype=float32),)) ```" 8688,max_error,tensorflow/tensorflow/python/ops/gradient_checker_v2.py,335,function,"Computes maximum elementwise gap. Computes the maximum elementwise gap between two lists of tensors of the same shape. Args: grad1: a list of tensors. grad2: a list of tensors with the same shape as grad1. Returns: The maximum elementwise gap between the two." 8689,_random_complex,tensorflow/tensorflow/python/ops/gradient_checker_v2_test.py,41,function, 8690,GradientCheckerTest,tensorflow/tensorflow/python/ops/gradient_checker_v2_test.py,49,class, 8691,MiniMNISTTest,tensorflow/tensorflow/python/ops/gradient_checker_v2_test.py,280,class, 8692,gradients,tensorflow/tensorflow/python/ops/gradients_impl.py,44,function,"Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`. `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys` is a list of `Tensor`, holding the gradients received by the `ys`. The list must be the same length as `ys`. `gradients()` adds ops to the graph to output the derivatives of `ys` with respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. `grad_ys` is a list of tensors of the same length as `ys` that holds the initial gradients for each y in `ys`. When `grad_ys` is None, we fill in a tensor of '1's of the shape of y for each y in `ys`.
A user can provide their own initial `grad_ys` to compute the derivatives using a different initial gradient for each y (e.g., if one wanted to weight the gradient differently for each value in each y). `stop_gradients` is a `Tensor` or a list of tensors to be considered constant with respect to all `xs`. These tensors will not be backpropagated through, as though they had been explicitly disconnected using `stop_gradient`. Among other things, this allows computation of partial derivatives as opposed to total derivatives. For example: ```python a = tf.constant(0.) b = 2 * a g = tf.gradients(a + b, [a, b], stop_gradients=[a, b]) ``` Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the total derivatives `tf.gradients(a + b, [a, b])`, which take into account the influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is equivalent to: ```python a = tf.stop_gradient(tf.constant(0.)) b = tf.stop_gradient(2 * a) g = tf.gradients(a + b, [a, b]) ``` `stop_gradients` provides a way of stopping gradient after the graph has already been constructed, as compared to `tf.stop_gradient` which is used during graph construction. When the two approaches are combined, backpropagation stops at both `tf.stop_gradient` nodes and nodes in `stop_gradients`, whichever is encountered first. All integer tensors are considered constant with respect to all `xs`, as if they were included in `stop_gradients`. `unconnected_gradients` determines the value returned for each x in xs if it is unconnected in the graph to ys. By default this is None to safeguard against errors. Mathematically these gradients are zero which can be requested using the `'zero'` option. `tf.UnconnectedGradients` provides the following options and behaviors: ```python a = tf.ones([1, 2]) b = tf.ones([3, 1]) g1 = tf.gradients([b], [a], unconnected_gradients='none') sess.run(g1) # [None] g2 = tf.gradients([b], [a], unconnected_gradients='zero') sess.run(g2) # [array([[0., 0.]], dtype=float32)] ``` Let us take one practical example which comes during the backpropagation phase. This function is used to evaluate the derivatives of the cost function with respect to Weights `Ws` and Biases `bs`. The sample implementation below explains what it is actually used for: ```python Ws = tf.constant(0.) bs = 2 * Ws cost = Ws + bs # This is just an example. So, please ignore the formulas. g = tf.gradients(cost, [Ws, bs]) dCost_dW, dCost_db = g ``` Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. grad_ys: Optional. A `Tensor` or list of tensors the same size as `ys` and holding the gradients computed for each y in `ys`. name: Optional name to use for grouping all the gradient ops together; defaults to 'gradients'. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. gate_gradients: If True, add a tuple around the gradients returned for an operation. This avoids some race conditions. aggregation_method: Specifies the method used to combine gradient terms. Accepted values are constants defined in the class `AggregationMethod`. stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate through. unconnected_gradients: Optional. Specifies the gradient value returned when the given input tensors are unconnected. Accepted values are constants defined in the class `tf.UnconnectedGradients` and the default value is `none`.
Returns: A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. Raises: LookupError: if one of the operations between `x` and `y` does not have a registered gradient function. ValueError: if the arguments are invalid. RuntimeError: if called in Eager mode." 8693,gradients_v2,tensorflow/tensorflow/python/ops/gradients_impl.py,177,function,"Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`. `tf.gradients` is only valid in a graph context. In particular, it is valid in the context of a `tf.function` wrapper, where code is executing as a graph. `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys` is a list of `Tensor`, holding the gradients received by the `ys`. The list must be the same length as `ys`. `gradients()` adds ops to the graph to output the derivatives of `ys` with respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. `grad_ys` is a list of tensors of the same length as `ys` that holds the initial gradients for each y in `ys`. When `grad_ys` is None, we fill in a tensor of '1's of the shape of y for each y in `ys`. A user can provide their own initial `grad_ys` to compute the derivatives using a different initial gradient for each y (e.g., if one wanted to weight the gradient differently for each value in each y). `stop_gradients` is a `Tensor` or a list of tensors to be considered constant with respect to all `xs`. These tensors will not be backpropagated through, as though they had been explicitly disconnected using `stop_gradient`. Among other things, this allows computation of partial derivatives as opposed to total derivatives. For example: >>> @tf.function ... def example(): ... a = tf.constant(0.) ... b = 2 * a ... return tf.gradients(a + b, [a, b], stop_gradients=[a, b]) >>> example() [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>, <tf.Tensor: shape=(), dtype=float32, numpy=1.0>] Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the total derivatives `tf.gradients(a + b, [a, b])`, which take into account the influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is equivalent to: >>> @tf.function ... def example(): ... a = tf.stop_gradient(tf.constant(0.)) ... b = tf.stop_gradient(2 * a) ... return tf.gradients(a + b, [a, b]) >>> example() [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>, <tf.Tensor: shape=(), dtype=float32, numpy=1.0>] `stop_gradients` provides a way of stopping gradient after the graph has already been constructed, as compared to `tf.stop_gradient` which is used during graph construction. When the two approaches are combined, backpropagation stops at both `tf.stop_gradient` nodes and nodes in `stop_gradients`, whichever is encountered first. All integer tensors are considered constant with respect to all `xs`, as if they were included in `stop_gradients`. `unconnected_gradients` determines the value returned for each x in xs if it is unconnected in the graph to ys. By default this is None to safeguard against errors. Mathematically these gradients are zero which can be requested using the `'zero'` option. `tf.UnconnectedGradients` provides the following options and behaviors: >>> @tf.function ... def example(use_zero): ... a = tf.ones([1, 2]) ... b = tf.ones([3, 1]) ... if use_zero: ... return tf.gradients([b], [a], unconnected_gradients='zero') ... else: ... return tf.gradients([b], [a], unconnected_gradients='none') >>> example(False) [None] >>> example(True) [<tf.Tensor: shape=(1, 2), dtype=float32, numpy=array([[0., 0.]], dtype=float32)>] Let us take one practical example which comes during the backpropagation phase.
This function is used to evaluate the derivatives of the cost function with respect to Weights `Ws` and Biases `bs`. The sample implementation below explains what it is actually used for: >>> @tf.function ... def example(): ... Ws = tf.constant(0.) ... bs = 2 * Ws ... cost = Ws + bs # This is just an example. Please ignore the formulas. ... g = tf.gradients(cost, [Ws, bs]) ... dCost_dW, dCost_db = g ... return dCost_dW, dCost_db >>> example() (<tf.Tensor: shape=(), dtype=float32, numpy=3.0>, <tf.Tensor: shape=(), dtype=float32, numpy=1.0>) Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. grad_ys: Optional. A `Tensor` or list of tensors the same size as `ys` and holding the gradients computed for each y in `ys`. name: Optional name to use for grouping all the gradient ops together; defaults to 'gradients'. gate_gradients: If True, add a tuple around the gradients returned for an operation. This avoids some race conditions. aggregation_method: Specifies the method used to combine gradient terms. Accepted values are constants defined in the class `AggregationMethod`. stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate through. unconnected_gradients: Optional. Specifies the gradient value returned when the given input tensors are unconnected. Accepted values are constants defined in the class `tf.UnconnectedGradients` and the default value is `none`. Returns: A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`. Raises: LookupError: if one of the operations between `x` and `y` does not have a registered gradient function. ValueError: if the arguments are invalid. RuntimeError: if called in Eager mode." 8694,_hessian_vector_product,tensorflow/tensorflow/python/ops/gradients_impl.py,323,function,"Multiply the Hessian of `ys` wrt `xs` by `v`. This is an efficient construction that uses a backprop-like approach to compute the product between the Hessian and another vector. The Hessian is usually too large to be explicitly computed or even represented, but this method allows us to at least multiply by it for the same big-O cost as backprop. Implicit Hessian-vector products are the main practical, scalable way of using second derivatives with neural networks. They allow us to do things like construct Krylov subspaces and approximate conjugate gradient descent. Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y, x, v)` will return an expression that evaluates to the same values as (A + A.T) `v`. Args: ys: A scalar value, or a tensor or list of tensors to be summed to yield a scalar. xs: A list of tensors that we should construct the Hessian over. v: A list of tensors, with the same shapes as xs, that we want to multiply by the Hessian. Returns: A list of tensors (or if the list would be length 1, a single tensor) containing the product between the Hessian and `v`. Raises: ValueError: `xs` and `v` have different length." 8695,hessians,tensorflow/tensorflow/python/ops/gradients_impl.py,377,function,"Constructs the Hessian of sum of `ys` with respect to `x` in `xs`. `hessians()` adds ops to the graph to output the Hessian matrix of `ys` with respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where each tensor is the Hessian of `sum(ys)`. The Hessian is a matrix of second-order partial derivatives of a scalar tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details). Args: ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation. name: Optional name to use for grouping all the gradient ops together; defaults to 'hessians'. colocate_gradients_with_ops: See `gradients()` documentation for details. gate_gradients: See `gradients()` documentation for details. aggregation_method: See `gradients()` documentation for details. Returns: A list of Hessian matrices of `sum(ys)` for each `x` in `xs`. Raises: LookupError: if one of the operations between `xs` and `ys` does not have a registered gradient function." 8696,HessiansV2,tensorflow/tensorflow/python/ops/gradients_impl.py,444,function,"Constructs the Hessian of sum of `ys` with respect to `x` in `xs`. `hessians()` adds ops to the graph to output the Hessian matrix of `ys` with respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where each tensor is the Hessian of `sum(ys)`. The Hessian is a matrix of second-order partial derivatives of a scalar tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details). Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. gate_gradients: See `gradients()` documentation for details. aggregation_method: See `gradients()` documentation for details. name: Optional name to use for grouping all the gradient ops together; defaults to 'hessians'. Returns: A list of Hessian matrices of `sum(ys)` for each `x` in `xs`. Raises: LookupError: if one of the operations between `xs` and `ys` does not have a registered gradient function." 8697,GradientsTest,tensorflow/tensorflow/python/ops/gradients_test.py,66,class, 8698,FunctionGradientsTest,tensorflow/tensorflow/python/ops/gradients_test.py,492,class, 8699,StopGradientTest,tensorflow/tensorflow/python/ops/gradients_test.py,691,class, 8700,PreventGradientTest,tensorflow/tensorflow/python/ops/gradients_test.py,701,class, 8701,HessianVectorProductTest,tensorflow/tensorflow/python/ops/gradients_test.py,711,class, 8702,HessianTest,tensorflow/tensorflow/python/ops/gradients_test.py,740,class, 8703,IndexedSlicesToTensorTest,tensorflow/tensorflow/python/ops/gradients_test.py,833,class, 8704,OnlyRealGradientsTest,tensorflow/tensorflow/python/ops/gradients_test.py,917,class, 8705,ResourceCondTest,tensorflow/tensorflow/python/ops/gradients_test.py,929,class, 8706,GetDependentVariablesTest,tensorflow/tensorflow/python/ops/gradients_test.py,954,class, 8707,CustomGradientTest,tensorflow/tensorflow/python/ops/gradients_test.py,1075,class, 8708,TensorListGradientsTest,tensorflow/tensorflow/python/ops/gradients_test.py,1427,class, 8709,VariablesGradientTest,tensorflow/tensorflow/python/ops/gradients_test.py,1447,class, 8710,GradPassThroughTest,tensorflow/tensorflow/python/ops/gradients_test.py,1647,class, 8711,_MarkReachedOps,tensorflow/tensorflow/python/ops/gradients_util.py,52,function,"Mark all ops reached from ""from_ops"". Args: from_ops: list of Operations. reached_ops: set of Operations. func_graphs: list of FuncGraphs. This method will traverse through these functions if they capture from_ops or any reachable ops." 8712,_PendingCount,tensorflow/tensorflow/python/ops/gradients_util.py,72,function,"Initialize the pending count for ops between two lists of Operations. 'pending_count[op]' indicates the number of backprop inputs to this operation. Args: to_ops: list of Operations. from_ops: list of Operations. colocate_gradients_with_ops: Python bool. See docstring of gradients(). func_graphs: list of FuncGraphs.
This method will traverse through these functions if they capture from_ops or any reachable ops. This is useful if to_ops occur in a function and from_ops are in an outer function or graph. xs_set: ObjectIdentitySet of Tensors. Returns: A tuple containing: (1) the subset of to_ops reachable from from_ops by a path of zero or more backpropagatable tensors, (2) a mapping from operation to the number of backprop inputs to that op, and (3) a ControlFlowState object which is not None if the ops between from_ops and to_ops contain control flow loops." 8713,_AsList,tensorflow/tensorflow/python/ops/gradients_util.py,136,function, 8714,_DefaultGradYs,tensorflow/tensorflow/python/ops/gradients_util.py,140,function,"Fill in default values for grad_ys. Args: grad_ys: List of gradients, can contain None. ys: List of tensors. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. gradient_uid: A unique identifier within the graph indicating which invocation of gradients is being executed. Used to cluster ops for compilation. Returns: A list of gradients to use, without None. Raises: ValueError: If sizes of gradients and inputs don't match. TypeError: If type of any gradient is not valid for its input." 8715,_IsBackpropagatable,tensorflow/tensorflow/python/ops/gradients_util.py,230,function, 8716,_VerifyGeneratedGradients,tensorflow/tensorflow/python/ops/gradients_util.py,237,function,"Verify that gradients are valid in number and type. Args: grads: List of generated gradients. op: Operation for which the gradients were generated. Raises: ValueError: if sizes of gradients and inputs don't match. TypeError: if type of any gradient is not valid for its input." 8717,_StopOps,tensorflow/tensorflow/python/ops/gradients_util.py,258,function,"The set of ops that terminate the gradient computation. This computes the frontier of the forward graph *before* which backprop should stop. Operations in the returned set will not be differentiated. This set is defined as the subset of `from_ops` containing ops that have no predecessor in `from_ops`. `pending_count` is the result of `_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops` iff pending_count[op] > 0. In addition, none of `stop_gradient_ops` will be differentiated. Args: from_ops: list of Operations. stop_gradient_ops: list of Operations never to backprop through. pending_count: mapping from operation to number of backprop inputs. xs_set: ObjectIdentitySet of Tensors. Returns: The set of operations." 8718,_maybe_colocate_with,tensorflow/tensorflow/python/ops/gradients_util.py,293,function,Context to colocate with `op` if `colocate_gradients_with_ops`. 8719,_IsPartitionedCall,tensorflow/tensorflow/python/ops/gradients_util.py,302,function, 8720,_SymGrad,tensorflow/tensorflow/python/ops/gradients_util.py,306,function,Backprop through a function call node op given its outputs' gradients. 8721,_MaybeCompile,tensorflow/tensorflow/python/ops/gradients_util.py,321,function,Compile the calculation in grad_fn if op was marked as compiled. 8722,_RaiseNoGradWrtInitialLoopValError,tensorflow/tensorflow/python/ops/gradients_util.py,358,function,Raises an error if we backprop through a loop var. 8723,_IsFunction,tensorflow/tensorflow/python/ops/gradients_util.py,382,function, 8724,_Captures,tensorflow/tensorflow/python/ops/gradients_util.py,387,function, 8725,_MaybeCaptured,tensorflow/tensorflow/python/ops/gradients_util.py,395,function,"If t is a captured value placeholder, returns the original captured value.
Args: t: Tensor Returns: A tensor, potentially from a different Graph/FuncGraph." 8726,_NonEagerInputs,tensorflow/tensorflow/python/ops/gradients_util.py,414,function,"Returns the inputs of op, crossing closure boundaries where necessary. Does not return any captured EagerTensors, i.e., the number of tensors returned may be less than the actual number of inputs. Args: op: Operation xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t. Returns: A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op is in a FuncGraph and has captured inputs." 8727,_Inputs,tensorflow/tensorflow/python/ops/gradients_util.py,433,function,"Returns the inputs of op, crossing closure boundaries where necessary. Args: op: Operation xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t. Returns: A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op is in a FuncGraph and has captured inputs." 8728,_Consumers,tensorflow/tensorflow/python/ops/gradients_util.py,460,function,"Returns the consumers of t, crossing closure boundaries where necessary. Args: t: Tensor func_graphs: a list of FuncGraphs that may have captured t. Returns: A list of tensors. The tensors will be from the current graph and/or func_graphs." 8729,_GradientsHelper,tensorflow/tensorflow/python/ops/gradients_util.py,479,function,Implementation of gradients(). 8730,_HasAnyNotNoneGrads,tensorflow/tensorflow/python/ops/gradients_util.py,733,function,Return true iff op has real gradient. 8731,_UpdatePendingAndEnqueueReady,tensorflow/tensorflow/python/ops/gradients_util.py,745,function,Update pending count for the inputs of op and enqueue ready ops. 8732,_SetGrad,tensorflow/tensorflow/python/ops/gradients_util.py,783,function,"Sets gradient ""grad"" in ""grads"" for tensor ""t""." 8733,_ZerosLike,tensorflow/tensorflow/python/ops/gradients_util.py,798,function, 8734,_GetGrad,tensorflow/tensorflow/python/ops/gradients_util.py,807,function,"Gets gradient for tensor ""t""." 8735,_GetGrads,tensorflow/tensorflow/python/ops/gradients_util.py,830,function,Gets all gradients for op. 8736,_AccumulatorShape,tensorflow/tensorflow/python/ops/gradients_util.py,838,function, 8737,_LogOpGradients,tensorflow/tensorflow/python/ops/gradients_util.py,846,function,Log the in and out grads of an op. 8738,_MultiDeviceAddN,tensorflow/tensorflow/python/ops/gradients_util.py,864,function,Adds tensors from potentially multiple devices. 8739,AggregationMethod,tensorflow/tensorflow/python/ops/gradients_util.py,893,class,"A class listing aggregation methods used to combine gradients. Computing partial derivatives can require aggregating gradient contributions. This class lists the various methods that can be used to combine gradients in the graph. The following aggregation methods are part of the stable API for aggregating gradients: * `ADD_N`: All of the gradient terms are summed as part of one operation using the ""AddN"" op (see `tf.add_n`). This method has the property that all gradients must be ready and buffered separately in memory before any aggregation is performed. * `DEFAULT`: The system-chosen default aggregation method. The following aggregation methods are experimental and may not be supported in future releases: * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using the ""AddN"" op. This method of summing gradients may reduce performance, but it can improve memory utilization because the gradients can be released earlier."
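The `AggregationMethod` entry above lists the strategies that `_AggregatedGrads` (next entry) applies when several gradient terms flow into the same tensor. A hedged sketch of selecting a method through the public `aggregation_method` argument of `tf.gradients`; the function name and constants are illustrative, and the commented value is what the math predicts rather than captured output:

```python
import tensorflow as tf

@tf.function
def tree_aggregated_grad():
  x = tf.constant(1.0)
  # x feeds two add ops, so multiple gradient terms must be combined;
  # EXPERIMENTAL_TREE sums them in pairs with AddN instead of one big AddN.
  y = x + x + x
  return tf.gradients(
      y, [x], aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)

print(tree_aggregated_grad())  # expect a single gradient of 3.0
```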
8740,_AggregatedGrads,tensorflow/tensorflow/python/ops/gradients_util.py,925,function,"Get the aggregated gradients for op. Args: grads: The map of memoized gradients. op: The op to get gradients for. gradient_uid: A unique identifier within the graph indicating which invocation of gradients is being executed. Used to cluster ops for compilation. loop_state: An object for maintaining the state of the while loops in the graph. It is of type ControlFlowState. None if the graph contains no while loops. aggregation_method: Specifies the method used to combine gradient terms. Accepted values are constants defined in the class `AggregationMethod`. Returns: A list of gradients, one per output of `op`. If the gradients for a particular output are a list, this function aggregates them before returning. Raises: TypeError: if the incoming grads are not Tensors or IndexedSlices. ValueError: if the arguments are invalid." 8741,histogram_fixed_width_bins,tensorflow/tensorflow/python/ops/histogram_ops.py,35,function,"Bins the given values for use in a histogram. Given the tensor `values`, this operation returns a rank 1 `Tensor` representing the indices of a histogram into which each element of `values` would be binned. The bins are equal width and determined by the arguments `value_range` and `nbins`. Args: values: Numeric `Tensor`. value_range: Shape [2] `Tensor` of same `dtype` as `values`. values <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped to hist[-1]. nbins: Scalar `int32 Tensor`. Number of histogram bins. dtype: dtype for returned histogram. name: A name for this operation (defaults to 'histogram_fixed_width'). Returns: A `Tensor` holding the indices of the binned values whose shape matches `values`. Raises: TypeError: If any unsupported dtype is provided. tf.errors.InvalidArgumentError: If value_range does not satisfy value_range[0] < value_range[1]. Examples: >>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) ... >>> nbins = 5 >>> value_range = [0.0, 5.0] >>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] >>> indices = tf.histogram_fixed_width_bins(new_values, value_range, nbins=5) >>> indices.numpy() array([0, 0, 1, 2, 4, 4], dtype=int32)" 8742,histogram_fixed_width,tensorflow/tensorflow/python/ops/histogram_ops.py,104,function,"Return histogram of values. Given the tensor `values`, this operation returns a rank 1 histogram counting the number of entries in `values` that fell into every bin. The bins are equal width and determined by the arguments `value_range` and `nbins`. Args: values: Numeric `Tensor`. value_range: Shape [2] `Tensor` of same `dtype` as `values`. values <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped to hist[-1]. nbins: Scalar `int32 Tensor`. Number of histogram bins. dtype: dtype for returned histogram. name: A name for this operation (defaults to 'histogram_fixed_width'). Returns: A 1-D `Tensor` holding histogram of values. Raises: TypeError: If any unsupported dtype is provided. tf.errors.InvalidArgumentError: If value_range does not satisfy value_range[0] < value_range[1]. Examples: >>> # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) ...
>>> nbins = 5 >>> value_range = [0.0, 5.0] >>> new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] >>> hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) >>> hist.numpy() array([2, 1, 1, 0, 2], dtype=int32)" 8743,BinValuesFixedWidth,tensorflow/tensorflow/python/ops/histogram_ops_test.py,31,class, 8744,HistogramFixedWidthTest,tensorflow/tensorflow/python/ops/histogram_ops_test.py,83,class, 8745,_ResizeNearestNeighborGrad,tensorflow/tensorflow/python/ops/image_grad.py,29,function,"The derivatives for nearest neighbor resizing. Args: op: The ResizeNearestNeighbor op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input and the output." 8746,_ResizeBilinearGrad,tensorflow/tensorflow/python/ops/image_grad.py,54,function,"The derivatives for bilinear resizing. Args: op: The ResizeBilinear op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input." 8747,_ScaleAndTranslateGrad,tensorflow/tensorflow/python/ops/image_grad.py,73,function,"The derivatives for ScaleAndTranslate transformation op. Args: op: The ScaleAndTranslate op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input." 8748,_ResizeBicubicGrad,tensorflow/tensorflow/python/ops/image_grad.py,95,function,"The derivatives for bicubic resizing. Args: op: The ResizeBicubic op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input." 8749,_CropAndResizeGrad,tensorflow/tensorflow/python/ops/image_grad.py,117,function,"The derivatives for crop_and_resize. We back-propagate to the image only when the input image tensor has floating point dtype, but we always back-propagate to the input boxes tensor. Args: op: The CropAndResize op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input image, boxes, as well as the always-None gradients w.r.t. box_ind and crop_size." 8750,_CustomReciprocal,tensorflow/tensorflow/python/ops/image_grad.py,159,function,"Wrapper function around `math_ops.div_no_nan()` to perform a ""safe"" reciprocal in case the input is zero. Avoids divide by zero and NaNs. Input: x -> input tensor whose reciprocal is to be taken. Returns: x_reciprocal -> reciprocal of x without NaNs." 8751,_RGBToHSVGrad,tensorflow/tensorflow/python/ops/image_grad.py,171,function,"The gradients for `rgb_to_hsv` operation. This function is a piecewise continuous function as defined here: https://en.wikipedia.org/wiki/HSL_and_HSV#From_RGB We perform the multivariate derivative and compute all partial derivatives separately before adding them in the end. Formulas are given before each partial derivative calculation. Args: op: The `rgb_to_hsv` `Operation` that we are differentiating. grad: Gradient with respect to the output of the `rgb_to_hsv` op. Returns: Gradients with respect to the input of `rgb_to_hsv`."
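The `_CustomReciprocal` entry above describes the `div_no_nan`-based "safe" reciprocal that the RGB-to-HSV gradient relies on. A minimal sketch of the same idea using the public `tf.math.divide_no_nan` op; the helper name `safe_reciprocal` is an illustrative assumption:

```python
import tensorflow as tf

def safe_reciprocal(x):
  # divide_no_nan returns 0 wherever the denominator is 0, so the
  # reciprocal never produces Inf or NaN at x == 0.
  return tf.math.divide_no_nan(1.0, x)

print(safe_reciprocal(tf.constant([2.0, 0.0, 0.5])))  # expect [0.5, 0.0, 2.0]
```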
8752,ResizeNearestNeighborOpTest,tensorflow/tensorflow/python/ops/image_grad_test.py,38,class, 8753,ResizeBilinearOpTest,tensorflow/tensorflow/python/ops/image_grad_test.py,113,class, 8754,ResizeBicubicOpTest,tensorflow/tensorflow/python/ops/image_grad_test.py,202,class, 8755,ScaleAndTranslateOpTest,tensorflow/tensorflow/python/ops/image_grad_test.py,265,class, 8756,CropAndResizeOpTest,tensorflow/tensorflow/python/ops/image_grad_test.py,329,class, 8757,RGBToHSVOpTest,tensorflow/tensorflow/python/ops/image_grad_test.py,458,class, 8758,flat_transforms_to_matrices,tensorflow/tensorflow/python/ops/image_ops.py,177,function,"Converts `tf.contrib.image` projective transforms to affine matrices. Note that the output matrices map output coordinates to input coordinates. For the forward transformation matrix, call `tf.linalg.inv` on the result. Args: transforms: Vector of length 8, or batches of transforms with shape `(N, 8)`. Returns: 3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the *output coordinates* (in homogeneous coordinates) of each transform to the corresponding *input coordinates*. Raises: ValueError: If `transforms` have an invalid shape." 8759,matrices_to_flat_transforms,tensorflow/tensorflow/python/ops/image_ops.py,209,function,"Converts affine matrices to `tf.contrib.image` projective transforms. Note that we expect matrices that map output coordinates to input coordinates. To convert forward transformation matrices, call `tf.linalg.inv` on the matrices and use the result here. Args: transform_matrices: One or more affine transformation matrices, for the reverse transformation in homogeneous coordinates. Shape `(3, 3)` or `(N, 3, 3)`. Returns: 2D tensor of flat transforms with shape `(N, 8)`, which may be passed into `tf.contrib.image.transform`. Raises: ValueError: If `transform_matrices` have an invalid shape." 8760,_image_projective_transform_grad,tensorflow/tensorflow/python/ops/image_ops.py,243,function,Computes the gradient for ImageProjectiveTransform. 8761,_assert,tensorflow/tensorflow/python/ops/image_ops_impl.py,64,function,"A polymorphic assert, works with tensors and boolean expressions. If `cond` is not a tensor, behave like an ordinary assert statement, except that an empty list is returned. If `cond` is a tensor, return a list containing a single TensorFlow assert op. Args: cond: Something that evaluates to a boolean value. May be a tensor. ex_type: The exception class to use. msg: The error message. Returns: A list, containing at most one assert op." 8762,_is_tensor,tensorflow/tensorflow/python/ops/image_ops_impl.py,88,function,"Returns `True` if `x` is a symbolic tensor-like object. Args: x: A python object to check. Returns: `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`." 8763,_ImageDimensions,tensorflow/tensorflow/python/ops/image_ops_impl.py,100,function,"Returns the dimensions of an image tensor. Args: image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`. rank: The expected rank of the image. Returns: A list corresponding to the dimensions of the input image. Dimensions that are statically known are python integers, otherwise, they are integer scalar tensors." 8764,_Check3DImage,tensorflow/tensorflow/python/ops/image_ops_impl.py,122,function,"Assert that we are working with a properly shaped image. Args: image: 3-D Tensor of shape [height, width, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if `image.shape` is not a 3-vector.
Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned." 8765,_Assert3DImage,tensorflow/tensorflow/python/ops/image_ops_impl.py,157,function,"Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: 3-D Tensor of shape [height, width, channels] Raises: ValueError: if `image.shape` is not a 3-vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape." 8766,_AssertAtLeast3DImage,tensorflow/tensorflow/python/ops/image_ops_impl.py,179,function,"Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 3-D Tensor of size [*, height, width, depth] Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape." 8767,_CheckAtLeast3DImage,tensorflow/tensorflow/python/ops/image_ops_impl.py,201,function,"Assert that we are working with a properly shaped image. Args: image: >= 3-D Tensor of size [*, height, width, depth] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned." 8768,_AssertGrayscaleImage,tensorflow/tensorflow/python/ops/image_ops_impl.py,244,function,"Assert that we are working with a properly shaped grayscale image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 2-D Tensor of size [*, 1] Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape." 8769,_CheckGrayscaleImage,tensorflow/tensorflow/python/ops/image_ops_impl.py,267,function,"Assert that we are working with a properly shaped grayscale image. Args: image: >= 2-D Tensor of size [*, 1] require_static: Boolean, whether static shape is required. Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned." 8770,fix_image_flip_shape,tensorflow/tensorflow/python/ops/image_ops_impl.py,310,function,"Set the shape to 3-dimensional if we don't know anything else. Args: image: original image size result: flipped or transformed image Returns: An image whose shape is at least (None, None, None)." 8771,random_flip_up_down,tensorflow/tensorflow/python/ops/image_ops_impl.py,331,function,"Randomly flips an image vertically (upside down). With a 1 in 2 chance, outputs the contents of `image` flipped along the first dimension, which is `height`. Otherwise, output the image as-is.
When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> import numpy as np >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_up_down(image, 3).numpy().tolist() [[[3], [4]], [[1], [2]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_up_down(images, 4).numpy().tolist() [[[[3], [4]], [[1], [2]]], [[[5], [6]], [[7], [8]]]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` is not supported." 8772,random_flip_left_right,tensorflow/tensorflow/python/ops/image_ops_impl.py,372,function,"Randomly flip an image horizontally (left to right). With a 1 in 2 chance, outputs the contents of `image` flipped along the second dimension, which is `width`. Otherwise output the image as-is. When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> import numpy as np >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_left_right(image, 5).numpy().tolist() [[[2], [1]], [[4], [3]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_left_right(images, 6).numpy().tolist() [[[[2], [1]], [[4], [3]]], [[[5], [6]], [[7], [8]]]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` is not supported." 8773,_random_flip,tensorflow/tensorflow/python/ops/image_ops_impl.py,413,function,"Randomly (50% chance) flip an image along axis `flip_index`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: Dimension along which to flip the image. Vertical: 0, Horizontal: 1 seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. scope_name: Name of the scope in which the ops are added. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` is not supported." 8774,flip_left_right,tensorflow/tensorflow/python/ops/image_ops_impl.py,472,function,"Flip an image horizontally (left to right). Outputs the contents of `image` flipped along the width dimension. See also `reverse()`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.flip_left_right(x) Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` is not supported." 8775,flip_up_down,tensorflow/tensorflow/python/ops/image_ops_impl.py,507,function,"Flip an image vertically (upside down). Outputs the contents of `image` flipped along the height dimension. See also `reverse()`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ...
[10.0, 11.0, 12.0]]] >>> tf.image.flip_up_down(x) Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A `Tensor` of the same type and shape as `image`. Raises: ValueError: if the shape of `image` is not supported." 8776,_flip,tensorflow/tensorflow/python/ops/image_ops_impl.py,540,function,"Flip an image either horizontally or vertically. Outputs the contents of `image` flipped along the dimension `flip_index`. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: 0 for vertical, 1 for horizontal. scope_name: string, scope name. Returns: A `Tensor` of the same type and shape as `image`. Raises: ValueError: if the shape of `image` is not supported." 8777,rot90,tensorflow/tensorflow/python/ops/image_ops_impl.py,584,function,"Rotate image(s) counter-clockwise by 90 degrees. For example: >>> a=tf.constant([[[1],[2]], ... [[3],[4]]]) >>> # rotating `a` counter clockwise by 90 degrees >>> a_rot=tf.image.rot90(a) >>> print(a_rot[...,0].numpy()) [[2 4] [1 3]] >>> # rotating `a` counter clockwise by 270 degrees >>> a_rot=tf.image.rot90(a, k=3) >>> print(a_rot[...,0].numpy()) [[3 1] [4 2]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name: A name for this operation (optional). Returns: A rotated tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` is not supported." 8778,_rot90_3D,tensorflow/tensorflow/python/ops/image_ops_impl.py,642,function,"Rotate image counter-clockwise by 90 degrees `k` times. Args: image: 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 3-D tensor of the same type and shape as `image`." 8779,_rot90_4D,tensorflow/tensorflow/python/ops/image_ops_impl.py,673,function,"Rotate batch of images counter-clockwise by 90 degrees `k` times. Args: images: 4-D Tensor of shape `[batch, height, width, channels]`. k: A scalar integer. The number of times the images are rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 4-D `Tensor` of the same type and shape as `images`." 8780,transpose,tensorflow/tensorflow/python/ops/image_ops_impl.py,707,function,"Transpose image(s) by swapping the height and width dimension. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.transpose(x) Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for this operation (optional). Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, width, height, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[width, height, channels]` Raises: ValueError: if the shape of `image` is not supported. Usage Example: >>> image = [[[1, 2], [3, 4]], ... [[5, 6], [7, 8]], ... [[9, 10], [11, 12]]] >>> image = tf.constant(image) >>> tf.image.transpose(image) " 8781,central_crop,tensorflow/tensorflow/python/ops/image_ops_impl.py,777,function,"Crop the central region of the image(s). Remove the outer parts of an image but retain the central region of the image along each dimension.
If we specify central_fraction = 0.5, this function returns the region marked with ""X"" in the below diagram. -------- | | | XXXX | | XXXX | | | -------- where ""X"" is the central 50% of the image. This function works on either a single image (`image` is a 3-D Tensor), or a batch of images (`image` is a 4-D Tensor). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0], ... [7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]], ... [[13.0, 14.0, 15.0], ... [16.0, 17.0, 18.0], ... [19.0, 20.0, 21.0], ... [22.0, 23.0, 24.0]], ... [[25.0, 26.0, 27.0], ... [28.0, 29.0, 30.0], ... [31.0, 32.0, 33.0], ... [34.0, 35.0, 36.0]], ... [[37.0, 38.0, 39.0], ... [40.0, 41.0, 42.0], ... [43.0, 44.0, 45.0], ... [46.0, 47.0, 48.0]]] >>> tf.image.central_crop(x, 0.5) Args: image: Either a 3-D float Tensor of shape [height, width, depth], or a 4-D Tensor of shape [batch_size, height, width, depth]. central_fraction: float (0, 1], fraction of size to crop. Raises: ValueError: if central_fraction is not within (0, 1]. Returns: 3-D / 4-D float Tensor, as per the input." 8782,pad_to_bounding_box,tensorflow/tensorflow/python/ops/image_ops_impl.py,910,function,"Pad `image` with zeros to the specified `height` and `width`. Adds `offset_height` rows of zeros on top, `offset_width` columns of zeros on the left, and then pads the image on the bottom and right with zeros until it has dimensions `target_height`, `target_width`. This op does nothing if `offset_*` is zero and the image already has size `target_height` by `target_width`. Usage Example: >>> x = [[[1., 2., 3.], ... [4., 5., 6.]], ... [[7., 8., 9.], ... [10., 11., 12.]]] >>> padded_image = tf.image.pad_to_bounding_box(x, 1, 1, 4, 4) >>> padded_image Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. offset_height: Number of rows of zeros to add on top. offset_width: Number of columns of zeros to add on the left. target_height: Height of output image. target_width: Width of output image. Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, target_height, target_width, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[target_height, target_width, channels]` Raises: ValueError: If the shape of `image` is incompatible with the `offset_*` or `target_*` arguments, or either `offset_height` or `offset_width` is negative." 8783,crop_to_bounding_box,tensorflow/tensorflow/python/ops/image_ops_impl.py,1022,function,"Crops an image to a specified bounding box. This op cuts a rectangular part out of `image`. The top-left corner of the returned image is at `offset_height, offset_width` in `image`, and its lower-right corner is at `offset_height + target_height, offset_width + target_width`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. offset_height: Vertical coordinate of the top-left corner of the result in the input. offset_width: Horizontal coordinate of the top-left corner of the result in the input. target_height: Height of the result. target_width: Width of the result. Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, target_height, target_width, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[target_height, target_width, channels]` Raises: ValueError: If the shape of `image` is incompatible with the `offset_*` or `target_*` arguments, or either `offset_height` or `offset_width` is negative, or either `target_height` or `target_width` is not positive."
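For matching offsets and sizes, `pad_to_bounding_box` and `crop_to_bounding_box` above are exact inverses, so a quick round-trip check can be written as follows. This is a minimal sketch assuming TF 2.x eager execution; the shapes and offsets are illustrative only:

```python
import tensorflow as tf

image = tf.reshape(tf.range(12, dtype=tf.float32), [2, 2, 3])  # [height, width, channels]
# Zero-pad to 4x4 with a one-pixel offset, then crop the same region back out.
padded = tf.image.pad_to_bounding_box(image, 1, 1, 4, 4)
restored = tf.image.crop_to_bounding_box(padded, 1, 1, 2, 2)
assert tf.reduce_all(restored == image)  # the round trip is lossless
```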
8784,resize_image_with_crop_or_pad,tensorflow/tensorflow/python/ops/image_ops_impl.py,1107,function,"Crops and/or pads an image to a target width and height. Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. If `width` or `height` is greater than the specified `target_width` or `target_height` respectively, this op centrally crops along that dimension. If `width` or `height` is smaller than the specified `target_width` or `target_height` respectively, this op centrally pads with 0 along that dimension. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Cropped and/or padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`." 8785,ResizeMethodV1,tensorflow/tensorflow/python/ops/image_ops_impl.py,1226,class,See `v1.image.resize` for details. 8786,ResizeMethod,tensorflow/tensorflow/python/ops/image_ops_impl.py,1235,class,See `tf.image.resize` for details. 8787,_resize_images_common,tensorflow/tensorflow/python/ops/image_ops_impl.py,1247,function,Core functionality for v1 and v2 resize functions. 8788,resize_images,tensorflow/tensorflow/python/ops/image_ops_impl.py,1327,function,"Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad` or `tf.image.resize_with_crop_or_pad`. The `method` can be one of: * `tf.image.ResizeMethod.BILINEAR`: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) * `tf.image.ResizeMethod.NEAREST_NEIGHBOR`: [ Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) * `tf.image.ResizeMethod.BICUBIC`: [Bicubic interpolation.]( https://en.wikipedia.org/wiki/Bicubic_interpolation) * `tf.image.ResizeMethod.AREA`: Area interpolation. The return value has the same type as `images` if `method` is `tf.image.ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type as `images` if the size of `images` can be statically determined to be the same as `size`, because `images` is returned in this case. Otherwise, the return value has type `float32`. Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: ResizeMethod. Defaults to `tf.image.ResizeMethod.BILINEAR`. align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has invalid shape or type. ValueError: if an unsupported resize method is specified. 
Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`." 8789,resize_images_v2,tensorflow/tensorflow/python/ops/image_ops_impl.py,1413,function,"Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad`. >>> image = tf.constant([ ... [1,0,0,0,0], ... [0,1,0,0,0], ... [0,0,1,0,0], ... [0,0,0,1,0], ... [0,0,0,0,1], ... ]) >>> # Add ""batch"" and ""channels"" dimensions >>> image = image[tf.newaxis, ..., tf.newaxis] >>> image.shape.as_list() # [batch, height, width, channels] [1, 5, 5, 1] >>> tf.image.resize(image, [3,5])[0,...,0].numpy() array([[0.6666667, 0.3333333, 0. , 0. , 0. ], [0. , 0. , 1. , 0. , 0. ], [0. , 0. , 0. , 0.3333335, 0.6666665]], dtype=float32) It works equally well with a single image instead of a batch of images: >>> tf.image.resize(image[0], [3,5]).shape.as_list() [3, 5, 1] When `antialias` is true, the sampling filter will anti-alias the input image as well as interpolate. When downsampling an image with [anti-aliasing]( https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter kernel is scaled in order to properly anti-alias the input image signal. `antialias` has no effect when upsampling an image: >>> a = tf.image.resize(image, [5,10]) >>> b = tf.image.resize(image, [5,10], antialias=True) >>> tf.reduce_max(abs(a - b)).numpy() 0.0 The `method` argument expects an item from the `image.ResizeMethod` enum, or the string equivalent. The options are: * `bilinear`: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) If `antialias` is true, becomes a hat/tent filter function with radius 1 when downsampling. * `lanczos3`: [Lanczos kernel]( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3. High-quality practical filter but may have some ringing, especially on synthetic images. * `lanczos5`: [Lanczos kernel] ( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5. Very-high-quality filter but may have stronger ringing. * `bicubic`: [Cubic interpolant]( https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel, particularly when upsampling. * `gaussian`: [Gaussian kernel]( https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3, sigma = 1.5 / 3.0. * `nearest`: [Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) `antialias` has no effect when used with nearest neighbor interpolation. * `area`: Anti-aliased resampling with area interpolation. `antialias` has no effect when used with area interpolation; it always anti-aliases. * `mitchellcubic`: Mitchell-Netravali Cubic non-interpolating filter. For synthetic images (especially those lacking proper prefiltering), less ringing than Keys cubic kernel but less sharp. Note: Near image edges the filtering kernel may be partially outside the image boundaries. For these pixels, only input pixels inside the image will be included in the filter sum, and the output value will be appropriately normalized. 
The return value has type `float32`, unless the `method` is `ResizeMethod.NEAREST_NEIGHBOR`, then the return dtype is the dtype of `images`: >>> nn = tf.image.resize(image, [5,7], method='nearest') >>> nn[0,...,0].numpy() array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1]], dtype=int32) With `preserve_aspect_ratio=True`, the aspect ratio is preserved, so `size` is the maximum for each dimension: >>> max_10_20 = tf.image.resize(image, [10,20], preserve_aspect_ratio=True) >>> max_10_20.shape.as_list() [1, 10, 10, 1] Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: An `image.ResizeMethod`, or string equivalent. Defaults to `bilinear`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. antialias: Whether to use an anti-aliasing filter when downsampling an image. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has an invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`." 8790,_resize_image_with_pad_common,tensorflow/tensorflow/python/ops/image_ops_impl.py,1590,function,Core functionality for v1 and v2 resize_image_with_pad functions. 8791,resize_image_with_pad_v1,tensorflow/tensorflow/python/ops/image_ops_impl.py,1667,function,"Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `resize_images()` align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`." 8792,resize_image_with_pad_v2,tensorflow/tensorflow/python/ops/image_ops_impl.py,1710,function,"Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. 
method: Method to use for resizing image. See `image.resize()` antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`." 8793,per_image_standardization,tensorflow/tensorflow/python/ops/image_ops_impl.py,1751,function,"Linearly scales each image in `image` to have mean 0 and variance 1. For each 3-D image `x` in `image`, computes `(x - mean) / adjusted_stddev`, where - `mean` is the average of all values in `x` - `adjusted_stddev = max(stddev, 1.0/sqrt(N))` is capped away from 0 to protect against division by 0 when handling uniform images - `N` is the number of elements in `x` - `stddev` is the standard deviation of all values in `x` Args: image: An n-D Tensor with at least 3 dimensions, the last 3 of which are the dimensions of each image. Returns: A `Tensor` with the same shape and dtype as `image`. Raises: ValueError: if the shape of 'image' is incompatible with this function." 8794,random_brightness,tensorflow/tensorflow/python/ops/image_ops_impl.py,1797,function,"Adjust the brightness of images by a random factor. Equivalent to `adjust_brightness()` using a `delta` randomly picked in the interval `[-max_delta, max_delta)`. Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_brightness(x, 0.2) Returns: The brightness-adjusted image(s). Raises: ValueError: if `max_delta` is negative." 8795,random_contrast,tensorflow/tensorflow/python/ops/image_ops_impl.py,1833,function,"Adjust the contrast of an image or images by a random factor. Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly picked in the interval `[lower, upper)`. Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_contrast(x, 0.2, 0.5) Returns: The contrast-adjusted image(s). Raises: ValueError: if `upper <= lower` or if `lower < 0`." 8796,adjust_brightness,tensorflow/tensorflow/python/ops/image_ops_impl.py,1873,function,"Adjust the brightness of RGB or Grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their brightness, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. The value `delta` is added to all components of the tensor `image`. `image` is converted to `float` and scaled appropriately if it is in fixed-point representation, and `delta` is converted to the same data type. For regular images, `delta` should be in the range `[0,1)`, as it is added to the image in floating point representation, where pixel values are in the `[0,1)` range. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... 
[10.0, 11.0, 12.0]]] >>> tf.image.adjust_brightness(x, delta=0.1) Args: image: RGB image or images to adjust. delta: A scalar. Amount to add to the pixel values. Returns: A brightness-adjusted tensor of the same shape and type as `image`." 8797,adjust_contrast,tensorflow/tensorflow/python/ops/image_ops_impl.py,1925,function,"Adjust contrast of RGB or grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their contrast, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. `images` is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels]`. Contrast is adjusted independently for each channel of each image. For each channel, this Op computes the mean of the image pixels in the channel and then adjusts each component `x` of each pixel to `(x - mean) * contrast_factor + mean`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_contrast(x, 2) Args: images: Images to adjust. At least 3-D. contrast_factor: A float multiplier for adjusting contrast. Returns: The contrast-adjusted image or images." 8798,adjust_gamma,tensorflow/tensorflow/python/ops/image_ops_impl.py,1982,function,"Performs [Gamma Correction](http://en.wikipedia.org/wiki/Gamma_correction) on the input image. Also known as the Power Law Transform. This function first converts the input images to float representation, then transforms them pixelwise according to the equation `Out = gain * In**gamma`, and then converts them back to the original data type. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_gamma(x, 0.2) Args: image: RGB image or images to adjust. gamma: A scalar or tensor. Non-negative real number. gain: A scalar or tensor. The constant multiplier. Returns: A Tensor. A Gamma-adjusted tensor of the same shape and type as `image`. Raises: ValueError: If gamma is negative. Notes: For gamma greater than 1, the histogram will shift towards the left and the output image will be darker than the input image. For gamma less than 1, the histogram will shift towards the right and the output image will be brighter than the input image. References: [Wikipedia](http://en.wikipedia.org/wiki/Gamma_correction)" 8799,convert_image_dtype,tensorflow/tensorflow/python/ops/image_ops_impl.py,2047,function,"Convert `image` to `dtype`, scaling its values if needed. Images that are represented using floating point values are expected to have values in the range [0,1). Image data stored in integer data types are expected to have values in the range `[0,MAX]`, where `MAX` is the largest positive representable number for the data type. This op converts between data types, scaling the values appropriately before casting. Note that converting from floating point inputs to integer types may lead to over/underflow problems. Set saturate to `True` to avoid such problems in problematic conversions.
If enabled, saturation will clip the output into the allowed range before performing a potentially dangerous cast (and only before performing such a cast, i.e., when casting from a floating point to an integer type, and when casting from a signed to an unsigned type; `saturate` has no effect on casts between floats, or on casts that increase the type's range). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.convert_image_dtype(x, dtype=tf.float16, saturate=False) Args: image: An image. dtype: A `DType` to convert `image` to. saturate: If `True`, clip the input before casting (if necessary). name: A name for this operation (optional). Returns: `image`, converted to `dtype`. Raises: AttributeError: Raises an attribute error when dtype is neither float nor integer." 8800,rgb_to_grayscale,tensorflow/tensorflow/python/ops/image_ops_impl.py,2147,function,"Converts one or more images from RGB to Grayscale. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 1, containing the Grayscale value of the pixels. >>> original = tf.constant([[[1.0, 2.0, 3.0]]]) >>> converted = tf.image.rgb_to_grayscale(original) >>> print(converted.numpy()) [[[1.81...]]] Args: images: The RGB tensor to convert. The last dimension must have size 3 and should contain RGB values. name: A name for the operation (optional). Returns: The converted grayscale image(s)." 8801,grayscale_to_rgb,tensorflow/tensorflow/python/ops/image_ops_impl.py,2183,function,"Converts one or more images from Grayscale to RGB. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 3, containing the RGB value of the pixels. The input images' last dimension must be size 1. >>> original = tf.constant([[[1.0], [2.0], [3.0]]]) >>> converted = tf.image.grayscale_to_rgb(original) >>> print(converted.numpy()) [[[1. 1. 1.] [2. 2. 2.] [3. 3. 3.]]] Args: images: The Grayscale tensor to convert. The last dimension must be size 1. name: A name for the operation (optional). Returns: The converted RGB image(s)." 8802,random_hue,tensorflow/tensorflow/python/ops/image_ops_impl.py,2220,function,"Adjust the hue of RGB images by a random factor. Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the interval `[-max_delta, max_delta)`. `max_delta` must be in the interval `[0, 0.5]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_hue(x, 0.2) Args: image: RGB image or images. The size of the last dimension must be 3. max_delta: float. The maximum value for the random delta. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `max_delta` is invalid." 8803,adjust_hue,tensorflow/tensorflow/python/ops/image_ops_impl.py,2263,function,"Adjust hue of RGB images. This is a convenience method that converts an RGB image to float representation, converts it to HSV, adds an offset to the hue channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image.
The image hue is adjusted by converting the image(s) to HSV and rotating the hue channel (H) by `delta`. The image is then converted back to RGB. `delta` must be in the interval `[-1, 1]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_hue(x, 0.2) Args: image: RGB image or images. The size of the last dimension must be 3. delta: float. How much to add to the hue channel. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. Usage Example: >>> image = [[[1, 2, 3], [4, 5, 6]], ... [[7, 8, 9], [10, 11, 12]], ... [[13, 14, 15], [16, 17, 18]]] >>> image = tf.constant(image) >>> tf.image.adjust_hue(image, 0.2) " 8804,random_jpeg_quality,tensorflow/tensorflow/python/ops/image_ops_impl.py,2331,function,"Randomly changes jpeg encoding quality for inducing jpeg noise. `min_jpeg_quality` must be in the interval `[0, 100]` and less than `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_jpeg_quality(x, 75, 95) Args: image: 3D image. Size of the last dimension must be 1 or 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid." 8805,adjust_jpeg_quality,tensorflow/tensorflow/python/ops/image_ops_impl.py,2379,function,"Adjust jpeg encoding quality of an image. This is a convenience method that converts an image to uint8 representation, encodes it to jpeg with `jpeg_quality`, decodes it, and then converts back to the original data type. `jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_jpeg_quality(x, 75) Args: image: 3D image. The size of the last dimension must be None, 1 or 3. jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality. name: A name for this operation (optional). Returns: Adjusted image, same shape and DType as `image`. Raises: InvalidArgumentError: quality must be in [0,100] InvalidArgumentError: image must have 1 or 3 channels" 8806,random_saturation,tensorflow/tensorflow/python/ops/image_ops_impl.py,2430,function,"Adjust the saturation of RGB images by a random factor. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper)`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_saturation(x, 5, 10) Args: image: RGB image or images. The size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. 
Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`." 8807,adjust_saturation,tensorflow/tensorflow/python/ops/image_ops_impl.py,2476,function,"Adjust saturation of RGB images. This is a convenience method that converts RGB images to float representation, converts them to HSV, adds an offset to the saturation channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image or images. The image saturation is adjusted by converting the images to HSV and multiplying the saturation (S) channel by `saturation_factor` and clipping. The images are then converted back to RGB. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_saturation(x, 0.5) Args: image: RGB image or images. The size of the last dimension must be 3. saturation_factor: float. Factor to multiply the saturation by. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. Raises: InvalidArgumentError: input must have 3 channels" 8808,is_jpeg,tensorflow/tensorflow/python/ops/image_ops_impl.py,2528,function,"Convenience function to check if the 'contents' encodes a JPEG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a JPEG image. is_jpeg is susceptible to false positives." 8809,_is_png,tensorflow/tensorflow/python/ops/image_ops_impl.py,2547,function,"Convenience function to check if the 'contents' encodes a PNG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a PNG image. is_png is susceptible to false positives." 8810,encode_png,tensorflow/tensorflow/python/ops/image_ops_impl.py,2604,function,"PNG-encode an image. `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where `channels` is: * 1: for grayscale. * 2: for grayscale + alpha. * 3: for RGB. * 4: for RGBA. The ZLIB compression level, `compression`, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower. Args: image: A `Tensor`. Must be one of the following types: `uint8`, `uint16`. 3-D with shape `[height, width, channels]`. compression: An optional `int`. Defaults to `-1`. Compression level. name: A name for the operation (optional). Returns: A `Tensor` of type `string`." 8811,decode_image,tensorflow/tensorflow/python/ops/image_ops_impl.py,2637,function,"Function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`. Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the appropriate operation to convert the input bytes `string` into a `Tensor` of type `dtype`. Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D arrays `[height, width, num_channels]`. Make sure to take this into account when constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or PNG files. Alternately, set the `expand_animations` argument of this function to `False`, in which case the op will return 3-dimensional tensors and will truncate animated GIF files to the first frame. 
Args: contents: 0-D `string`. The encoded image bytes. channels: An optional `int`. Defaults to `0`. Number of color channels for the decoded image. dtype: The desired DType of the returned `Tensor`. name: A name for the operation (optional). expand_animations: Controls the shape of the returned op's output. If `True`, the returned op will produce a 3-D tensor for PNG, JPEG, and BMP files, and a 4-D tensor for all GIFs, whether animated or not. If `False`, the returned op will produce a 3-D tensor for all file types and will truncate animated GIFs to the first frame. Returns: `Tensor` with type `dtype` and a 3- or 4-dimensional shape, depending on the file type and the value of the `expand_animations` parameter. Raises: ValueError: On incorrect number of channels." 8812,total_variation,tensorflow/tensorflow/python/ops/image_ops_impl.py,2770,function,"Calculate and return the total variation for one or more images. The total variation is the sum of the absolute differences for neighboring pixel-values in the input images. This measures how much noise is in the images. This can be used as a loss-function during optimization so as to suppress noise in images. If you have a batch of images, then you should calculate the scalar loss-value as the sum: `loss = tf.reduce_sum(tf.image.total_variation(images))` This implements the anisotropic 2-D version of the formula described here: https://en.wikipedia.org/wiki/Total_variation_denoising Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for the operation (optional). Raises: ValueError: if images.shape is not a 3-D or 4-D vector. Returns: The total variation of `images`. If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the total variation for each image in the batch. If `images` was 3-D, return a scalar float with the total variation for that image." 8813,sample_distorted_bounding_box_v2,tensorflow/tensorflow/python/ops/image_ops_impl.py,2842,function,"Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image.
distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = True` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If `seed` is set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect `ratio = width / height` within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`." 8814,sample_distorted_bounding_box,tensorflow/tensorflow/python/ops/image_ops_impl.py,2946,function,"Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image.
For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = True` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If either `seed` or `seed2` are set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed collision. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`." 8815,non_max_suppression,tensorflow/tensorflow/python/ops/image_ops_impl.py,3057,function,"Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. 
Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`." 8816,non_max_suppression_with_scores,tensorflow/tensorflow/python/ops/image_ops_impl.py,3110,function,"Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translations or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices, selected_scores = tf.image.non_max_suppression_with_scores( boxes, scores, max_output_size, iou_threshold=1.0, score_threshold=0.1, soft_nms_sigma=0.5) selected_boxes = tf.gather(boxes, selected_indices) ``` This function generalizes the `tf.image.non_max_suppression` op by also supporting a Soft-NMS (with Gaussian weighting) mode (c.f. Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score of other overlapping boxes instead of directly causing them to be pruned. Consequently, in contrast to `tf.image.non_max_suppression`, `tf.image.non_max_suppression_with_scores` returns the new scores of each input box in the second output, `selected_scores`. To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be larger than 0. When `soft_nms_sigma` equals 0, the behavior of `tf.image.non_max_suppression_with_scores` is identical to that of `tf.image.non_max_suppression` (except for the extra output) both in function and in running time. Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter; see Bodla et al, https://arxiv.org/abs/1704.04503. When `soft_nms_sigma=0.0` (which is the default), we fall back to standard (hard) NMS. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding scores for each selected box, where `M <= max_output_size`. Scores only differ from corresponding input scores when using Soft NMS (i.e. when `soft_nms_sigma>0`)." 8817,non_max_suppression_with_overlaps,tensorflow/tensorflow/python/ops/image_ops_impl.py,3197,function,"Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high overlap with previously selected boxes. N-by-N overlap values are supplied as a square matrix. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression_overlaps( overlaps, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. overlap_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to the provided overlap values. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the overlaps tensor, where `M <= max_output_size`." 8818,rgb_to_yiq,tensorflow/tensorflow/python/ops/image_ops_impl.py,3249,function,"Converts one or more images from RGB to YIQ. Outputs a tensor of the same shape as the `images` tensor, containing the YIQ value of the pixels. The output is only well defined if the values in `images` are in [0,1]. Usage Example: >>> x = tf.constant([[[1.0, 2.0, 3.0]]]) >>> tf.image.rgb_to_yiq(x) Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`." 8819,yiq_to_rgb,tensorflow/tensorflow/python/ops/image_ops_impl.py,3283,function,"Converts one or more images from YIQ to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y values in `images` are in [0,1], the I values are in [-0.5957,0.5957], and the Q values are in [-0.5226,0.5226]. Args: images: 2-D or higher rank.
Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`." 8820,rgb_to_yuv,tensorflow/tensorflow/python/ops/image_ops_impl.py,3312,function,"Converts one or more images from RGB to YUV. Outputs a tensor of the same shape as the `images` tensor, containing the YUV value of the pixels. The output is only well defined if the values in images are in [0,1]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`." 8821,yuv_to_rgb,tensorflow/tensorflow/python/ops/image_ops_impl.py,3339,function,"Converts one or more images from YUV to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y values in images are in [0,1] and the U and V values are in [-0.5,0.5]. As per the above description, you need to scale your YUV images if their pixel values are not in the required range. The example below illustrates preprocessing of each channel of the images before feeding them to `yuv_to_rgb`. ```python yuv_images = tf.random.uniform(shape=[100, 64, 64, 3], maxval=255) last_dimension_axis = len(yuv_images.shape) - 1 yuv_tensor_images = tf.truediv( tf.subtract( yuv_images, tf.reduce_min(yuv_images) ), tf.subtract( tf.reduce_max(yuv_images), tf.reduce_min(yuv_images) ) ) y, u, v = tf.split(yuv_tensor_images, 3, axis=last_dimension_axis) target_uv_min, target_uv_max = -0.5, 0.5 u = u * (target_uv_max - target_uv_min) + target_uv_min v = v * (target_uv_max - target_uv_min) + target_uv_min preprocessed_yuv_images = tf.concat([y, u, v], axis=last_dimension_axis) rgb_tensor_images = tf.image.yuv_to_rgb(preprocessed_yuv_images) ``` Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`." 8822,_verify_compatible_image_shapes,tensorflow/tensorflow/python/ops/image_ops_impl.py,3386,function,"Checks if two image tensors are compatible for applying SSIM or PSNR. This function checks if two sets of images have rank at least 3, and if the last three dimensions match. Args: img1: Tensor containing the first image batch. img2: Tensor containing the second image batch. Returns: A tuple containing: the first tensor shape, the second tensor shape, and a list of control_flow_ops.Assert() ops implementing the checks. Raises: ValueError: When static shape check fails." 8823,psnr,tensorflow/tensorflow/python/ops/image_ops_impl.py,3433,function,"Returns the Peak Signal-to-Noise Ratio between a and b. This is intended to be used on signals (or images). Produces a PSNR value for each image in batch. The last three dimensions of input are expected to be [height, width, depth]. Example: ```python # Read images from file. im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute PSNR over tf.uint8 Tensors. psnr1 = tf.image.psnr(im1, im2, max_val=255) # Compute PSNR over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) psnr2 = tf.image.psnr(im1, im2, max_val=1.0) # psnr1 and psnr2 both have type tf.float32 and are almost equal. ``` Arguments: a: First set of images. b: Second set of images. max_val: The dynamic range of the images (i.e., the difference between the maximum and the minimum allowed values). name: Namespace to embed the computation in. Returns: The scalar PSNR between a and b.
The returned tensor has type `tf.float32` and shape [batch_size, 1]." 8824,_ssim_helper,tensorflow/tensorflow/python/ops/image_ops_impl.py,3486,function,"Helper function for computing SSIM. SSIM estimates covariances with weighted sums. The default parameters use a biased estimate of the covariance: Suppose `reducer` is a weighted sum, then the mean estimators are \mu_x = \sum_i w_i x_i, \mu_y = \sum_i w_i y_i, where w_i's are the weighted-sum weights, and the covariance estimator is cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) under the assumption \sum_i w_i = 1. This covariance estimator is biased, since E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y). For an SSIM measure with unbiased covariance estimators, pass (1 - \sum_i w_i ^ 2) as the `compensation` argument. Arguments: x: First set of images. y: Second set of images. reducer: Function that computes 'local' averages from the set of images. For the non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and for the convolutional version, this is usually tf.nn.avg_pool2d or tf.nn.conv2d with a weighted-sum kernel. max_val: The dynamic range (i.e., the difference between the maximum possible allowed value and the minimum allowed value). compensation: Compensation factor. See above. k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A pair containing the luminance measure, and the contrast-structure measure." 8825,_fspecial_gauss,tensorflow/tensorflow/python/ops/image_ops_impl.py,3544,function,Function to mimic the 'fspecial' gaussian MATLAB function. 8826,_ssim_per_channel,tensorflow/tensorflow/python/ops/image_ops_impl.py,3561,function,"Computes SSIM index between img1 and img2 per color channel. This function matches the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum and the minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A pair of tensors containing the channel-wise SSIM and contrast-structure values. The shape is [..., channels]." 8827,ssim,tensorflow/tensorflow/python/ops/image_ops_impl.py,3645,function,"Computes SSIM index between img1 and img2. This function is based on the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If the input is already YUV, then it will compute YUV SSIM average.) Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. The image sizes must be at least 11x11 because of the filter size. Example: ```python # Read images from file.
im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute SSIM over tf.uint8 Tensors. ssim1 = tf.image.ssim(im1, im2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # Compute SSIM over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) ssim2 = tf.image.ssim(im1, im2, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # ssim1 and ssim2 both have type tf.float32 and are almost equal. ``` Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum and the minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A tensor containing an SSIM value for each image in batch. Returned SSIM values are in range (-1, 1], when pixel values are non-negative. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3])." 8828,ssim_multiscale,tensorflow/tensorflow/python/ops/image_ops_impl.py,3730,function,"Computes the MS-SSIM between img1 and img2. This function assumes that `img1` and `img2` are image batches, i.e. the last three dimensions are [height, width, channels]. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If the input is already YUV, then it will compute YUV SSIM average.) Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. ""Multiscale structural similarity for image quality assessment."" Signals, Systems and Computers, 2004. Arguments: img1: First image batch. img2: Second image batch. Must have the same rank as img1. max_val: The dynamic range of the images (i.e., the difference between the maximum and the minimum allowed values). power_factors: Iterable of weights for each of the scales. The number of scales used is the length of the list. Index 0 is the unscaled resolution's weight and each increasing scale corresponds to the image being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), which are the values obtained in the original paper. filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitive to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A tensor containing an MS-SSIM value for each image in batch. The values are in range [0, 1]. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3])." 8829,image_gradients,tensorflow/tensorflow/python/ops/image_ops_impl.py,3858,function,"Returns image gradients (dy, dx) for each color channel. Both output tensors have the same shape as the input: [batch_size, h, w, d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in location (x, y). That means that dy will always have zeros in the last row, and dx will always have zeros in the last column.
Usage Example: ```python BATCH_SIZE = 1 IMAGE_HEIGHT = 5 IMAGE_WIDTH = 5 CHANNELS = 1 image = tf.reshape(tf.range(IMAGE_HEIGHT * IMAGE_WIDTH * CHANNELS, delta=1, dtype=tf.float32), shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS)) dy, dx = tf.image.image_gradients(image) print(image[0, :,:,0]) tf.Tensor( [[ 0. 1. 2. 3. 4.] [ 5. 6. 7. 8. 9.] [10. 11. 12. 13. 14.] [15. 16. 17. 18. 19.] [20. 21. 22. 23. 24.]], shape=(5, 5), dtype=float32) print(dy[0, :,:,0]) tf.Tensor( [[5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [0. 0. 0. 0. 0.]], shape=(5, 5), dtype=float32) print(dx[0, :,:,0]) tf.Tensor( [[1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.]], shape=(5, 5), dtype=float32) ``` Arguments: image: Tensor with shape [batch_size, h, w, d]. Returns: Pair of tensors (dy, dx) holding the vertical and horizontal image gradients (1-step finite difference). Raises: ValueError: If `image` is not a 4D tensor." 8830,sobel_edges,tensorflow/tensorflow/python/ops/image_ops_impl.py,3932,function,"Returns a tensor holding Sobel edge maps. Arguments: image: Image tensor with shape [batch_size, h, w, d] and type float32 or float64. The image(s) must be 2x2 or larger. Returns: Tensor holding edge maps for each channel. Returns a tensor with shape [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]], [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Sobel filter." 8831,resize_bicubic,tensorflow/tensorflow/python/ops/image_ops_impl.py,3972,function, 8832,resize_bilinear,tensorflow/tensorflow/python/ops/image_ops_impl.py,3985,function, 8833,resize_nearest_neighbor,tensorflow/tensorflow/python/ops/image_ops_impl.py,3998,function, 8834,crop_and_resize_v2,tensorflow/tensorflow/python/ops/image_ops_impl.py,4045,function,"Extracts crops from the input image tensor and resizes them. Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by `crop_size`. This is more general than the `crop_to_bounding_box` op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change. Returns a tensor with `crops` from the input `image` at positions defined at the bounding box locations in `boxes`. The cropped boxes are all resized (with bilinear or nearest neighbor interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical results to using `tf.compat.v1.image.resize_bilinear()` or `tf.compat.v1.image.resize_nearest_neighbor()` (depending on the `method` argument) with `align_corners=True`. Args: image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both `image_height` and `image_width` need to be positive. boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly.
Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values. box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the `i`-th box refers to. crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both `crop_height` and `crop_width` need to be positive. method: An optional string specifying the sampling method for resizing. It can be either `""bilinear""` or `""nearest""` and defaults to `""bilinear""`. Currently two sampling methods are supported: Bilinear and Nearest Neighbor. extrapolation_value: An optional `float`. Defaults to `0`. Value used for extrapolation, when applicable. name: A name for the operation (optional). Returns: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. Example: ```python import tensorflow as tf BATCH_SIZE = 1 NUM_BOXES = 5 IMAGE_HEIGHT = 256 IMAGE_WIDTH = 256 CHANNELS = 3 CROP_SIZE = (24, 24) image = tf.random.normal(shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS) ) boxes = tf.random.uniform(shape=(NUM_BOXES, 4)) box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0, maxval=BATCH_SIZE, dtype=tf.int32) output = tf.image.crop_and_resize(image, boxes, box_indices, CROP_SIZE) output.shape #=> (5, 24, 24, 3) ```" 8835,crop_and_resize_v1,tensorflow/tensorflow/python/ops/image_ops_impl.py,4132,function, 8836,extract_glimpse,tensorflow/tensorflow/python/ops/image_ops_impl.py,4152,function,"Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlap the inputs, the non-overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The arguments `normalized` and `centered` control how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Usage Example: >>> x = [[[[0.0], ... [1.0], ... [2.0]], ... [[3.0], ... [4.0], ... [5.0]], ... [[6.0], ... [7.0], ... [8.0]]]] >>> tf.compat.v1.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]], ... centered=False, normalized=False) Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, followed by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`.
indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. uniform_noise: An optional `bool`. Defaults to `True`. indicates if the noise should be generated using a uniform distribution or a Gaussian distribution. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`." 8837,extract_glimpse_v2,tensorflow/tensorflow/python/ops/image_ops_impl.py,4238,function,"Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlap the inputs, the non-overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The arguments `normalized` and `centered` control how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Usage Example: >>> x = [[[[0.0], ... [1.0], ... [2.0]], ... [[3.0], ... [4.0], ... [5.0]], ... [[6.0], ... [7.0], ... [8.0]]]] >>> tf.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]], ... centered=False, normalized=False) Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, followed by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. noise: An optional `string`. Defaults to `uniform`. indicates if the noise should be `uniform` (uniform distribution), `gaussian` (gaussian distribution), or `zero` (zero padding). name: A name for the operation (optional). Returns: A `Tensor` of type `float32`." 8838,combined_non_max_suppression,tensorflow/tensorflow/python/ops/image_ops_impl.py,4325,function,"Greedily selects a subset of bounding boxes in descending order of score. This operation performs non_max_suppression on the inputs per batch, across all classes. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes.
Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Also note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. The output of this operation is the final boxes, scores and classes tensor returned after performing non_max_suppression. Args: boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then the same boxes are used for all classes; otherwise, if `q` is equal to the number of classes, class-specific boxes are used. scores: A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]` representing a single score corresponding to each box (each row of boxes). max_output_size_per_class: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression per class. max_total_size: A scalar representing the maximum number of boxes retained over all classes. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_per_class: If false, the output nmsed boxes, scores and classes are padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_output_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false. clip_boxes: If true, the coordinates of output nmsed boxes will be clipped to [0, 1]. If false, output the box coordinates as they are. Defaults to true. name: A name for the operation (optional). Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'valid_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top valid_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings." 8839,_bbox_overlap,tensorflow/tensorflow/python/ops/image_ops_impl.py,4396,function,"Calculates the overlap (IoU, intersection over union) between boxes_a and boxes_b. Args: boxes_a: a tensor with a shape of [batch_size, N, 4]. N is the number of boxes per image. The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. boxes_b: a tensor with a shape of [batch_size, M, 4]. M is the number of boxes. The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. Returns: intersection_over_union: a tensor with a shape of [batch_size, N, M], representing the ratio of intersection area over union area (IoU) between the two sets of boxes" 8840,_self_suppression,tensorflow/tensorflow/python/ops/image_ops_impl.py,4442,function,"Suppress boxes in the same tile. Compute boxes that cannot be suppressed by others (i.e., can_suppress_others), and then use them to suppress boxes in the same tile.
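To make the `combined_non_max_suppression` entry above concrete, here is a minimal, hedged usage sketch; the batch size, box counts, and thresholds are illustrative assumptions, not values taken from the source.

```python
import tensorflow as tf

# Illustrative shapes (assumptions for this sketch): q == 1 means the same
# boxes are shared across all classes.
batch_size, num_boxes, num_classes = 2, 10, 3
boxes = tf.random.uniform([batch_size, num_boxes, 1, 4])
scores = tf.random.uniform([batch_size, num_boxes, num_classes])

nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
    tf.image.combined_non_max_suppression(
        boxes,
        scores,
        max_output_size_per_class=4,
        max_total_size=8,
        iou_threshold=0.5,
        score_threshold=0.1))
# nmsed_boxes has shape [2, 8, 4]; only the first valid_detections[i]
# rows of batch item i are real detections, the rest are zero padding.
```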
Args: iou: a tensor of shape [batch_size, num_boxes_with_padding] representing intersection over union. iou_sum: a scalar tensor. iou_threshold: a scalar tensor. Returns: iou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding]. iou_diff: a scalar tensor representing whether any box is suppressed in this step. iou_sum_new: a tensor of shape [batch_size] that represents the iou sum after suppression. iou_threshold: a scalar tensor." 8841,_cross_suppression,tensorflow/tensorflow/python/ops/image_ops_impl.py,4480,function,"Suppress boxes between different tiles. Args: boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4] box_slice: a tensor of shape [batch_size, tile_size, 4] iou_threshold: a scalar tensor inner_idx: a scalar tensor representing the tile index of the tile that is used to suppress box_slice tile_size: an integer representing the number of boxes in a tile Returns: boxes: unchanged boxes as input box_slice_after_suppression: box_slice after suppression iou_threshold: unchanged" 8842,_suppression_loop_body,tensorflow/tensorflow/python/ops/image_ops_impl.py,4508,function,"Process boxes in the range [idx*tile_size, (idx+1)*tile_size). Args: boxes: a tensor with a shape of [batch_size, anchors, 4]. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. output_size: an int32 tensor of size [batch_size]. Representing the number of selected boxes for each batch. idx: an integer scalar representing the induction variable. tile_size: an integer representing the number of boxes in a tile Returns: boxes: updated boxes. iou_threshold: pass down iou_threshold to the next iteration. output_size: the updated output_size. idx: the updated induction variable." 8843,non_max_suppression_padded,tensorflow/tensorflow/python/ops/image_ops_impl.py,4578,function,"Greedily selects a subset of bounding boxes in descending order of score. Performs an operation algorithmically equivalent to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`. The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4]. Dimensions except the last two are batch dimensions. scores: a tensor of rank 1 or higher with a shape of [..., num_boxes]. max_output_size: a scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IoU (intersection over union). score_threshold: a float representing the threshold for box scores. Boxes with a score that is not larger than this threshold will be suppressed. pad_to_max_output_size: whether to pad the output idx to max_output_size. Must be set to True when the input is a batch of images.
name: name of operation. sorted_input: a boolean indicating whether the input boxes and scores are sorted in descending order by the score. canonicalized_coordinates: if box coordinates are given as `[y_min, x_min, y_max, x_max]`, setting to True eliminates redundant computation to canonicalize box coordinates. tile_size: an integer representing the number of boxes in a tile, i.e., the maximum number of boxes per image that can be used to suppress other boxes in parallel; larger tile_size means larger parallelism and potentially more redundant work. Returns: idx: a tensor with a shape of [..., num_boxes] representing the indices selected by non-max suppression. The leading dimensions are the batch dimensions of the input boxes. All numbers are within [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i] indices (i.e., idx[i][:num_valid[i]]) are valid. num_valid: a tensor of rank 0 or higher with a shape of [...] representing the number of valid indices in idx. Its dimensions are the batch dimensions of the input boxes. Raises: ValueError: When `pad_to_max_output_size` is set to False for batched input." 8844,non_max_suppression_padded_v2,tensorflow/tensorflow/python/ops/image_ops_impl.py,4680,function,"Non-maximum suppression. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. The bounding box coordinates are canonicalized to `[y_min, x_min, y_max, x_max]`, where `(y_min, x_min)` and `(y_max, x_max)` are the coordinates of the lower left and upper right corner. Users may indicate that the input box coordinates are already canonicalized, to eliminate redundant work, by setting canonicalized_coordinates to `True`. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflecting the coordinate system results in the same boxes being selected by the algorithm. Similar to tf.image.non_max_suppression, non_max_suppression_padded implements hard NMS but can operate on a batch of images and improves performance by tiling the bounding boxes. Non_max_suppression_padded should be preferred over tf.image.non_max_suppression when running on devices with abundant parallelism for higher computation speed. For soft NMS, refer to tf.image.non_max_suppression_with_scores. While a serial NMS algorithm iteratively uses the highest-scored unprocessed box to suppress boxes, this algorithm uses many boxes to suppress other boxes in parallel. The key idea is to partition boxes into tiles based on their score and suppress boxes tile by tile, thus achieving parallelism within a tile. The tile size determines the degree of parallelism. In cross suppression (using boxes of tile A to suppress boxes of tile B), all boxes in A can independently suppress boxes in B. Self suppression (suppressing boxes of the same tile) needs to be iteratively applied until there's no more suppression. In each iteration, boxes that cannot be suppressed are used to suppress boxes in the same tile.
boxes = boxes.pad_to_multiple_of(tile_size) num_tiles = len(boxes) // tile_size output_boxes = [] for i in range(num_tiles): box_tile = boxes[i*tile_size : (i+1)*tile_size] for j in range(i): # in parallel suppress boxes in box_tile using boxes from suppressing_tile suppressing_tile = boxes[j*tile_size : (j+1)*tile_size] iou = _bbox_overlap(box_tile, suppressing_tile) # if the box is suppressed in iou, clear it to a dot box_tile *= _update_boxes(iou) # Iteratively handle the diagonal tile. iou = _box_overlap(box_tile, box_tile) iou_changed = True while iou_changed: # boxes that are not suppressed by anything else suppressing_boxes = _get_suppressing_boxes(iou) # boxes that are suppressed by suppressing_boxes suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes) # clear iou to 0 for boxes that are suppressed, as they cannot be used # to suppress other boxes any more new_iou = _clear_iou(iou, suppressed_boxes) iou_changed = (new_iou != iou) iou = new_iou # remaining boxes that can still suppress others, are selected boxes. output_boxes.append(_get_suppressing_boxes(iou)) if len(output_boxes) >= max_output_size: break Args: boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4]. Dimensions except the last two are batch dimensions. The last dimension represents box coordinates, given as [y_1, x_1, y_2, x_2]. The coordinates on each dimension can be given in any order (see also `canonicalized_coordinates`) but must describe a box with a positive area. scores: a tensor of rank 1 or higher with a shape of [..., num_boxes]. max_output_size: a scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IoU (intersection over union). score_threshold: a float representing the threshold for box scores. Boxes with a score that is not larger than this threshold will be suppressed. sorted_input: a boolean indicating whether the input boxes and scores are sorted in descending order by the score. canonicalized_coordinates: if box coordinates are given as `[y_min, x_min, y_max, x_max]`, setting to True eliminates redundant computation to canonicalize box coordinates. tile_size: an integer representing the number of boxes in a tile, i.e., the maximum number of boxes per image that can be used to suppress other boxes in parallel; larger tile_size means larger parallelism and potentially more redundant work. Returns: idx: a tensor with a shape of [..., num_boxes] representing the indices selected by non-max suppression. The leading dimensions are the batch dimensions of the input boxes. All numbers are within [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i] indices (i.e., idx[i][:num_valid[i]]) are valid. num_valid: a tensor of rank 0 or higher with a shape of [...] representing the number of valid indices in idx. Its dimensions are the batch dimensions of the input boxes. Raises: ValueError: When `pad_to_max_output_size` is set to False for batched input." 8845,non_max_suppression_padded_v1,tensorflow/tensorflow/python/ops/image_ops_impl.py,4917,function,"Greedily selects a subset of bounding boxes in descending order of score. Performs an operation algorithmically equivalent to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`.
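As a hedged sketch of the batched, tiled NMS described above: calling `tf.image.non_max_suppression_padded` on a batch requires `pad_to_max_output_size=True`; the shapes and thresholds below are assumptions chosen for illustration.

```python
import tensorflow as tf

# Assumed batched input: 2 images, 6 candidate boxes each.
boxes = tf.random.uniform([2, 6, 4])
scores = tf.random.uniform([2, 6])

idx, num_valid = tf.image.non_max_suppression_padded(
    boxes,
    scores,
    max_output_size=4,
    iou_threshold=0.5,
    pad_to_max_output_size=True,  # required for batched input
    sorted_input=False)
# idx holds zero-padded selected indices per image; for image i, only
# the first num_valid[i] entries of idx[i] are valid selections.
```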
The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_to_max_output_size: bool. If True, size of `selected_indices` output is padded to `max_output_size`. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. valid_outputs: A scalar integer `Tensor` denoting how many elements in `selected_indices` are valid. Valid elements occur first, then padding." 8846,draw_bounding_boxes_v2,tensorflow/tensorflow/python/ops/image_ops_impl.py,4974,function,"Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle through for the boxes. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `images`. Usage Example: >>> # create an empty image >>> img = tf.zeros([1, 3, 3, 3]) >>> # draw a box around the image >>> box = np.array([0, 0, 1, 1]) >>> boxes = box.reshape([1, 1, 4]) >>> # alternate between red and blue >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes(img, boxes, colors) " 8847,draw_bounding_boxes,tensorflow/tensorflow/python/ops/image_ops_impl.py,5029,function,"Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`.
The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. name: A name for the operation (optional). colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle through for the boxes. Returns: A `Tensor`. Has the same type as `images`. Usage Example: >>> # create an empty image >>> img = tf.zeros([1, 3, 3, 3]) >>> # draw a box around the image >>> box = np.array([0, 0, 1, 1]) >>> boxes = box.reshape([1, 1, 4]) >>> # alternate between red and blue >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes(img, boxes, colors) " 8848,generate_bounding_box_proposals,tensorflow/tensorflow/python/ops/image_ops_impl.py,5082,function,"Generate bounding box proposals from encoded bounding boxes. Returns: rois: Region of interest boxes sorted by their scores. roi_probabilities: scores of the ROI boxes in the ROIs' tensor." 8849,RGBToHSVTest,tensorflow/tensorflow/python/ops/image_ops_test.py,56,class, 8850,RGBToYIQTest,tensorflow/tensorflow/python/ops/image_ops_test.py,96,class, 8851,RGBToYUVTest,tensorflow/tensorflow/python/ops/image_ops_test.py,126,class, 8852,GrayscaleToRGBTest,tensorflow/tensorflow/python/ops/image_ops_test.py,156,class, 8853,AdjustGamma,tensorflow/tensorflow/python/ops/image_ops_test.py,277,class, 8854,AdjustHueTest,tensorflow/tensorflow/python/ops/image_ops_test.py,414,class, 8855,FlipImageBenchmark,tensorflow/tensorflow/python/ops/image_ops_test.py,539,class, 8856,AdjustHueBenchmark,tensorflow/tensorflow/python/ops/image_ops_test.py,660,class, 8857,AdjustSaturationBenchmark,tensorflow/tensorflow/python/ops/image_ops_test.py,703,class, 8858,ResizeBilinearBenchmark,tensorflow/tensorflow/python/ops/image_ops_test.py,747,class, 8859,ResizeBicubicBenchmark,tensorflow/tensorflow/python/ops/image_ops_test.py,795,class, 8860,ResizeAreaBenchmark,tensorflow/tensorflow/python/ops/image_ops_test.py,853,class, 8861,AdjustSaturationTest,tensorflow/tensorflow/python/ops/image_ops_test.py,900,class, 8862,FlipTransposeRotateTest,tensorflow/tensorflow/python/ops/image_ops_test.py,1004,class, 8863,AdjustContrastTest,tensorflow/tensorflow/python/ops/image_ops_test.py,1390,class, 8864,AdjustBrightnessTest,tensorflow/tensorflow/python/ops/image_ops_test.py,1479,class, 8865,PerImageWhiteningTest,tensorflow/tensorflow/python/ops/image_ops_test.py,1529,class, 8866,CropToBoundingBoxTest,tensorflow/tensorflow/python/ops/image_ops_test.py,1585,class, 8867,CentralCropTest,tensorflow/tensorflow/python/ops/image_ops_test.py,1768,class, 8868,PadToBoundingBoxTest,tensorflow/tensorflow/python/ops/image_ops_test.py,1910,class, 8869,SelectDistortedCropBoxTest,tensorflow/tensorflow/python/ops/image_ops_test.py,2106,class, 8870,ResizeImagesV2Test,tensorflow/tensorflow/python/ops/image_ops_test.py,2316,class, 8871,ResizeImagesTest,tensorflow/tensorflow/python/ops/image_ops_test.py,2856,class, 
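The `draw_bounding_boxes` doctests above assume `np` and `tf` are already imported; a self-contained version of the same example might look like the sketch below (the color values simply mirror the doctest).

```python
import numpy as np
import tensorflow as tf

# One empty 3x3 RGB image.
img = tf.zeros([1, 3, 3, 3])
# A single box covering the whole image, as [y_min, x_min, y_max, x_max].
boxes = np.array([0, 0, 1, 1], dtype=np.float32).reshape([1, 1, 4])
# Colors to cycle through for the boxes, as in the doctest.
colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
result = tf.image.draw_bounding_boxes(img, boxes, colors)
# result keeps the input shape [1, 3, 3, 3], with the box border drawn in.
```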
8872,ResizeImageWithPadV1Test,tensorflow/tensorflow/python/ops/image_ops_test.py,3367,class, 8873,ResizeImageWithPadV2Test,tensorflow/tensorflow/python/ops/image_ops_test.py,3467,class, 8874,ResizeImageWithCropOrPadTest,tensorflow/tensorflow/python/ops/image_ops_test.py,3566,class, 8875,simple_color_ramp,tensorflow/tensorflow/python/ops/image_ops_test.py,3815,function,Build a simple color ramp RGB image. 8876,JpegTest,tensorflow/tensorflow/python/ops/image_ops_test.py,3827,class, 8877,PngTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4046,class, 8878,GifTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4137,class, 8879,ConvertImageTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4180,class, 8880,TotalVariationTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4250,class,"Tests the function total_variation() in image_ops. We test a few small handmade examples, as well as some larger examples using an equivalent numpy implementation of the total_variation() function. We do NOT test for overflows and invalid / edge-case arguments." 8881,FormatTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4428,class, 8882,NonMaxSuppressionTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4461,class, 8883,NonMaxSuppressionWithScoresTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4585,class, 8884,NonMaxSuppressionPaddedTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4620,class, 8885,NonMaxSuppressionWithOverlapsTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4686,class, 8886,VerifyCompatibleImageShapesTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4711,class,Tests utility function used by ssim() and psnr(). 8887,PSNRTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4738,class,Tests for PSNR. 8888,SSIMTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4842,class,Tests for SSIM. 8889,MultiscaleSSIMTest,tensorflow/tensorflow/python/ops/image_ops_test.py,4976,class,Tests for MS-SSIM. 8890,ImageGradientsTest,tensorflow/tensorflow/python/ops/image_ops_test.py,5123,class, 8891,SobelEdgesTest,tensorflow/tensorflow/python/ops/image_ops_test.py,5172,class, 8892,DecodeImageTest,tensorflow/tensorflow/python/ops/image_ops_test.py,5208,class, 8893,Initializer,tensorflow/tensorflow/python/ops/init_ops.py,55,class,Initializer base class: all initializers inherit from this class. 8894,Zeros,tensorflow/tensorflow/python/ops/init_ops.py,102,class,Initializer that generates tensors initialized to 0. 8895,Ones,tensorflow/tensorflow/python/ops/init_ops.py,122,class,Initializer that generates tensors initialized to 1. 8896,Constant,tensorflow/tensorflow/python/ops/init_ops.py,142,class,"Initializer that generates tensors with constant values. The resulting tensor is populated with values of type `dtype`, as specified by the argument `value`, following the desired `shape` of the new tensor (see examples below). The argument `value` can be a constant value, or a list of values of type `dtype`. If `value` is a list, then the length of the list must be less than or equal to the number of elements implied by the desired shape of the tensor. In the case where the total number of elements in `value` is less than the number of elements required by the tensor shape, the last element in `value` will be used to fill the remaining entries. If the total number of elements in `value` is greater than the number of elements required by the tensor shape, the initializer will raise a `ValueError`.
Args: value: A Python scalar, list or tuple of values, or an N-dimensional numpy array. All elements of the initialized variable will be set to the corresponding value in the `value` argument. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. verify_shape: Boolean that enables verification of the shape of `value`. If `True`, the initializer will throw an error if the shape of `value` is not compatible with the shape of the initialized tensor. Raises: TypeError: If the input `value` is not one of the expected types. Examples: The following example can be rewritten using a numpy.ndarray instead of the `value` list, even reshaped, as shown in the two commented lines below the `value` list initialization. >>> value = [0, 1, 2, 3, 4, 5, 6, 7] >>> # value = np.array(value) >>> # value = value.reshape([2, 4]) >>> init = tf.compat.v1.constant_initializer(value) >>> # fitting shape >>> with tf.compat.v1.Session(): ... x = tf.compat.v1.get_variable('x', shape=[2, 4], initializer=init) ... x.initializer.run() ... print(x.eval()) [[0. 1. 2. 3.] [4. 5. 6. 7.]] >>> # Larger shape >>> with tf.compat.v1.Session(): ... y = tf.compat.v1.get_variable('y', shape=[3, 4], initializer=init) ... y.initializer.run() ... print(y.eval()) [[0. 1. 2. 3.] [4. 5. 6. 7.] [7. 7. 7. 7.]] >>> # Smaller shape >>> with tf.compat.v1.Session(): ... z = tf.compat.v1.get_variable('z', shape=[2, 3], initializer=init) Traceback (most recent call last): ... ValueError: Too many elements provided. Needed at most 6, but received 8 >>> # Shape verification >>> init_verify = tf.compat.v1.constant_initializer(value, verify_shape=True) >>> with tf.compat.v1.Session(): ... u = tf.compat.v1.get_variable('u', shape=[3, 4], ... initializer=init_verify) Traceback (most recent call last): ... TypeError: Expected Tensor's shape: (3, 4), got (8,)." 8897,RandomUniform,tensorflow/tensorflow/python/ops/init_ops.py,242,class,"Initializer that generates tensors with a uniform distribution. Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate. maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate. Defaults to 1 for float types. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer." 8898,RandomNormal,tensorflow/tensorflow/python/ops/init_ops.py,282,class,"Initializer that generates tensors with a normal distribution. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported." 8899,TruncatedNormal,tensorflow/tensorflow/python/ops/init_ops.py,323,class,"Initializer that generates a truncated normal distribution. These values are similar to values from a `random_normal_initializer` except that values more than two standard deviations from the mean are discarded and re-drawn. This is the recommended initializer for neural network weights and filters. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds.
See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported." 8900,UniformUnitScaling,tensorflow/tensorflow/python/ops/init_ops.py,371,class,"Initializer that generates tensors without scaling variance. When initializing a deep network, it is in principle advantageous to keep the scale of the input variance constant, so that it does not explode or diminish by the time it reaches the final layer. If the input is `x` and the operation `x * W`, and we want to initialize `W` uniformly at random, we need to pick `W` from [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)] to keep the scale intact, where `dim = W.shape[0]` (the size of the input). A similar calculation for convolutional networks gives an analogous result with `dim` equal to the product of the first 3 dimensions. When nonlinearities are present, we need to multiply this by a constant `factor`. See (Sussillo et al., 2014) for deeper motivation, experiments and the calculation of constants. In section 2.3 there, the constants were numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15. Args: factor: Float. A multiplicative factor by which the values will be scaled. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558) ([pdf](http://arxiv.org/pdf/1412.6558.pdf))" 8901,VarianceScaling,tensorflow/tensorflow/python/ops/init_ops.py,437,class,"Initializer capable of adapting its scale to the shape of weights tensors. With `distribution=""truncated_normal"" or ""untruncated_normal""`, samples are drawn from a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)` where n is: - number of input units in the weight tensor, if mode = ""fan_in"" - number of output units, if mode = ""fan_out"" - average of the numbers of input and output units, if mode = ""fan_avg"" With `distribution=""uniform""`, samples are drawn from a uniform distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`. Args: scale: Scaling factor (positive float). mode: One of ""fan_in"", ""fan_out"", ""fan_avg"". distribution: Random distribution to use. One of ""normal"", ""uniform"". seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. Raises: ValueError: In case of an invalid value for the ""scale"", ""mode"" or ""distribution"" arguments." 8902,Orthogonal,tensorflow/tensorflow/python/ops/init_ops.py,534,class,"Initializer that generates an orthogonal matrix. If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector.
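A small sketch of the `VarianceScaling` relationship `stddev = sqrt(scale / n)` documented above; the kernel shape, scale, and mode below are assumptions chosen for illustration.

```python
import math
import tensorflow as tf

# Assumed conv kernel shape [h, w, in_channels, out_channels]:
# with mode="fan_in", n = h * w * in_channels.
shape = [3, 3, 16, 32]
fan_in = 3 * 3 * 16
scale = 2.0  # He-style scaling, as an example

init = tf.compat.v1.variance_scaling_initializer(
    scale=scale, mode="fan_in", distribution="untruncated_normal")
values = init(shape)

expected_stddev = math.sqrt(scale / fan_in)
sample_stddev = tf.math.reduce_std(values)
# sample_stddev should be close to expected_stddev for a kernel this large.
```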
The matrix is subsequently reshaped to give a tensor of the desired shape. Args: gain: Multiplicative factor to apply to the orthogonal matrix. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) ([pdf](https://arxiv.org/pdf/1312.6120.pdf))" 8903,ConvolutionDeltaOrthogonal,tensorflow/tensorflow/python/ops/init_ops.py,603,class,"Initializer that generates a delta orthogonal kernel for ConvNets. The shape of the tensor must have length 3, 4 or 5. The number of input filters must not exceed the number of output filters. The center pixels of the tensor form an orthogonal matrix. Other pixels are set to be zero. See algorithm 2 in (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))" 8904,ConvolutionOrthogonal,tensorflow/tensorflow/python/ops/init_ops.py,669,class,"Initializer that generates an orthogonal kernel for ConvNets. Base class used to construct 1D, 2D and 3D orthogonal kernels for convolution. Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))" 8905,ConvolutionOrthogonal2D,tensorflow/tensorflow/python/ops/init_ops.py,736,class,"Initializer that generates a 2D orthogonal kernel for ConvNets. The shape of the tensor must have length 4. The number of input filters must not exceed the number of output filters. The orthogonality (== isometry) is exact when the inputs are circular padded. There are finite-width effects with non-circular padding (e.g. zero padding). See algorithm 1 in (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. This has the effect of scaling the output 2-norm by a factor of `gain`. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))" 8906,ConvolutionOrthogonal1D,tensorflow/tensorflow/python/ops/init_ops.py,880,class,"Initializer that generates a 1D orthogonal kernel for ConvNets. The shape of the tensor must have length 3. The number of input filters must not exceed the number of output filters.
The orthogonality (== isometry) is exact when the inputs are circular padded. There are finite-width effects with non-circular padding (e.g. zero padding). See algorithm 1 in (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))" 8907,ConvolutionOrthogonal3D,tensorflow/tensorflow/python/ops/init_ops.py,1005,class,"Initializer that generates a 3D orthogonal kernel for ConvNets. The shape of the tensor must have length 5. The number of input filters must not exceed the number of output filters. The orthogonality (== isometry) is exact when the inputs are circular padded. There are finite-width effects with non-circular padding (e.g. zero padding). See algorithm 1 in (Xiao et al., 2018). Args: gain: Multiplicative factor to apply to the orthogonal matrix. Default is 1. The 2-norm of an input is multiplied by a factor of `gain` after applying this convolution. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html) ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))" 8908,Identity,tensorflow/tensorflow/python/ops/init_ops.py,1170,class,"Initializer that generates the identity matrix. Only use for 2D matrices. Args: gain: Multiplicative factor to apply to the identity matrix. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported." 8909,GlorotUniform,tensorflow/tensorflow/python/ops/init_ops.py,1210,class,"The Glorot uniform initializer, also called Xavier uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))" 8910,GlorotNormal,tensorflow/tensorflow/python/ops/init_ops.py,1242,class,"The Glorot normal initializer, also called Xavier normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer.
Only floating point types are supported. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))" 8911,lecun_normal,tensorflow/tensorflow/python/ops/init_ops.py,1295,function,"LeCun normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)" 8912,lecun_uniform,tensorflow/tensorflow/python/ops/init_ops.py,1323,function,"LeCun uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)" 8913,he_normal,tensorflow/tensorflow/python/ops/init_ops.py,1350,function,"He normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))" 8914,he_uniform,tensorflow/tensorflow/python/ops/init_ops.py,1375,function,"He uniform variance scaling initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))" 8915,_compute_fans,tensorflow/tensorflow/python/ops/init_ops.py,1401,function,"Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of integer scalars (fan_in, fan_out)." 8916,_assert_float_dtype,tensorflow/tensorflow/python/ops/init_ops.py,1428,function,"Validate and return floating point type based on `dtype`. `dtype` must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type.
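The fan rule behind `_compute_fans` and the `stddev`/`limit` formulas quoted above can be stated concretely. The sketch below is an independent restatement under the documented convention for conv kernels `[k_h, k_w, in_ch, out_ch]`, not the private helper itself:

```python
import numpy as np

def compute_fans(shape):
    # Scalar and vector shapes degenerate; matrices are [fan_in, fan_out];
    # higher ranks multiply channel counts by the receptive-field size.
    if len(shape) < 1:
        return 1, 1
    if len(shape) == 1:
        return shape[0], shape[0]
    if len(shape) == 2:
        return shape[0], shape[1]
    receptive = int(np.prod(shape[:-2]))
    return shape[-2] * receptive, shape[-1] * receptive

fan_in, fan_out = compute_fans([3, 3, 64, 128])     # -> (576, 1152)
he_stddev = np.sqrt(2.0 / fan_in)                   # he_normal
glorot_limit = np.sqrt(6.0 / (fan_in + fan_out))    # glorot_uniform
```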
Raises: ValueError: if `dtype` is not a floating point type." 8917,InitializersTest,tensorflow/tensorflow/python/ops/init_ops_test.py,36,class, 8918,Initializer,tensorflow/tensorflow/python/ops/init_ops_v2.py,48,class,"Initializer base class: all initializers inherit from this class. " 8919,Zeros,tensorflow/tensorflow/python/ops/init_ops_v2.py,94,class,"Initializer that generates tensors initialized to 0. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.zeros_initializer()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable ...>, <tf.Variable ...>)" 8920,Ones,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"Initializer that generates tensors initialized to 1. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.ones_initializer()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable ...>, <tf.Variable ...>)" 8921,Constant,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"Initializer that generates tensors with constant values. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.constant_initializer(2.)) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable ...>, <tf.Variable ...>) >>> value = [0, 1, 2, 3, 4, 5, 6, 7] >>> init = tf.constant_initializer(value) >>> # Fitting shape >>> tf.Variable(init(shape=[2, 4], dtype=tf.float32)) <tf.Variable ...> >>> # Larger shape >>> tf.Variable(init(shape=[3, 4], dtype=tf.float32)) Traceback (most recent call last): ... TypeError: ...value has 8 elements, shape is (3, 4) with 12 elements... >>> # Smaller shape >>> tf.Variable(init(shape=[2, 3], dtype=tf.float32)) Traceback (most recent call last): ... TypeError: ...value has 8 elements, shape is (2, 3) with 6 elements... Args: value: A Python scalar, list or tuple of values, or a N-dimensional numpy array. All elements of the initialized variable will be set to the corresponding value in the `value` argument. Raises: TypeError: If the input `value` is not one of the expected types." 8922,RandomUniform,tensorflow/tensorflow/python/ops/init_ops_v2.py,270,class,"Initializer that generates tensors with a uniform distribution. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.ones_initializer()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable ...>, <tf.Variable ...>)" 8923,RandomNormal,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"Initializer that generates tensors with a normal distribution. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, ... tf.random_normal_initializer(mean=1., stddev=2.)) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (<tf.Variable ...>, <tf.Variable ...>)" 8924,TruncatedNormal,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"Initializer that generates a truncated normal distribution. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables( ...
3, tf.initializers.TruncatedNormal(mean=1., stddev=2.)) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.RandomUniform(minval=-1., maxval=1.)) (<tf.Variable ...>, <tf.Variable ...>)" 8925,VarianceScaling,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"Initializer capable of adapting its scale to the shape of weights tensors. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.VarianceScaling(scale=1.)) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.VarianceScaling(distribution='uniform')) (<tf.Variable ...>, <tf.Variable ...>)" 8926,Orthogonal,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"Initializer that generates an orthogonal matrix. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.Orthogonal()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.Orthogonal(gain=0.5)) (<tf.Variable ...>, <tf.Variable ...>)" 8927,Identity,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"Initializer that generates the identity matrix. ... Examples: >>> def make_variable(k, initializer): ... return tf.Variable(initializer(shape=[k, k], dtype=tf.float32)) >>> make_variable(2, tf.initializers.Identity()) <tf.Variable ...> >>> make_variable(3, tf.initializers.Identity(gain=0.5)) <tf.Variable ...> Args: gain: Multiplicative factor to apply to the identity matrix." 8928,GlorotUniform,tensorflow/tensorflow/python/ops/init_ops_v2.py,717,class,"The Glorot uniform initializer, also called Xavier uniform initializer. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.GlorotUniform()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ...>, <tf.Variable ...>)" 8929,GlorotNormal,tensorflow/tensorflow/python/ops/init_ops_v2.py,,class,"The Glorot normal initializer, also called Xavier normal initializer. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.GlorotNormal()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ...>, <tf.Variable ...>)" 8930,lecun_normal,tensorflow/tensorflow/python/ops/init_ops_v2.py,,function,"LeCun normal initializer. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.lecun_normal()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ...>, <tf.Variable ...>)" 8931,lecun_uniform,tensorflow/tensorflow/python/ops/init_ops_v2.py,,function,"LeCun uniform initializer. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.lecun_uniform()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ...>, <tf.Variable ...>)" 8932,he_normal,tensorflow/tensorflow/python/ops/init_ops_v2.py,,function,"He normal initializer. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.he_normal()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ...>, <tf.Variable ...>)" 8933,he_uniform,tensorflow/tensorflow/python/ops/init_ops_v2.py,,function,"He uniform variance scaling initializer. ... Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.initializers.he_uniform()) >>> v1 <tf.Variable ...> >>> v2 <tf.Variable ...> >>> make_variables(4, tf.initializers.RandomNormal()) (<tf.Variable ...>, <tf.Variable ...>)" ... 8985,matrix_triangular_solve,tensorflow/tensorflow/python/ops/linalg_ops.py,,function,"Solve systems of linear equations with upper or lower triangular matrices. ... >>> a = tf.constant([[3, 0, 0, 0], ... [2, 1, 0, 0], ... [1, 0, 1, 0], ... [1, 1, 1, 1]], dtype=tf.float32) >>> b = tf.constant([[4], [2], [4], [2]], dtype=tf.float32) >>> x = tf.linalg.triangular_solve(a, b, lower=True) >>> x <tf.Tensor ...> >>> tf.matmul(a, x) <tf.Tensor ...> Args: matrix: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`. Shape is `[..., M, M]`. rhs: A `Tensor`. Must have the same type as `matrix`. Shape is `[..., M, N]`. lower: An optional `bool`. Defaults to `True`. Boolean indicating whether the innermost matrices in matrix are lower or upper triangular. adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether to solve with matrix or its (block-wise) adjoint. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as matrix, and shape is `[..., M, N]`." 8986,cholesky_solve,tensorflow/tensorflow/python/ops/linalg_ops.py,150,function,"Solves systems of linear equations `A X = RHS`, given Cholesky factorizations. ```python # Solve 10 separate 2x2 linear systems: A = ... # shape 10 x 2 x 2 RHS = ... # shape 10 x 2 x 1 chol = tf.linalg.cholesky(A) # shape 10 x 2 x 2 X = tf.linalg.cholesky_solve(chol, RHS) # shape 10 x 2 x 1 # tf.matmul(A, X) ~ RHS X[3, :, 0] # Solution to the linear system A[3, :, :] x = RHS[3, :, 0] # Solve five linear systems (K = 5) for every member of the length 10 batch. A = ... # shape 10 x 2 x 2 RHS = ... # shape 10 x 2 x 5 ... X[3, :, 2] # Solution to the linear system A[3, :, :] x = RHS[3, :, 2] ``` Args: chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`. Cholesky factorization of `A`, e.g. `chol = tf.linalg.cholesky(A)`. For that reason, only the lower triangular parts (including the diagonal) of the last two dimensions of `chol` are used. The strictly upper part is assumed to be zero and not accessed. rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`. name: A name to give this `Op`. Defaults to `cholesky_solve`. Returns: Solution to `A x = rhs`, shape `[..., M, K]`." 8987,eye,tensorflow/tensorflow/python/ops/linalg_ops.py,194,function,"Construct an identity matrix, or a batch of matrices. See also `tf.ones`, `tf.zeros`, `tf.fill`, `tf.one_hot`. ```python # Construct one identity matrix. tf.eye(2) ==> [[1., 0.], [0., 1.]] # Construct a batch of 3 identity matrices, each 2 x 2. # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2. batch_identity = tf.eye(2, batch_shape=[3]) # Construct one 2 x 3 ""identity"" matrix tf.eye(2, num_columns=3) ==> [[ 1., 0., 0.], [ 0., 1., 0.]] ``` Args: num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows in each batch matrix. num_columns: Optional non-negative `int32` scalar `Tensor` giving the number of columns in each batch matrix. Defaults to `num_rows`. batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`. If provided, the returned `Tensor` will have leading batch dimensions of this shape. dtype: The type of an element in the resulting `Tensor` name: A name for this `Op`. Defaults to ""eye"". Returns: A `Tensor` of shape `batch_shape + [num_rows, num_columns]`" 8988,matrix_solve_ls,tensorflow/tensorflow/python/ops/linalg_ops.py,243,function,"Solves one or more linear least-squares problems. `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose inner-most 2 dimensions form `M`-by-`K` matrices.
The computed output is a `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `N`-by-`K` matrices that solve the equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares sense. Below we will use the following notation for each pair of matrix and right-hand sides in the batch: `matrix`=\\(A \in \Re^{m \times n}\\), `rhs`=\\(B \in \Re^{m \times k}\\), `output`=\\(X \in \Re^{n \times k}\\), `l2_regularizer`=\\(\lambda\\). If `fast` is `True`, then the solution is computed by solving the normal equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 + \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the minimum-norm solution to the under-determined linear system, i.e. \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to \\(A Z = B\\). Notice that the fast path is only numerically stable when \\(A\\) is numerically full rank and has a condition number \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or \\(\lambda\\) is sufficiently large. If `fast` is `False` an algorithm based on the numerically robust complete orthogonal decomposition is used. This computes the minimum-norm least-squares solution, even when \\(A\\) is rank deficient. This path is typically 6-7 times slower than the fast path. If `fast` is `False` then `l2_regularizer` is ignored. Args: matrix: `Tensor` of shape `[..., M, N]`. rhs: `Tensor` of shape `[..., M, K]`. l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`. fast: bool. Defaults to `True`. name: string, optional name of the operation. Returns: output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `N`-by-`K` matrices that solve the equations `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares sense. Raises: NotImplementedError: linalg.lstsq is currently disabled for complex128 and l2_regularizer != 0 due to poor accuracy." 8989,eig,tensorflow/tensorflow/python/ops/linalg_ops.py,380,function,"Computes the eigen decomposition of a batch of matrices. The eigenvalues and eigenvectors for a non-Hermitian matrix in general are complex. The eigenvectors are not guaranteed to be linearly independent. Computes the eigenvalues and right eigenvectors of the innermost N-by-N matrices in `tensor` such that `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1. Args: tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of each inner matrix is referenced. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is `[..., N]`. Sorted in non-decreasing order. v: Eigenvectors. Shape is `[..., N, N]`. The columns of the innermost matrices contain eigenvectors of the corresponding matrices in `tensor`" 8990,eigvals,tensorflow/tensorflow/python/ops/linalg_ops.py,411,function,"Computes the eigenvalues of one or more matrices. Note: If your program backpropagates through this function, you should replace it with a call to tf.linalg.eig (possibly ignoring the second output) to avoid computing the eigen decomposition twice. This is because the eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See _SelfAdjointEigV2Grad in linalg_grad.py. Args: tensor: `Tensor` of shape `[..., N, N]`.
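The fast-path formula quoted above can be checked numerically. A small sketch, assuming `tf.linalg.lstsq` as the exported name of `matrix_solve_ls` and arbitrary illustrative shapes:

```python
import tensorflow as tf

# For m >= n the fast path solves X = (A^T A + lambda I)^{-1} A^T B.
A = tf.random.normal([6, 3])
B = tf.random.normal([6, 2])
lam = 0.1
X = tf.linalg.lstsq(A, B, l2_regularizer=lam)
At = tf.transpose(A)
X_ref = tf.linalg.solve(tf.matmul(At, A) + lam * tf.eye(3), tf.matmul(At, B))
print(float(tf.reduce_max(tf.abs(X - X_ref))))   # ~ 0
```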
name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N` eigenvalues of `tensor[..., :, :]`." 8991,self_adjoint_eig,tensorflow/tensorflow/python/ops/linalg_ops.py,439,function,"Computes the eigen decomposition of a batch of self-adjoint matrices. Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in `tensor` such that `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1. Args: tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of each inner matrix is referenced. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is `[..., N]`. Sorted in non-decreasing order. v: Eigenvectors. Shape is `[..., N, N]`. The columns of the innermost matrices contain eigenvectors of the corresponding matrices in `tensor`" 8992,self_adjoint_eigvals,tensorflow/tensorflow/python/ops/linalg_ops.py,463,function,"Computes the eigenvalues of one or more self-adjoint matrices. Note: If your program backpropagates through this function, you should replace it with a call to tf.linalg.eigh (possibly ignoring the second output) to avoid computing the eigen decomposition twice. This is because the eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See _SelfAdjointEigV2Grad in linalg_grad.py. Args: tensor: `Tensor` of shape `[..., N, N]`. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N` eigenvalues of `tensor[..., :, :]`." 8993,svd,tensorflow/tensorflow/python/ops/linalg_ops.py,487,function,"Computes the singular value decompositions of one or more matrices. Computes the SVD of each inner matrix in `tensor` such that `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(conj(v[..., :, :]))` ```python # a is a tensor. # s is a tensor of singular values. # u is a tensor of left singular vectors. # v is a tensor of right singular vectors. s, u, v = svd(a) s = svd(a, compute_uv=False) ``` Args: tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and `N`. full_matrices: If true, compute full-sized `u` and `v`. If false (the default), compute only the leading `P` singular vectors. Ignored if `compute_uv` is `False`. compute_uv: If `True` then left and right singular vectors will be computed and returned in `u` and `v`, respectively. Otherwise, only the singular values will be computed, which can be significantly faster. name: string, optional name of the operation. Returns: s: Singular values. Shape is `[..., P]`. The values are sorted in reverse order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the second largest, etc. u: Left singular vectors. If `full_matrices` is `False` (default) then shape is `[..., M, P]`; if `full_matrices` is `True` then shape is `[..., M, M]`. Not returned if `compute_uv` is `False`. v: Right singular vectors. If `full_matrices` is `False` (default) then shape is `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`. Not returned if `compute_uv` is `False`. @compatibility(numpy) Mostly equivalent to numpy.linalg.svd, except that * The order of output arguments here is `s`, `u`, `v` when `compute_uv` is `True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd. * full_matrices is `False` by default as opposed to `True` for numpy.linalg.svd.
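The decomposition contract stated for `self_adjoint_eig` above (eigenvalues non-decreasing, eigenvectors in the columns of `v`) is easy to verify eagerly with the exported name `tf.linalg.eigh`; illustrative values only:

```python
import tensorflow as tf

# Check: tensor @ v[:, i] == e[i] * v[:, i] for each column i.
A = tf.constant([[2.0, 1.0], [1.0, 2.0]])
e, v = tf.linalg.eigh(A)
print(e)                        # [1., 3.], sorted in non-decreasing order
print(tf.matmul(A, v) - e * v)  # ~ zeros; e broadcasts to scale column i by e[i]
```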
* tf.linalg.svd uses the standard definition of the SVD \\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are the columns of `u`, while the right singular vectors of `a` are the columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint \\(V^H\\) as the third output argument. ```python import tensorflow as tf import numpy as np s, u, v = tf.linalg.svd(a) tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True)) u, s, v_adj = np.linalg.svd(a, full_matrices=False) np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj)) # tf_a_approx and np_a_approx should be numerically close. ``` @end_compatibility" 8994,norm_v2,tensorflow/tensorflow/python/ops/linalg_ops.py,558,function,"Computes the norm of vectors, matrices, and tensors. This function can compute several different vector norms (the 1-norm, the Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and matrix norms (Frobenius, 1-norm, 2-norm and inf-norm). Args: tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128` ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`, `1`, `2`, `np.inf` and any positive real number yielding the corresponding p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if `tensor` is a matrix and equivalent to 2-norm for vectors. Some restrictions apply: a) The Frobenius norm `'fro'` is not defined for vectors, b) If axis is a 2-tuple (matrix norm), only `'euclidean'`, '`fro'`, `1`, `2`, `np.inf` are supported. See the description of `axis` on how to compute norms for a batch of vectors or matrices stored in a tensor. axis: If `axis` is `None` (the default), the input is considered a vector and a single vector norm is computed over the entire set of values in the tensor, i.e. `norm(tensor, ord=ord)` is equivalent to `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the input is considered a batch of vectors, and `axis` determines the axis in `tensor` over which to compute vector norms. If `axis` is a 2-tuple of Python integers it is considered a batch of matrices and `axis` determines the axes in `tensor` over which to compute a matrix norm. Negative indices are supported. Example: If you are passing a tensor that can be either a matrix or a batch of matrices at runtime, pass `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are computed. keepdims: If True, the axis indicated in `axis` are kept with size 1. Otherwise, the dimensions in `axis` are removed from the output shape. name: The name of the op. Returns: output: A `Tensor` of the same type as tensor, containing the vector or matrix norms. If `keepdims` is True then the rank of output is equal to the rank of `tensor`. Otherwise, if `axis` is none the output is a scalar, if `axis` is an integer, the rank of `output` is one less than the rank of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less than the rank of `tensor`. Raises: ValueError: If `ord` or `axis` is invalid. @compatibility(numpy) Mostly equivalent to numpy.linalg.norm. Not supported: ord <= 0, 2-norm for matrices, nuclear norm. Other differences: a) If axis is `None`, treats the flattened `tensor` as a vector regardless of rank. b) Explicitly supports 'euclidean' norm as the default, including for higher order tensors. @end_compatibility" 8995,norm,tensorflow/tensorflow/python/ops/linalg_ops.py,632,function,"Computes the norm of vectors, matrices, and tensors. 
This function can compute several different vector norms (the 1-norm, the Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and matrix norms (Frobenius, 1-norm, 2-norm and inf-norm). Args: tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128` ord: Order of the norm. Supported values are 'fro', 'euclidean', `1`, `2`, `np.inf` and any positive real number yielding the corresponding p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if `tensor` is a matrix and equivalent to 2-norm for vectors. Some restrictions apply: a) The Frobenius norm `fro` is not defined for vectors, b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`, `2`, `np.inf` are supported. See the description of `axis` on how to compute norms for a batch of vectors or matrices stored in a tensor. axis: If `axis` is `None` (the default), the input is considered a vector and a single vector norm is computed over the entire set of values in the tensor, i.e. `norm(tensor, ord=ord)` is equivalent to `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the input is considered a batch of vectors, and `axis` determines the axis in `tensor` over which to compute vector norms. If `axis` is a 2-tuple of Python integers it is considered a batch of matrices and `axis` determines the axes in `tensor` over which to compute a matrix norm. Negative indices are supported. Example: If you are passing a tensor that can be either a matrix or a batch of matrices at runtime, pass `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are computed. keepdims: If True, the axis indicated in `axis` are kept with size 1. Otherwise, the dimensions in `axis` are removed from the output shape. name: The name of the op. keep_dims: Deprecated alias for `keepdims`. Returns: output: A `Tensor` of the same type as tensor, containing the vector or matrix norms. If `keepdims` is True then the rank of output is equal to the rank of `tensor`. Otherwise, if `axis` is none the output is a scalar, if `axis` is an integer, the rank of `output` is one less than the rank of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less than the rank of `tensor`. Raises: ValueError: If `ord` or `axis` is invalid. @compatibility(numpy) Mostly equivalent to numpy.linalg.norm. Not supported: ord <= 0, 2-norm for matrices, nuclear norm. Other differences: a) If axis is `None`, treats the flattened `tensor` as a vector regardless of rank. b) Explicitly supports 'euclidean' norm as the default, including for higher order tensors. @end_compatibility" 8996,eye,tensorflow/tensorflow/python/ops/linalg_ops_impl.py,33,function,"Construct an identity matrix, or a batch of matrices. See `linalg_ops.eye`." 
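The `axis` conventions documented above (whole-tensor vector norm, batch of vector norms, batch of matrix norms) can be shown on one tensor; a small sketch with illustrative shapes:

```python
import tensorflow as tf

x = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])
flat = tf.norm(x)                            # scalar: flattened tensor as one vector
rows = tf.norm(x, ord=2, axis=-1)            # shape [2, 3]: vector norms along last axis
mats = tf.norm(x, ord='fro', axis=[-2, -1])  # shape [2]: Frobenius norm per matrix
```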
8997,empty_tensor_list,tensorflow/tensorflow/python/ops/list_ops.py,45,function, 8998,tensor_list_reserve,tensorflow/tensorflow/python/ops/list_ops.py,59,function, 8999,tensor_list_from_tensor,tensorflow/tensorflow/python/ops/list_ops.py,67,function, 9000,tensor_list_get_item,tensorflow/tensorflow/python/ops/list_ops.py,74,function, 9001,tensor_list_pop_back,tensorflow/tensorflow/python/ops/list_ops.py,84,function, 9002,tensor_list_gather,tensorflow/tensorflow/python/ops/list_ops.py,92,function, 9003,tensor_list_scatter,tensorflow/tensorflow/python/ops/list_ops.py,105,function, 9004,tensor_list_stack,tensorflow/tensorflow/python/ops/list_ops.py,122,function, 9005,tensor_list_concat,tensorflow/tensorflow/python/ops/list_ops.py,135,function, 9006,tensor_list_split,tensorflow/tensorflow/python/ops/list_ops.py,147,function, 9007,tensor_list_set_item,tensorflow/tensorflow/python/ops/list_ops.py,155,function,Sets `item` at `index` in input list. 9008,_PushBackGrad,tensorflow/tensorflow/python/ops/list_ops.py,175,function, 9009,_PopBackGrad,tensorflow/tensorflow/python/ops/list_ops.py,183,function, 9010,_TensorListStackGrad,tensorflow/tensorflow/python/ops/list_ops.py,195,function, 9011,_TensorListConcatGrad,tensorflow/tensorflow/python/ops/list_ops.py,201,function,Gradient function for TensorListConcat. 9012,_TensorListSplitGrad,tensorflow/tensorflow/python/ops/list_ops.py,215,function, 9013,_TensorListFromTensorGrad,tensorflow/tensorflow/python/ops/list_ops.py,227,function,Gradient for TensorListFromTensor. 9014,_TensorListGetItemGrad,tensorflow/tensorflow/python/ops/list_ops.py,249,function,Gradient for TensorListGetItem. 9015,_TensorListSetItemGrad,tensorflow/tensorflow/python/ops/list_ops.py,265,function,Gradient function for TensorListSetItem. 9016,_TensorListResizeGrad,tensorflow/tensorflow/python/ops/list_ops.py,280,function, 9017,_TensorListGatherGrad,tensorflow/tensorflow/python/ops/list_ops.py,287,function,Gradient function for TensorListGather. 9018,_TensorListScatterGrad,tensorflow/tensorflow/python/ops/list_ops.py,301,function,Gradient function for TensorListScatter. 9019,_TensorListScatterIntoExistingListGrad,tensorflow/tensorflow/python/ops/list_ops.py,317,function,Gradient function for TensorListScatterIntoExistingList. 9020,_build_element_shape,tensorflow/tensorflow/python/ops/list_ops.py,330,function,"Converts shape to a format understood by list_ops for element_shape. If `shape` is already a `Tensor` it is returned as-is. We do not perform a type check here. If shape is None or a TensorShape with unknown rank, -1 is returned. If shape is a scalar, an int32 tensor with empty list is returned. Note that we directly return an empty list, since ops.convert_to_tensor would convert it to a float32, which is not a valid type for element_shape. If shape is a sequence of dims, None's in the list are replaced with -1. We do not check the dtype of the other dims. Args: shape: Could be None, Tensor, TensorShape or a list of dims (each dim could be a None, scalar or Tensor). Returns: A None-free shape that can be converted to a tensor." 9021,Print,tensorflow/tensorflow/python/ops/logging_ops.py,74,function,"Prints a list of tensors. This is an identity op (behaves like `tf.identity`) with the side effect of printing `data` when evaluating. Note: This op prints to the standard error. It is not currently compatible with jupyter notebook (printing to the notebook *server's* output, not into the notebook). Args: input_: A tensor passed through this op.
data: A list of tensors to print out when op is evaluated. message: A string, prefix of the error message. first_n: Only log `first_n` number of times. Negative numbers log always; this is the default. summarize: Only print this many entries of each tensor. If None, then a maximum of 3 elements are printed per input tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type and contents as `input_`. ```python sess = tf.compat.v1.Session() with sess.as_default(): tensor = tf.range(10) print_op = tf.print(tensor) with tf.control_dependencies([print_op]): out = tf.add(tensor, tensor) sess.run(out) ```" 9022,_generate_placeholder_string,tensorflow/tensorflow/python/ops/logging_ops.py,113,function,Generate and return a string that does not appear in `x`. 9023,_is_filepath,tensorflow/tensorflow/python/ops/logging_ops.py,122,function,Returns True if output_stream is a file path. 9024,print_v2,tensorflow/tensorflow/python/ops/logging_ops.py,135,function,"Print the specified inputs. A TensorFlow operator that prints the specified inputs to a desired output stream or logging level. The inputs may be dense or sparse Tensors, primitive python objects, data structures that contain tensors, and printable Python objects. Printed tensors will recursively show the first and last elements of each dimension to summarize. Example: Single-input usage: ```python tensor = tf.range(10) tf.print(tensor, output_stream=sys.stderr) ``` (This prints ""[0 1 2 ... 7 8 9]"" to sys.stderr) Multi-input usage: ```python tensor = tf.range(10) tf.print(""tensors:"", tensor, {2: tensor * 2}, output_stream=sys.stdout) ``` (This prints ""tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}"" to sys.stdout) Changing the input separator: ```python tensor_a = tf.range(2) tensor_b = tensor_a * 2 tf.print(tensor_a, tensor_b, output_stream=sys.stderr, sep=',') ``` (This prints ""[0 1],[0 2]"" to sys.stderr) Usage in a `tf.function`: ```python @tf.function def f(): tensor = tf.range(10) tf.print(tensor, output_stream=sys.stderr) return tensor range_tensor = f() ``` (This prints ""[0 1 2 ... 7 8 9]"" to sys.stderr) @compatibility(TF 1.x Graphs and Sessions) In graphs manually created outside of `tf.function`, this method returns the created TF operator that prints the data. To make sure the operator runs, users need to pass the produced op to `tf.compat.v1.Session`'s run method, or to use the op as a control dependency for executed ops by specifying `with tf.compat.v1.control_dependencies([print_op])`. @end_compatibility Compatibility usage in TF 1.x graphs: ```python sess = tf.compat.v1.Session() with sess.as_default(): tensor = tf.range(10) print_op = tf.print(""tensors:"", tensor, {2: tensor * 2}, output_stream=sys.stdout) with tf.control_dependencies([print_op]): tripled_tensor = tensor * 3 sess.run(tripled_tensor) ``` (This prints ""tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}"" to sys.stdout) Note: In Jupyter notebooks and colabs, `tf.print` prints to the notebook cell outputs. It will not write to the notebook kernel's console logs. Args: *inputs: Positional arguments that are the inputs to print. Inputs in the printed output will be separated by spaces. Inputs may be python primitives, tensors, data structures such as dicts and lists that may contain tensors (with the data structures possibly nested in arbitrary ways), and printable python objects. output_stream: The output stream, logging level, or file to print to. 
Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info, tf.compat.v1.logging.warning, tf.compat.v1.logging.error, absl.logging.info, absl.logging.warning and absl.logging.error are also supported. To print to a file, pass a string starting with ""file://"" followed by the file path, e.g., ""file:///tmp/foo.out"". summarize: The first and last `summarize` elements within each dimension are recursively printed per Tensor. If None, then the first 3 and last 3 elements of each dimension are printed for each tensor. If set to -1, it will print all elements of every tensor. sep: The string to use to separate the inputs. Defaults to "" "". end: End character that is appended at the end of the printed string. Defaults to the newline character. name: A name for the operation (optional). Returns: None when executing eagerly. During graph tracing this returns a TF operator that prints the specified inputs in the specified output stream or logging level. This operator will be automatically executed except inside of `tf.compat.v1` graphs and sessions. Raises: ValueError: If an unsupported output stream is specified." 9025,_PrintGrad,tensorflow/tensorflow/python/ops/logging_ops.py,373,function, 9026,_Collect,tensorflow/tensorflow/python/ops/logging_ops.py,377,function, 9027,histogram_summary,tensorflow/tensorflow/python/ops/logging_ops.py,389,function,"Outputs a `Summary` protocol buffer with a histogram. This op is deprecated. Please switch to tf.summary.histogram. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. This op reports an `InvalidArgument` error if any value is not finite. Args: tag: A `string` `Tensor`. 0-D. Tag to use for the summary value. values: A real numeric `Tensor`. Any shape. Values to use to build the histogram. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 9028,image_summary,tensorflow/tensorflow/python/ops/logging_ops.py,429,function,"Outputs a `Summary` protocol buffer with images. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The summary has up to `max_images` summary values containing images. The images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, channels]` and where `channels` can be: * 1: `tensor` is interpreted as Grayscale. * 3: `tensor` is interpreted as RGB. * 4: `tensor` is interpreted as RGBA. The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. `uint8` values are unchanged. The op uses two different normalization algorithms: * If the input values are all positive, they are rescaled so the largest one is 255. * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255. The `tag` argument is a scalar `Tensor` of type `string`.
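The `tf.print` (print_v2) options documented above compose in a single call; a minimal sketch with illustrative inputs:

```python
import sys
import tensorflow as tf

# Multi-input print with an explicit stream, separator, and full summarization.
t = tf.range(10)
tf.print("values:", t, {"doubled": t * 2},
         output_stream=sys.stdout, summarize=-1, sep=" ", end="\n")
```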
It is used to build the `tag` of the summary values: * If `max_images` is 1, the summary value tag is '*tag*/image'. * If `max_images` is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. Args: tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the summary values. tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height, width, channels]` where `channels` is 1, 3, or 4. max_images: Max number of batch elements to generate images for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [ops.GraphKeys.SUMMARIES] name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 9029,audio_summary,tensorflow/tensorflow/python/ops/logging_ops.py,490,function,"Outputs a `Summary` protocol buffer with audio. This op is deprecated. Please switch to tf.summary.audio. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The summary has up to `max_outputs` summary values containing audio. The audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. * If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. Args: tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the summary values. tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]` or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`. sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the signal in hertz. max_outputs: Max number of batch elements to generate audio for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [ops.GraphKeys.SUMMARIES] name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 9030,merge_summary,tensorflow/tensorflow/python/ops/logging_ops.py,547,function,"Merges summaries. This op is deprecated. Please switch to tf.compat.v1.summary.merge, which has identical behavior. This op creates a [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) protocol buffer that contains the union of all the values in the input summaries. When the Op is run, it reports an `InvalidArgument` error if multiple values in the summaries to merge use the same tag. Args: inputs: A list of `string` `Tensor` objects containing serialized `Summary` protocol buffers. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer resulting from the merging." 9031,merge_all_summaries,tensorflow/tensorflow/python/ops/logging_ops.py,581,function,"Merges all summaries collected in the default graph. This op is deprecated. 
Please switch to tf.compat.v1.summary.merge_all, which has identical behavior. Args: key: `GraphKey` used to collect the summaries. Defaults to `GraphKeys.SUMMARIES`. Returns: If no summaries were collected, returns None. Otherwise returns a scalar `Tensor` of type `string` containing the serialized `Summary` protocol buffer resulting from the merging." 9032,get_summary_op,tensorflow/tensorflow/python/ops/logging_ops.py,604,function,"Returns a single Summary op that would run all summaries. Either returns an existing op from the `SUMMARY_OP` collection or merges all existing summaries. Returns: If no summaries were collected, returns None. Otherwise returns a scalar `Tensor` of type `string` containing the serialized `Summary` protocol buffer resulting from the merging." 9033,scalar_summary,tensorflow/tensorflow/python/ops/logging_ops.py,635,function,"Outputs a `Summary` protocol buffer with scalar values. This op is deprecated. Please switch to tf.summary.scalar. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The input `tags` and `values` must have the same shape. The generated summary has a summary value for each tag-value pair in `tags` and `values`. Args: tags: A `string` `Tensor`. Tags for the summaries. values: A real numeric Tensor. Values for the summaries. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 9034,initialize_all_tables,tensorflow/tensorflow/python/ops/lookup_ops.py,54,function,"Returns an Op that initializes all tables of the default graph. Args: name: Optional name for the initialization op. Returns: An Op that initializes all tables. Note that if there are no tables the returned Op is a NoOp." 9035,tables_initializer,tensorflow/tensorflow/python/ops/lookup_ops.py,68,function,"Returns an Op that initializes all tables of the default graph. See the [Low Level Intro](https://www.tensorflow.org/guide/low_level_intro#feature_columns) guide, for an example of usage. Args: name: Optional name for the initialization op. Returns: An Op that initializes all tables. Note that if there are no tables the returned Op is a NoOp." 9036,_check_table_dtypes,tensorflow/tensorflow/python/ops/lookup_ops.py,88,function,"Check that the given key_dtype and value_dtype matches the table dtypes. Args: table: The table to check types against to. key_dtype: The key data type to check. value_dtype: The value data type to check. Raises: TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data types." 9037,LookupInterface,tensorflow/tensorflow/python/ops/lookup_ops.py,108,class,Represents a lookup table that persists across different steps. 9038,InitializableLookupTableBase,tensorflow/tensorflow/python/ops/lookup_ops.py,149,class,"Initializable lookup table interface. Initializable lookup tables persist across different steps." 9039,InitializableLookupTableBaseV1,tensorflow/tensorflow/python/ops/lookup_ops.py,240,class, 9040,StaticHashTable,tensorflow/tensorflow/python/ops/lookup_ops.py,248,class,"A generic hash table that is immutable once initialized.
Example usage: ```python keys_tensor = tf.constant([1, 2]) vals_tensor = tf.constant([3, 4]) input_tensor = tf.constant([1, 5]) table = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1) print(table.lookup(input_tensor)) ```" 9041,StaticHashTableV1,tensorflow/tensorflow/python/ops/lookup_ops.py,330,class,"A generic hash table that is immutable once initialized. When running in graph mode, you must evaluate the tensor returned by `tf.tables_initializer()` before evaluating the tensor returned by this class's `lookup()` method. Example usage in graph mode: ```python keys_tensor = tf.constant([1, 2]) vals_tensor = tf.constant([3, 4]) input_tensor = tf.constant([1, 5]) table = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1) out = table.lookup(input_tensor) with tf.Session() as sess: sess.run(tf.tables_initializer()) print(sess.run(out)) ``` In eager mode, no special code is needed to initialize the table. Example usage in eager mode: ```python tf.enable_eager_execution() keys_tensor = tf.constant([1, 2]) vals_tensor = tf.constant([3, 4]) input_tensor = tf.constant([1, 5]) table = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1) print(table.lookup(input_tensor)) ```" 9042,HashTable,tensorflow/tensorflow/python/ops/lookup_ops.py,369,class, 9043,TableInitializerBase,tensorflow/tensorflow/python/ops/lookup_ops.py,376,class,Base class for lookup table initializers. 9044,DatasetInitializer,tensorflow/tensorflow/python/ops/lookup_ops.py,416,class,"Creates a table initializer from a `tf.data.Dataset`. Sample usage: ```python keys = tf.data.Dataset.range(100) values = tf.data.Dataset.range(100).map( lambda x: string_ops.as_string(x * 2)) ds = tf.data.Dataset.zip((keys, values)) init = tf.lookup.experimental.DatasetInitializer(ds) table = tf.lookup.StaticHashTable(init, """") output = table.lookup([0, 1, 2]) assertEquals(output, [""0"", ""2"", ""4""]) ``` Attributes: dataset: A `tf.data.Dataset` object that produces tuples of scalars. The first scalar is treated as a key and the second as value. Raises: ValueError if `dataset` doesn't conform to specifications." 9045,KeyValueTensorInitializer,tensorflow/tensorflow/python/ops/lookup_ops.py,476,class,Table initializers given `keys` and `values` tensors. 9046,TextFileIndex,tensorflow/tensorflow/python/ops/lookup_ops.py,532,class,"The key and value content to get from each line. This class defines the key and value used for tf.lookup.TextFileInitializer. The key and value content to get from each line is specified either by the following, or a value `>=0`. * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero, expects data type int64. * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data type string. A value `>=0` means use the index (starting at zero) of the split line based on `delimiter`." 9047,TextFileInitializer,tensorflow/tensorflow/python/ops/lookup_ops.py,552,class,"Table initializers from a text file. This initializer assigns one entry in the table for each line in the file. The key and value type of the table to initialize is given by `key_dtype` and `value_dtype`. The key and value content to get from each line is specified by the `key_index` and `value_index`. * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero, expects data type int64. * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data type string.
* A value `>=0` means use the index (starting at zero) of the split line based on `delimiter`. For example if we have a file with the following content: ``` emerson 10 lake 20 palmer 30 ``` The following snippet initializes a table with the first column as keys and second column as values: * `emerson -> 10` * `lake -> 20` * `palmer -> 30` ```python table = tf.lookup.StaticHashTable(tf.lookup.TextFileInitializer( ""test.txt"", tf.string, 0, tf.int64, 1, delimiter="" ""), -1) ... table.init.run() ``` Similarly, to initialize the whole line as keys and the line number as values: * `emerson 10 -> 0` * `lake 20 -> 1` * `palmer 30 -> 2` ```python table = tf.lookup.StaticHashTable(tf.lookup.TextFileInitializer( ""test.txt"", tf.string, tf.lookup.TextFileIndex.WHOLE_LINE, tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER, delimiter="" ""), -1) ... table.init.run() ```" 9048,TextFileStringTableInitializer,tensorflow/tensorflow/python/ops/lookup_ops.py,733,class,Table initializer for `int64` IDs to string tables from a text file. 9049,TextFileIdTableInitializer,tensorflow/tensorflow/python/ops/lookup_ops.py,784,class,Table initializer for string to `int64` IDs tables from a text file. 9050,HasherSpec,tensorflow/tensorflow/python/ops/lookup_ops.py,837,class,"A structure for the spec of the hashing function to use for hash buckets. `hasher` is the name of the hashing function to use (eg. ""fasthash"", ""stronghash""). `key` is optional and specifies the key to use for the hash function if supported, currently only used by a strong hash. Fields: hasher: The hasher name to use. key: The key to be used by the hashing function, if required." 9051,StrongHashSpec,tensorflow/tensorflow/python/ops/lookup_ops.py,855,class,"A structure to specify a key of the strong keyed hash spec. The strong hash requires a `key`, which is a list of 2 unsigned integer numbers. These should be non-zero; random numbers generated from random.org would be a fine choice. Fields: key: The key to be used by the keyed hashing function." 9052,_as_string,tensorflow/tensorflow/python/ops/lookup_ops.py,878,function, 9053,IdTableWithHashBuckets,tensorflow/tensorflow/python/ops/lookup_ops.py,884,class,"String to Id table wrapper that assigns out-of-vocabulary keys to buckets. For example, if an instance of `IdTableWithHashBuckets` is initialized with a string-to-id table that maps: * `emerson -> 0` * `lake -> 1` * `palmer -> 2` The `IdTableWithHashBuckets` object will perform the following mapping: * `emerson -> 0` * `lake -> 1` * `palmer -> 2` * `<other term> -> bucket_id`, where bucket_id will be between `3` and `3 + num_oov_buckets - 1`, calculated by: `hash(<term>) % num_oov_buckets + vocab_size` If input_tensor is `[""emerson"", ""lake"", ""palmer"", ""king"", ""crimson""]`, the lookup result is `[0, 1, 2, 4, 7]`. If `table` is None, only out-of-vocabulary buckets are used. Example usage: ```python num_oov_buckets = 3 input_tensor = tf.constant([""emerson"", ""lake"", ""palmer"", ""king"", ""crimson""]) table = tf.IdTableWithHashBuckets( tf.StaticHashTable( tf.lookup.TextFileInitializer( filename, key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE, value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER, delimiter=""\t""), default_value), num_oov_buckets) out = table.lookup(input_tensor) table.init.run() print(out.eval()) ``` The hash function used for generating out-of-vocabulary buckets ID is handled by `hasher_spec`."
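The out-of-vocabulary bucketing rule described above can be exercised eagerly with an in-memory initializer instead of a vocabulary file; a minimal sketch using `tf.lookup.StaticVocabularyTable` (the next entry), which applies the same hash-bucket scheme:

```python
import tensorflow as tf

keys = tf.constant(["emerson", "lake", "palmer"])
values = tf.constant([0, 1, 2], dtype=tf.int64)
init = tf.lookup.KeyValueTensorInitializer(keys, values)
table = tf.lookup.StaticVocabularyTable(init, num_oov_buckets=3)

ids = table.lookup(tf.constant(["emerson", "king", "crimson"]))
print(ids)  # in-vocab -> 0; OOV terms -> hash(term) % 3 + 3, i.e. a value in [3, 5]
```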
9054,StaticVocabularyTable,tensorflow/tensorflow/python/ops/lookup_ops.py,1099,class,"String to Id table wrapper that assigns out-of-vocabulary keys to buckets. For example, if an instance of `StaticVocabularyTable` is initialized with a string-to-id initializer that maps: * `emerson -> 0` * `lake -> 1` * `palmer -> 2` The `Vocabulary` object will perform the following mapping: * `emerson -> 0` * `lake -> 1` * `palmer -> 2` * `<other term> -> bucket_id`, where bucket_id will be between `3` and `3 + num_oov_buckets - 1`, calculated by: `hash(<term>) % num_oov_buckets + vocab_size` If input_tensor is `[""emerson"", ""lake"", ""palmer"", ""king"", ""crimson""]`, the lookup result is `[0, 1, 2, 4, 7]`. If `initializer` is None, only out-of-vocabulary buckets are used. Example usage: ```python num_oov_buckets = 3 input_tensor = tf.constant([""emerson"", ""lake"", ""palmer"", ""king"", ""crimson""]) table = tf.lookup.StaticVocabularyTable( tf.lookup.TextFileInitializer( filename, key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE, value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER, delimiter=""\t""), num_oov_buckets) out = table.lookup(input_tensor) table.init.run() print(out.eval()) ``` The hash function used for generating out-of-vocabulary buckets ID is Fingerprint64." 9055,StaticVocabularyTableV1,tensorflow/tensorflow/python/ops/lookup_ops.py,1280,class, 9056,index_table_from_file,tensorflow/tensorflow/python/ops/lookup_ops.py,1290,function,"Returns a lookup table that converts a string tensor into int64 IDs. This operation constructs a lookup table to convert tensor of strings into int64 IDs. The mapping can be initialized from a vocabulary file specified in `vocabulary_file`, where the whole line is the key and the zero-based line number is the ID. Any lookup of an out-of-vocabulary token will return a bucket ID based on its hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the `default_value`. The bucket ID range is `[vocabulary size, vocabulary size + num_oov_buckets - 1]`. The underlying table must be initialized by calling `session.run(tf.compat.v1.tables_initializer())` or `session.run(table.init())` once. To specify multi-column vocabulary files, use key_column_index and value_column_index and delimiter. - TextFileIndex.LINE_NUMBER means use the line number starting from zero, expects data type int64. - TextFileIndex.WHOLE_LINE means use the whole line content, expects data type string. - A value >=0 means use the index (starting at zero) of the split line based on `delimiter`. Sample Usages: If we have a vocabulary file ""test.txt"" with the following content: ``` emerson lake palmer ``` ```python features = tf.constant([""emerson"", ""lake"", ""and"", ""palmer""]) table = tf.lookup.index_table_from_file( vocabulary_file=""test.txt"", num_oov_buckets=1) ids = table.lookup(features) ... tf.compat.v1.tables_initializer().run() ids.eval() ==> [0, 1, 3, 2] # where 3 is the out-of-vocabulary bucket ``` Args: vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`. num_oov_buckets: The number of out-of-vocabulary buckets. vocab_size: Number of the elements in the vocabulary, if known. default_value: The value to use for out-of-vocabulary feature values. Defaults to -1. hasher_spec: A `HasherSpec` to specify the hash function to use for assignation of out-of-vocabulary buckets. key_dtype: The `key` data type. name: A name for this op (optional). key_column_index: The column index from the text file to get the `key` values from.
The default is to use the whole line content. value_column_index: The column index from the text file to get the `value` values from. The default is to use the line number, starting from zero. delimiter: The delimiter to separate fields in a line. Returns: The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`. Raises: ValueError: If `vocabulary_file` is not set. ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater than zero." 9057,index_table_from_tensor,tensorflow/tensorflow/python/ops/lookup_ops.py,1411,function,"Returns a lookup table that converts a string tensor into int64 IDs. This operation constructs a lookup table to convert tensor of strings into int64 IDs. The mapping can be initialized from a string `vocabulary_list` 1-D tensor where each element is a key and corresponding index within the tensor is the value. Any lookup of an out-of-vocabulary token will return a bucket ID based on its hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the `default_value`. The bucket ID range is `[vocabulary list size, vocabulary list size + num_oov_buckets - 1]`. The underlying table must be initialized by calling `session.run(tf.compat.v1.tables_initializer())` or `session.run(table.init())` once. Elements in `vocabulary_list` cannot have duplicates, otherwise when executing the table initializer op, it will throw a `FailedPreconditionError`. Sample Usages: ```python vocabulary_list = tf.constant([""emerson"", ""lake"", ""palmer""]) table = tf.lookup.index_table_from_tensor( vocabulary_list=vocabulary_list, num_oov_buckets=1, default_value=-1) features = tf.constant([""emerson"", ""lake"", ""and"", ""palmer""]) ids = table.lookup(features) ... tf.compat.v1.tables_initializer().run() ids.eval() ==> [0, 1, 4, 2] ``` Args: vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to indices. The type of this object must be castable to `dtype`. num_oov_buckets: The number of out-of-vocabulary buckets. default_value: The value to use for out-of-vocabulary feature values. Defaults to -1. hasher_spec: A `HasherSpec` to specify the hash function to use for assignment of out-of-vocabulary buckets. dtype: The type of values passed to `lookup`. Only string and integers are supported. name: A name for this op (optional). Returns: The lookup table to map an input `Tensor` to index `int64` `Tensor`. Raises: ValueError: If `vocabulary_list` is invalid. ValueError: If `num_oov_buckets` is negative." 9058,index_to_string_table_from_file,tensorflow/tensorflow/python/ops/lookup_ops.py,1510,function,"Returns a lookup table that maps a `Tensor` of indices into strings. This operation constructs a lookup table to map int64 indices into string values. The table is initialized from a vocabulary file specified in `vocabulary_file`, where the whole line is the value and the zero-based line number is the index. Any input which does not have a corresponding index in the vocabulary file (an out-of-vocabulary entry) is assigned the `default_value` The underlying table must be initialized by calling `session.run(tf.compat.v1.tables_initializer())` or `session.run(table.init())` once. To specify multi-column vocabulary files, use key_column_index and value_column_index and delimiter. - TextFileIndex.LINE_NUMBER means use the line number starting from zero, expects data type int64. - TextFileIndex.WHOLE_LINE means use the whole line content, expects data type string. 
- A value >=0 means use the index (starting at zero) of the split line based on `delimiter`. Sample Usages: If we have a vocabulary file ""test.txt"" with the following content: ``` emerson lake palmer ``` ```python indices = tf.constant([1, 5], tf.int64) table = tf.lookup.index_to_string_table_from_file( vocabulary_file=""test.txt"", default_value=""UNKNOWN"") values = table.lookup(indices) ... tf.compat.v1.tables_initializer().run() values.eval() ==> [""lake"", ""UNKNOWN""] ``` Args: vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`. vocab_size: Number of elements in the vocabulary, if known. default_value: The value to use for out-of-vocabulary indices. name: A name for this op (optional). key_column_index: The column index from the text file to get the `key` values from. The default is to use the line number, starting from zero. value_column_index: The column index from the text file to get the `value` values from. The default is to use the whole line content. delimiter: The delimiter to separate fields in a line. Returns: The lookup table that maps `int64` indices to string values. Raises: ValueError: when `vocabulary_file` is empty. ValueError: when `vocab_size` is invalid." 9059,index_to_string_table_from_tensor,tensorflow/tensorflow/python/ops/lookup_ops.py,1601,function,"Returns a lookup table that maps a `Tensor` of indices into strings. This operation constructs a lookup table to map int64 indices into string values. The mapping is initialized from a string `vocabulary_list` 1-D `Tensor` where each element is a value and the corresponding index within the tensor is the key. Any input which does not have a corresponding index in 'vocabulary_list' (an out-of-vocabulary entry) is assigned the `default_value`. The underlying table must be initialized by calling `session.run(tf.compat.v1.tables_initializer())` or `session.run(table.init())` once. Elements in `vocabulary_list` cannot have duplicates, otherwise when executing the table initializer op, it will throw a `FailedPreconditionError`. Sample Usages: ```python vocabulary_list = tf.constant([""emerson"", ""lake"", ""palmer""]) indices = tf.constant([1, 5], tf.int64) table = tf.lookup.index_to_string_table_from_tensor( vocabulary_list, default_value=""UNKNOWN"") values = table.lookup(indices) ... tf.compat.v1.tables_initializer().run() values.eval() ==> [""lake"", ""UNKNOWN""] ``` Args: vocabulary_list: A 1-D string `Tensor` that specifies the strings to map from indices. default_value: The value to use for out-of-vocabulary indices. name: A name for this op (optional). Returns: The lookup table that maps `int64` indices to string values. Raises: ValueError: when `vocabulary_list` is not set." 9060,MutableHashTable,tensorflow/tensorflow/python/ops/lookup_ops.py,1663,class,"A generic mutable hash table implementation. Data can be inserted by calling the insert method and removed by calling the remove method. It does not support initialization via the init method. Example usage: ```python table = tf.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) sess.run(table.insert(keys, values)) out = table.lookup(query_keys) print(out.eval()) ```" 9061,DenseHashTable,tensorflow/tensorflow/python/ops/lookup_ops.py,1902,class,"A generic mutable hash table implementation using tensors as backing store. Data can be inserted by calling the insert method and removed by calling the remove method.
It does not support initialization via the init method. It uses ""open addressing"" with quadratic reprobing to resolve collisions. Compared to `MutableHashTable` the insert, remove and lookup operations in a `DenseHashTable` are typically faster, but memory usage can be higher. However, `DenseHashTable` does not require additional memory for temporary tensors created during checkpointing and restore operations. Example usage: ```python table = tf.lookup.DenseHashTable(key_dtype=tf.int64, value_dtype=tf.int64, default_value=-1, empty_key=0, deleted_key=-1) sess.run(table.insert(keys, values)) out = table.lookup(query_keys) print(out.eval()) ```" 9062,_RollGrad,tensorflow/tensorflow/python/ops/manip_grad.py,26,function, 9063,roll,tensorflow/tensorflow/python/ops/manip_ops.py,31,function, 9064,map_fn,tensorflow/tensorflow/python/ops/map_fn.py,47,function,"Transforms `elems` by applying `fn` to each element unstacked on axis 0. See also `tf.scan`. `map_fn` unstacks `elems` on axis 0 to obtain a sequence of elements; calls `fn` to transform each element; and then stacks the transformed values back together. #### Mapping functions with single-Tensor inputs and outputs If `elems` is a single tensor and `fn`'s signature is `tf.Tensor->tf.Tensor`, then `map_fn(fn, elems)` is equivalent to `tf.stack([fn(elem) for elem in tf.unstack(elems)])`. E.g.: >>> tf.map_fn(fn=lambda t: tf.range(t, t + 3), elems=tf.constant([3, 5, 2])) `map_fn(fn, elems).shape = [elems.shape[0]] + fn(elems[0]).shape`. #### Mapping functions with multi-arity inputs and outputs `map_fn` also supports functions with multi-arity inputs and outputs: * If `elems` is a tuple (or nested structure) of tensors, then those tensors must all have the same outer-dimension size (`num_elems`); and `fn` is used to transform each tuple (or structure) of corresponding slices from `elems`. E.g., if `elems` is a tuple `(t1, t2, t3)`, then `fn` is used to transform each tuple of slices `(t1[i], t2[i], t3[i])` (where `0 <= i < num_elems`). * If `fn` returns a tuple (or nested structure) of tensors, then the result is formed by stacking corresponding elements from those structures. #### Specifying `fn`'s output signature If `fn`'s input and output signatures are different, then the output signature must be specified using `fn_output_signature`. (The input and output signatures differ if their structures, dtypes, or tensor types do not match.) E.g.: >>> tf.map_fn(fn=tf.strings.length, # input & output have different dtypes ... elems=tf.constant([""hello"", ""moon""]), ... fn_output_signature=tf.int32) >>> tf.map_fn(fn=tf.strings.join, # input & output have different structures ... elems=[tf.constant(['The', 'A']), tf.constant(['Dog', 'Cat'])], ... fn_output_signature=tf.string) `fn_output_signature` can be specified using any of the following: * A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`) * A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`) * A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`) * A (possibly nested) tuple, list, or dict containing the above types. #### RaggedTensors `map_fn` supports `tf.RaggedTensor` inputs and outputs. In particular: * If `elems` is a `RaggedTensor`, then `fn` will be called with each row of that ragged tensor. * If `elems` has only one ragged dimension, then the values passed to `fn` will be `tf.Tensor`s. * If `elems` has multiple ragged dimensions, then the values passed to `fn` will be `tf.RaggedTensor`s with one fewer ragged dimension.
* If the result of `map_fn` should be a `RaggedTensor`, then use a `tf.RaggedTensorSpec` to specify `fn_output_signature`. * If `fn` returns `tf.Tensor`s with varying sizes, then use a `tf.RaggedTensorSpec` with `ragged_rank=0` to combine them into a single ragged tensor (which will have ragged_rank=1). * If `fn` returns `tf.RaggedTensor`s, then use a `tf.RaggedTensorSpec` with the same `ragged_rank`. >>> # Example: RaggedTensor input >>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]]) >>> tf.map_fn(tf.reduce_sum, rt, fn_output_signature=tf.int32) >>> # Example: RaggedTensor output >>> elems = tf.constant([3, 5, 0, 2]) >>> tf.map_fn(tf.range, elems, ... fn_output_signature=tf.RaggedTensorSpec(shape=[None], ... dtype=tf.int32)) Note: `map_fn` should only be used if you need to map a function over the *rows* of a `RaggedTensor`. If you wish to map a function over the individual values, then you should use: * `tf.ragged.map_flat_values(fn, rt)` (if fn is expressible as TensorFlow ops) * `rt.with_flat_values(map_fn(fn, rt.flat_values))` (otherwise) E.g.: >>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]]) >>> tf.ragged.map_flat_values(lambda x: x + 2, rt) #### SparseTensors `map_fn` supports `tf.sparse.SparseTensor` inputs and outputs. In particular: * If `elems` is a `SparseTensor`, then `fn` will be called with each row of that sparse tensor. In particular, the value passed to `fn` will be a `tf.sparse.SparseTensor` with one fewer dimension than `elems`. * If the result of `map_fn` should be a `SparseTensor`, then use a `tf.SparseTensorSpec` to specify `fn_output_signature`. The individual `SparseTensor`s returned by `fn` will be stacked into a single `SparseTensor` with one more dimension. >>> # Example: SparseTensor input >>> st = tf.sparse.SparseTensor([[0, 0], [2, 0], [2, 1]], [2, 3, 4], [4, 4]) >>> tf.map_fn(tf.sparse.reduce_sum, st, fn_output_signature=tf.int32) >>> # Example: SparseTensor output >>> tf.sparse.to_dense( ... tf.map_fn(tf.sparse.eye, tf.constant([2, 3]), ... fn_output_signature=tf.SparseTensorSpec(None, tf.float32))) Note: `map_fn` should only be used if you need to map a function over the *rows* of a `SparseTensor`. If you wish to map a function over the nonzero values, then you should use: * If the function is expressible as TensorFlow ops, use: ```python tf.sparse.SparseTensor(st.indices, fn(st.values), st.dense_shape) ``` * Otherwise, use: ```python tf.sparse.SparseTensor(st.indices, tf.map_fn(fn, st.values), st.dense_shape) ``` #### `map_fn` vs. vectorized operations `map_fn` will apply the operations used by `fn` to each element of `elems`, resulting in `O(elems.shape[0])` total operations. This is somewhat mitigated by the fact that `map_fn` can process elements in parallel. However, a transform expressed using `map_fn` is still typically less efficient than an equivalent transform expressed using vectorized operations. `map_fn` should typically only be used if one of the following is true: * It is difficult or expensive to express the desired transform with vectorized operations. * `fn` creates large intermediate values, so an equivalent vectorized transform would take too much memory. * Processing elements in parallel is more efficient than an equivalent vectorized transform. * Efficiency of the transform is not critical, and using `map_fn` is more readable. 
E.g., the example given above that maps `fn=lambda t: tf.range(t, t + 3)` across `elems` could be rewritten more efficiently using vectorized ops: >>> elems = tf.constant([3, 5, 2]) >>> tf.range(3) + tf.expand_dims(elems, 1) In some cases, `tf.vectorized_map` can be used to automatically convert a function to a vectorized equivalent. #### Eager execution When executing eagerly, `map_fn` does not execute in parallel even if `parallel_iterations` is set to a value > 1. You can still get the performance benefits of running a function in parallel by using the `tf.function` decorator: >>> fn=lambda t: tf.range(t, t + 3) >>> @tf.function ... def func(elems): ... return tf.map_fn(fn, elems, parallel_iterations=3) >>> func(tf.constant([3, 5, 2])) Note: if you use the `tf.function` decorator, any non-TensorFlow Python code that you may have written in your function won't get executed. See `tf.function` for more details. The recommendation would be to debug without `tf.function` but switch to it to get performance benefits of running `map_fn` in parallel. Args: fn: The callable to be performed. It accepts one argument, which will have the same (possibly nested) structure as `elems`. Its output must have the same structure as `fn_output_signature` if one is provided; otherwise it must have the same structure as `elems`. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unstacked along their first dimension. `fn` will be applied to the nested sequence of the resulting slices. `elems` may include ragged and sparse tensors. `elems` must consist of at least one tensor. dtype: Deprecated: Equivalent to `fn_output_signature`. parallel_iterations: (optional) The number of iterations allowed to run in parallel. When graph building, the default value is 10. While executing eagerly, the default value is set to 1. back_prop: (optional) False disables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. infer_shape: (optional) False disables tests for consistent output shapes. name: (optional) Name prefix for the returned tensors. fn_output_signature: The output signature of `fn`. Must be specified if `fn`'s input and output signatures are different (i.e., if their structures, dtypes, or tensor types do not match). `fn_output_signature` can be specified using any of the following: * A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`) * A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`) * A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`) * A (possibly nested) tuple, list, or dict containing the above types. Returns: A tensor or (possibly nested) sequence of tensors. Each tensor stacks the results of applying `fn` to tensors unstacked from `elems` along the first dimension, from first to last. The result may include ragged and sparse tensors. Raises: TypeError: if `fn` is not callable or the structure of the output of `fn` and `fn_output_signature` do not match. ValueError: if the lengths of the output of `fn` and `fn_output_signature` do not match, or if `elems` does not contain any tensor. Examples: >>> elems = np.array([1, 2, 3, 4, 5, 6]) >>> tf.map_fn(lambda x: x * x, elems) >>> elems = (np.array([1, 2, 3]), np.array([-1, 1, -1])) >>> tf.map_fn(lambda x: x[0] * x[1], elems, fn_output_signature=tf.int64) >>> elems = np.array([1, 2, 3]) >>> tf.map_fn(lambda x: (x, -x), elems, ...
fn_output_signature=(tf.int64, tf.int64))" 9065,_dtype_to_spec,tensorflow/tensorflow/python/ops/map_fn.py,525,function, 9066,_most_general_compatible_type,tensorflow/tensorflow/python/ops/map_fn.py,531,function,Returns the most general TypeSpec compatible with `spec`. 9067,_result_flat_signature_to_batchable_tensor_spec,tensorflow/tensorflow/python/ops/map_fn.py,547,function,Converts result_flat_signature -> result_batchable_tensor_specs. 9068,_elems_flat_to_batchable,tensorflow/tensorflow/python/ops/map_fn.py,557,function,Converts elems_flat -> elems_batchable. 9069,_elems_value_batchable_to_flat,tensorflow/tensorflow/python/ops/map_fn.py,570,function,Converts elems_value_batchable -> elems_value_flat. 9070,_result_value_flat_to_batchable,tensorflow/tensorflow/python/ops/map_fn.py,584,function,Converts result_value_flat -> result_value_batchable. 9071,_result_batchable_to_flat,tensorflow/tensorflow/python/ops/map_fn.py,602,function,Converts result_batchable -> result_flat. 9072,map_fn_v2,tensorflow/tensorflow/python/ops/map_fn.py,628,function,Transform `elems` by applying `fn` to each element unstacked on axis 0. 9073,_safe_shape_div,tensorflow/tensorflow/python/ops/math_grad.py,35,function,"Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`." 9074,_ArgMaxGrad,tensorflow/tensorflow/python/ops/math_grad.py,41,function, 9075,_ArgMinGrad,tensorflow/tensorflow/python/ops/math_grad.py,47,function, 9076,_EuclideanNormGrad,tensorflow/tensorflow/python/ops/math_grad.py,53,function,Gradient for EuclideanNorm. 9077,SmartBroadcastGradientArgs,tensorflow/tensorflow/python/ops/math_grad.py,67,function,"Optimized version of `broadcast_gradient_args` that caches results. This implementation avoids creating `broadcast_gradient_args` ops in the case that the input shapes are fully defined, and provides hints to the calling code that can be used to avoid creating reduction and reshaping ops. Args: x: The left input tensor to a broadcasting binary op. y: The right input tensor to a broadcasting binary op. grad: The incoming gradient tensor for a broadcasting binary op. Returns: A pair of tuples, containing: * A 3-tuple of broadcast information for x, containing: * The shape of x (as a tuple or Tensor). * The reduction indices for x (as a tuple or Tensor). * A boolean, which if True, indicates that x's shape differs from grad's shape (and so x's gradient must be reduced and/or reshaped). * A 3-tuple of broadcast information for y, containing the respective details for y." 9078,_IsScalar,tensorflow/tensorflow/python/ops/math_grad.py,143,function, 9079,_SumGrad,tensorflow/tensorflow/python/ops/math_grad.py,148,function,Gradient for Sum. 9080,_MinOrMaxGrad,tensorflow/tensorflow/python/ops/math_grad.py,220,function,Gradient for Min or Max. Amazingly it's precisely the same code. 9081,_MaxGrad,tensorflow/tensorflow/python/ops/math_grad.py,242,function,Gradient for Max. 9082,_MinGrad,tensorflow/tensorflow/python/ops/math_grad.py,248,function, 9083,_MeanGrad,tensorflow/tensorflow/python/ops/math_grad.py,253,function,Gradient for Mean. 9084,_ProdGrad,tensorflow/tensorflow/python/ops/math_grad.py,273,function,Gradient for Prod. 9085,_SegmentSumGrad,tensorflow/tensorflow/python/ops/math_grad.py,322,function,Gradient for SegmentSum. 9086,_SegmentMeanGrad,tensorflow/tensorflow/python/ops/math_grad.py,328,function,Gradient for SegmentMean. 9087,_SparseSegmentSumGrad,tensorflow/tensorflow/python/ops/math_grad.py,341,function,Gradient for SparseSegmentSum.
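The `map_fn` entry above leans heavily on `fn_output_signature`. A minimal runnable sketch (TF2 eager assumed; not taken from the indexed source) of the ragged-output case it describes:

```python
import tensorflow as tf

# fn returns a different-length row per element, so the output signature
# must be declared as a RaggedTensorSpec (values mirror the docstring).
elems = tf.constant([3, 5, 0, 2])
ragged = tf.map_fn(
    tf.range, elems,
    fn_output_signature=tf.RaggedTensorSpec(shape=[None], dtype=tf.int32))
print(ragged)  # <tf.RaggedTensor [[0, 1, 2], [0, 1, 2, 3, 4], [], [0, 1]]>
```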
9088,_SparseSegmentSumWithNumSegmentsGrad,tensorflow/tensorflow/python/ops/math_grad.py,350,function,Gradient for SparseSegmentSumWithNumSegments. 9089,_SparseSegmentMeanGrad,tensorflow/tensorflow/python/ops/math_grad.py,359,function,Gradient for SparseSegmentMean. 9090,_SparseSegmentMeanWithNumSegmentsGrad,tensorflow/tensorflow/python/ops/math_grad.py,367,function,Gradient for SparseSegmentMeanWithNumSegments. 9091,_SparseSegmentSqrtNGrad,tensorflow/tensorflow/python/ops/math_grad.py,375,function,Gradient for SparseSegmentSqrtN. 9092,_SparseSegmentSqrtNWithNumSegmentsGrad,tensorflow/tensorflow/python/ops/math_grad.py,383,function,Gradient for SparseSegmentSqrtNWithNumSegments. 9093,_SegmentMinOrMaxGrad,tensorflow/tensorflow/python/ops/math_grad.py,390,function,Gradient for SegmentMin and SegmentMax. 9094,_SegmentMinGrad,tensorflow/tensorflow/python/ops/math_grad.py,406,function,Gradient for SegmentMin. 9095,_SegmentMaxGrad,tensorflow/tensorflow/python/ops/math_grad.py,412,function,Gradient for SegmentMax. 9096,_GatherDropNegatives,tensorflow/tensorflow/python/ops/math_grad.py,417,function,"Helper function for unsorted segment ops. Gathers params for positive segment ids and gathers 0 for inputs with negative segment id. Also returns the clipped indices and a boolean mask with the same shape as ids where a positive id is masked as true. With this, the latter two can be passed as arguments to this function to reuse them." 9097,_UnsortedSegmentMinOrMaxGrad,tensorflow/tensorflow/python/ops/math_grad.py,452,function,Gradient for UnsortedSegmentMin and UnsortedSegmentMax. 9098,_UnsortedSegmentSumGrad,tensorflow/tensorflow/python/ops/math_grad.py,471,function,Gradient for UnsortedSegmentSum. 9099,_UnsortedSegmentMaxGrad,tensorflow/tensorflow/python/ops/math_grad.py,477,function,Gradient for UnsortedSegmentMax. 9100,_UnsortedSegmentMinGrad,tensorflow/tensorflow/python/ops/math_grad.py,483,function,Gradient for UnsortedSegmentMin. 9101,_UnsortedSegmentProdGrad,tensorflow/tensorflow/python/ops/math_grad.py,489,function,"Gradient for UnsortedSegmentProd. The gradient can be expressed for each segment by dividing the segment's product by each element of the segment input tensor, but this approach can't deal with zeros in the input. Unlike reduce_prod we can't use cumsum here as individual segments may have a different number of elements. Therefore we consider three cases: 1) A segment input contains no zeros and we can safely divide by the input tensor. 2) A segment contains exactly one zero. Then the gradient of each input of the segment is zero except for the 0-input, where the gradient is the product of the remaining segment entries. 3) A segment contains at least two zeros. The gradient is zero for all segment inputs." 9102,_AbsGrad,tensorflow/tensorflow/python/ops/math_grad.py,537,function, 9103,_NegGrad,tensorflow/tensorflow/python/ops/math_grad.py,543,function,Returns -grad. 9104,_InvGrad,tensorflow/tensorflow/python/ops/math_grad.py,549,function,Returns -grad * (1 / x^2). 9105,_ReciprocalGrad,tensorflow/tensorflow/python/ops/math_grad.py,556,function,Returns -grad * (1 / x^2).
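The elementwise gradient entries above (e.g. `_ReciprocalGrad`, documented as "Returns -grad * (1 / x^2)") can be checked numerically with `tf.GradientTape`. An illustrative sketch, not part of the indexed source:

```python
import tensorflow as tf

# Verify that the registered Reciprocal gradient matches -1 / x**2.
x = tf.constant([0.5, 1.0, 2.0])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.math.reciprocal(x)
print(tape.gradient(y, x).numpy())  # [-4.  , -1.  , -0.25]
print((-1.0 / x**2).numpy())        # same values
```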
9106,_InvGradGrad,tensorflow/tensorflow/python/ops/math_grad.py,563,function, 9107,_ReciprocalGradGrad,tensorflow/tensorflow/python/ops/math_grad.py,573,function, 9108,_SquareGrad,tensorflow/tensorflow/python/ops/math_grad.py,583,function, 9109,_SqrtGrad,tensorflow/tensorflow/python/ops/math_grad.py,593,function, 9110,_SqrtGradGrad,tensorflow/tensorflow/python/ops/math_grad.py,599,function, 9111,_RsqrtGrad,tensorflow/tensorflow/python/ops/math_grad.py,608,function,Returns -0.5 * grad * conj(y)^3. 9112,_RsqrtGradGrad,tensorflow/tensorflow/python/ops/math_grad.py,615,function,"Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3." 9113,_ExpGrad,tensorflow/tensorflow/python/ops/math_grad.py,628,function,Returns grad * exp(x). 9114,_Expm1Grad,tensorflow/tensorflow/python/ops/math_grad.py,637,function,Returns grad * exp(x). 9115,_LogGrad,tensorflow/tensorflow/python/ops/math_grad.py,647,function,Returns grad * (1/x). 9116,_Log1pGrad,tensorflow/tensorflow/python/ops/math_grad.py,656,function,Returns grad * (1/(1 + x)). 9117,_XLogyGrad,tensorflow/tensorflow/python/ops/math_grad.py,665,function,"Returns gradient of xlogy(x, y) with respect to x and y." 9118,_XLog1pyGrad,tensorflow/tensorflow/python/ops/math_grad.py,682,function,"Returns gradient of xlog1py(x, y) with respect to x and y." 9119,_XDivyGrad,tensorflow/tensorflow/python/ops/math_grad.py,699,function,"Returns gradient of xdivy(x, y) with respect to x and y." 9120,_SinhGrad,tensorflow/tensorflow/python/ops/math_grad.py,716,function,Returns grad * cosh(x). 9121,_CoshGrad,tensorflow/tensorflow/python/ops/math_grad.py,725,function,Returns grad * sinh(x). 9122,_TanhGrad,tensorflow/tensorflow/python/ops/math_grad.py,734,function,Returns grad * (1 - tanh(x) * tanh(x)). 9123,_AsinhGrad,tensorflow/tensorflow/python/ops/math_grad.py,743,function,Returns grad * 1/cosh(y). 9124,_AcoshGrad,tensorflow/tensorflow/python/ops/math_grad.py,752,function,Returns grad * 1/sinh(y). 9125,_AtanhGrad,tensorflow/tensorflow/python/ops/math_grad.py,761,function,Returns grad * 1/ (1 - x^2). 9126,_TanhGradGrad,tensorflow/tensorflow/python/ops/math_grad.py,773,function, 9127,_ErfGrad,tensorflow/tensorflow/python/ops/math_grad.py,781,function,Returns grad * 2/sqrt(pi) * exp(-x**2). 9128,_ErfcGrad,tensorflow/tensorflow/python/ops/math_grad.py,791,function,Returns -grad * 2/sqrt(pi) * exp(-x**2). 9129,_ErfinvGrad,tensorflow/tensorflow/python/ops/math_grad.py,802,function,Returns grad * sqrt(pi) / 2 * exp(erfinv(x)**2). 9130,_NdtriGrad,tensorflow/tensorflow/python/ops/math_grad.py,811,function,Returns grad * sqrt(2 * pi) * exp(ndtri(x)**2 / 2). 9131,_LgammaGrad,tensorflow/tensorflow/python/ops/math_grad.py,820,function,Returns grad * digamma(x). 9132,_DigammaGrad,tensorflow/tensorflow/python/ops/math_grad.py,829,function,Compute gradient of the digamma function with respect to its argument. 9133,_DawsnGrad,tensorflow/tensorflow/python/ops/math_grad.py,839,function,Compute gradient of dawsn(x) with respect to its argument. 9134,_ExpintGrad,tensorflow/tensorflow/python/ops/math_grad.py,848,function,Compute gradient of expint(x) with respect to its argument. 9135,_FresnelCosGrad,tensorflow/tensorflow/python/ops/math_grad.py,856,function,Compute gradient of fresnel_cos(x) with respect to its argument. 9136,_FresnelSinGrad,tensorflow/tensorflow/python/ops/math_grad.py,864,function,Compute gradient of fresnel_sin(x) with respect to its argument. 
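The grad functions indexed above are registered internally against their ops; the same pattern can be expressed with the public `tf.custom_gradient` API. A hypothetical sketch (the name `my_sinh` is invented here) mirroring `_SinhGrad`'s "Returns grad * cosh(x)":

```python
import tensorflow as tf

@tf.custom_gradient
def my_sinh(x):
  y = tf.math.sinh(x)
  def grad(upstream):
    # Chain rule: d(sinh)/dx = cosh(x), scaled by the upstream gradient.
    return upstream * tf.math.cosh(x)
  return y, grad

x = tf.constant(1.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  y = my_sinh(x)
print(tape.gradient(y, x).numpy())  # ~1.5430806, i.e. cosh(1.0)
```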
9137,_SpenceGrad,tensorflow/tensorflow/python/ops/math_grad.py,872,function,Compute gradient of spence(x) with respect to its argument. 9138,_BesselI0Grad,tensorflow/tensorflow/python/ops/math_grad.py,883,function,Compute gradient of bessel_i0(x) with respect to its argument. 9139,_BesselI0eGrad,tensorflow/tensorflow/python/ops/math_grad.py,892,function,Compute gradient of bessel_i0e(x) with respect to its argument. 9140,_BesselI1Grad,tensorflow/tensorflow/python/ops/math_grad.py,902,function,Compute gradient of bessel_i1(x) with respect to its argument. 9141,_BesselI1eGrad,tensorflow/tensorflow/python/ops/math_grad.py,919,function,Compute gradient of bessel_i1e(x) with respect to its argument. 9142,_BesselK0Grad,tensorflow/tensorflow/python/ops/math_grad.py,937,function,Compute gradient of bessel_k0(x) with respect to its argument. 9143,_BesselK0eGrad,tensorflow/tensorflow/python/ops/math_grad.py,946,function,Compute gradient of bessel_k0e(x) with respect to its argument. 9144,_BesselK1Grad,tensorflow/tensorflow/python/ops/math_grad.py,956,function,Compute gradient of bessel_k1(x) with respect to its argument. 9145,_BesselK1eGrad,tensorflow/tensorflow/python/ops/math_grad.py,968,function,Compute gradient of bessel_k1e(x) with respect to its argument. 9146,_BesselJ0Grad,tensorflow/tensorflow/python/ops/math_grad.py,981,function,Compute gradient of bessel_j0(x) with respect to its argument. 9147,_BesselJ1Grad,tensorflow/tensorflow/python/ops/math_grad.py,990,function,Compute gradient of bessel_j1(x) with respect to its argument. 9148,_BesselY0Grad,tensorflow/tensorflow/python/ops/math_grad.py,1007,function,Compute gradient of bessel_y0(x) with respect to its argument. 9149,_BesselY1Grad,tensorflow/tensorflow/python/ops/math_grad.py,1016,function,Compute gradient of bessel_y1(x) with respect to its argument. 9150,_IgammaGrad,tensorflow/tensorflow/python/ops/math_grad.py,1028,function,"Returns gradient of igamma(a, x) with respect to a and x." 9151,_IgammacGrad,tensorflow/tensorflow/python/ops/math_grad.py,1047,function,"Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x." 9152,_BetaincGrad,tensorflow/tensorflow/python/ops/math_grad.py,1054,function,"Returns gradient of betainc(a, b, x) with respect to x." 9153,_ZetaGrad,tensorflow/tensorflow/python/ops/math_grad.py,1082,function,"Returns gradient of zeta(x, q) with respect to x and q." 9154,_PolygammaGrad,tensorflow/tensorflow/python/ops/math_grad.py,1101,function,"Returns gradient of psi(n, x) with respect to n and x." 9155,_SigmoidGrad,tensorflow/tensorflow/python/ops/math_grad.py,1120,function,Returns grad * sigmoid(x) * (1 - sigmoid(x)). 9156,_SigmoidGradGrad,tensorflow/tensorflow/python/ops/math_grad.py,1129,function, 9157,_SignGrad,tensorflow/tensorflow/python/ops/math_grad.py,1138,function,Returns 0. 9158,_SinGrad,tensorflow/tensorflow/python/ops/math_grad.py,1145,function,Returns grad * cos(x). 9159,_CosGrad,tensorflow/tensorflow/python/ops/math_grad.py,1154,function,Returns grad * -sin(x). 9160,_TanGrad,tensorflow/tensorflow/python/ops/math_grad.py,1163,function,Returns grad * 1/sec^2(x). 9161,_AsinGrad,tensorflow/tensorflow/python/ops/math_grad.py,1174,function,Returns grad * 1/sqrt(1-x^2). 9162,_AcosGrad,tensorflow/tensorflow/python/ops/math_grad.py,1187,function,Returns grad * -1/sqrt(1-x^2). 9163,_AtanGrad,tensorflow/tensorflow/python/ops/math_grad.py,1200,function,Returns grad * 1/ (1 + x^2). 
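`_SigmoidGrad` above states the identity d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); a quick eager check (illustrative, not from the source):

```python
import tensorflow as tf

x = tf.linspace(-3.0, 3.0, 7)
with tf.GradientTape() as tape:
  tape.watch(x)
  s = tf.math.sigmoid(x)
auto_grad = tape.gradient(s, x)  # autodiff result
manual_grad = s * (1.0 - s)      # closed form from the docstring
print(tf.reduce_max(tf.abs(auto_grad - manual_grad)).numpy())  # ~0.0
```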
9164,_Atan2Grad,tensorflow/tensorflow/python/ops/math_grad.py,1212,function,"Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2)." 9165,_AddNGrad,tensorflow/tensorflow/python/ops/math_grad.py,1222,function,Copies the gradient to all inputs. 9166,_ShapesFullySpecifiedAndEqual,tensorflow/tensorflow/python/ops/math_grad.py,1228,function, 9167,_AddGrad,tensorflow/tensorflow/python/ops/math_grad.py,1240,function,Gradient for Add. 9168,_SubGrad,tensorflow/tensorflow/python/ops/math_grad.py,1274,function,Gradient for Sub. 9169,_MulGrad,tensorflow/tensorflow/python/ops/math_grad.py,1308,function,The gradient of scalar multiplication. 9170,_MulNoNanGrad,tensorflow/tensorflow/python/ops/math_grad.py,1349,function,The gradient of scalar multiplication with NaN-suppression. 9171,_DivGrad,tensorflow/tensorflow/python/ops/math_grad.py,1367,function,The gradient for the Div operator. 9172,_FloorDivGrad,tensorflow/tensorflow/python/ops/math_grad.py,1384,function,The gradient for the FloorDiv operator. 9173,_FloorModGrad,tensorflow/tensorflow/python/ops/math_grad.py,1390,function,"Returns grad * (1, -floor(x/y))." 9174,_TruncateDivGrad,tensorflow/tensorflow/python/ops/math_grad.py,1406,function, 9175,_RealDivGrad,tensorflow/tensorflow/python/ops/math_grad.py,1411,function,RealDiv op gradient. 9176,_DivNoNanGrad,tensorflow/tensorflow/python/ops/math_grad.py,1428,function,DivNoNan op gradient. 9177,_PowGrad,tensorflow/tensorflow/python/ops/math_grad.py,1446,function,"Returns grad * (y*x^(y-1), z*log(x))." 9178,_MaximumMinimumGradInputOnly,tensorflow/tensorflow/python/ops/math_grad.py,1498,function, 9179,_MaximumMinimumGrad,tensorflow/tensorflow/python/ops/math_grad.py,1508,function,Factor out the code for the gradient of Maximum or Minimum. 9180,_MaximumGrad,tensorflow/tensorflow/python/ops/math_grad.py,1546,function,"Returns grad*(x > y, x <= y) with type of grad." 9181,_MinimumGrad,tensorflow/tensorflow/python/ops/math_grad.py,1552,function,"Returns grad*(x < y, x >= y) with type of grad." 9182,_SquaredDifferenceGrad,tensorflow/tensorflow/python/ops/math_grad.py,1558,function,Returns the gradient for (x-y)^2. 9183,_SelectGrad,tensorflow/tensorflow/python/ops/math_grad.py,1611,function, 9184,_SelectGradV2,tensorflow/tensorflow/python/ops/math_grad.py,1620,function, 9185,_MatMulGradAgainstFirstOnly,tensorflow/tensorflow/python/ops/math_grad.py,1643,function,"Gradient for MatMul, only for the first input." 9186,_MatMulGradAgainstSecondOnly,tensorflow/tensorflow/python/ops/math_grad.py,1659,function,"Gradient for MatMul, only for the second input." 9187,_MatMulGrad,tensorflow/tensorflow/python/ops/math_grad.py,1676,function,Gradient for MatMul. 9188,_SparseMatMulGrad,tensorflow/tensorflow/python/ops/math_grad.py,1709,function,Gradient for SparseMatMul. 9189,_FloorGrad,tensorflow/tensorflow/python/ops/math_grad.py,1761,function, 9190,_CeilGrad,tensorflow/tensorflow/python/ops/math_grad.py,1766,function, 9191,_RoundGrad,tensorflow/tensorflow/python/ops/math_grad.py,1771,function, 9192,_RintGrad,tensorflow/tensorflow/python/ops/math_grad.py,1776,function, 9193,_BatchMatMul,tensorflow/tensorflow/python/ops/math_grad.py,1782,function,Returns the gradient of x and y given the gradient of x * y. 9194,_BatchMatMulV2,tensorflow/tensorflow/python/ops/math_grad.py,1808,function,Returns the gradient of x and y given the gradient of x * y. 9195,_ComplexGrad,tensorflow/tensorflow/python/ops/math_grad.py,1850,function,"Returns the real and imaginary components of 'grad', respectively." 
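The binary-op gradients above (`_AddGrad`, `_SubGrad`, `_MulGrad`, ...) must reduce the upstream gradient over broadcast dimensions so each input's gradient matches its own shape. A small demonstration (assumes TF2 eager):

```python
import tensorflow as tf

# x broadcasts along axis 0 and y along axis 1, so their gradients are
# the all-ones upstream gradient of shape [2, 3] summed back down.
x = tf.ones([1, 3])
y = tf.ones([2, 1])
with tf.GradientTape() as tape:
  tape.watch([x, y])
  z = tf.reduce_sum(x + y)
dx, dy = tape.gradient(z, [x, y])
print(dx.numpy())  # [[2., 2., 2.]] -- summed over the broadcast axis 0
print(dy.numpy())  # [[3.], [3.]]   -- summed over the broadcast axis 1
```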
9196,_RealGrad,tensorflow/tensorflow/python/ops/math_grad.py,1862,function,Returns 'grad' as the real part and sets the imaginary part to 0. 9197,_ImagGrad,tensorflow/tensorflow/python/ops/math_grad.py,1869,function,Returns 'grad' as the imaginary part and sets the real part to 0. 9198,_AngleGrad,tensorflow/tensorflow/python/ops/math_grad.py,1876,function,Returns -grad / (Im(x) + iRe(x)). 9199,_ConjGrad,tensorflow/tensorflow/python/ops/math_grad.py,1889,function,Returns the complex conjugate of grad. 9200,_ComplexAbsGrad,tensorflow/tensorflow/python/ops/math_grad.py,1895,function,Returns the gradient of ComplexAbs. 9201,_CastGrad,tensorflow/tensorflow/python/ops/math_grad.py,1905,function, 9202,_CrossGrad,tensorflow/tensorflow/python/ops/math_grad.py,1919,function, 9203,_CumsumGrad,tensorflow/tensorflow/python/ops/math_grad.py,1926,function, 9204,_CumprodGrad,tensorflow/tensorflow/python/ops/math_grad.py,1937,function, 9205,_CumulativeLogsumexpGrad,tensorflow/tensorflow/python/ops/math_grad.py,1951,function, 9206,_NextAfterGrad,tensorflow/tensorflow/python/ops/math_grad.py,1985,function,"Returns gradient of nextafter(x1, x2) with respect to x1 and x2." 9207,SquaredDifferenceOpTest,tensorflow/tensorflow/python/ops/math_grad_test.py,38,class, 9208,AbsOpTest,tensorflow/tensorflow/python/ops/math_grad_test.py,66,class, 9209,MinOrMaxGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,107,class, 9210,MaximumOrMinimumGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,126,class, 9211,ProdGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,145,class, 9212,EuclideanNormGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,195,class, 9213,SegmentMinOrMaxGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,332,class, 9214,FloorModGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,377,class, 9215,DivNoNanGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,392,class, 9216,MulNoNanGradientTest,tensorflow/tensorflow/python/ops/math_grad_test.py,419,class, 9217,XlogyTest,tensorflow/tensorflow/python/ops/math_grad_test.py,444,class, 9218,Xlog1pyTest,tensorflow/tensorflow/python/ops/math_grad_test.py,492,class, 9219,XdivyTest,tensorflow/tensorflow/python/ops/math_grad_test.py,542,class, 9220,PowGradTest,tensorflow/tensorflow/python/ops/math_grad_test.py,591,class, 9221,NextAfterTest,tensorflow/tensorflow/python/ops/math_grad_test.py,615,class, 9222,linspace_nd,tensorflow/tensorflow/python/ops/math_ops.py,112,function,"Generates evenly-spaced values in an interval along a given axis. A sequence of `num` evenly-spaced values is generated beginning at `start` along a given `axis`. If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`, so that the last one is exactly `stop`. If `num <= 0`, `ValueError` is raised. Matches [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s behaviour except when `num == 0`. For example: ``` tf.linspace(10.0, 12.0, 3, name=""linspace"") => [ 10.0 11.0 12.0] ``` `start` and `stop` can be tensors of arbitrary size: >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0) `axis` is where the values will be generated (the dimension in the returned tensor which corresponds to the axis will be equal to `num`) >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1) Args: start: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`. N-D tensor. First entry in the range. stop: A `Tensor`. Must have the same type and shape as `start`.
N-D tensor. Last entry in the range. num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D tensor. Number of values to generate. name: A name for the operation (optional). axis: Axis along which the operation is performed (used only when N-D tensors are provided). Returns: A `Tensor`. Has the same type as `start`." 9223,_set_doc,tensorflow/tensorflow/python/ops/math_ops.py,234,function, 9224,argmax,tensorflow/tensorflow/python/ops/math_ops.py,251,function, 9225,argmax_v2,tensorflow/tensorflow/python/ops/math_ops.py,263,function,"Returns the index with the largest value across axes of a tensor. In case of ties, returns the smallest index. For example: >>> A = tf.constant([2, 20, 30, 3, 6]) >>> tf.math.argmax(A) # A[2] is maximum in tensor A >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8], ... [14, 45, 23, 5, 27]]) >>> tf.math.argmax(B, 0) >>> tf.math.argmax(B, 1) >>> C = tf.constant([0, 0, 0, 0]) >>> tf.math.argmax(C) # Returns smallest index in case of ties Args: input: A `Tensor`. axis: An integer, the axis to reduce across. Defaults to 0. output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults to `tf.int64`. name: An optional name for the operation. Returns: A `Tensor` of type `output_type`." 9226,argmin,tensorflow/tensorflow/python/ops/math_ops.py,305,function, 9227,argmin_v2,tensorflow/tensorflow/python/ops/math_ops.py,317,function,"Returns the index with the smallest value across axes of a tensor. Returns the smallest index in case of ties. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. int32 or int64, must be in the range `[-rank(input), rank(input))`. Describes which axis of the input Tensor to reduce across. For vectors, use axis = 0. output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. name: A name for the operation (optional). Returns: A `Tensor` of type `output_type`. Usage: ```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.math.argmin(input = a) c = tf.keras.backend.eval(b) # c = 0 # here a[0] = 1 which is the smallest element of a across axis 0 ```" 9228,abs,tensorflow/tensorflow/python/ops/math_ops.py,360,function,"Computes the absolute value of a tensor. Given a tensor of integer or floating-point values, this operation returns a tensor of the same type, where each element contains the absolute value of the corresponding element in the input. Given a tensor `x` of complex numbers, this operation returns a tensor of type `float32` or `float64` that is the absolute value of each element in `x`. For a complex number \\(a + bj\\), its absolute value is computed as \\(\sqrt{a^2 + b^2}\\). For example: >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]]) >>> tf.abs(x) Args: x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, `complex64` or `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`, with absolute values. Note, for `complex64` or `complex128` input, the returned `Tensor` will be of type `float32` or `float64`, respectively."
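Per the `abs` entry above, complex input yields a real-typed result. A one-liner check using the docstring's own values:

```python
import tensorflow as tf

# |a + bj| = sqrt(a^2 + b^2); complex128 input produces a float64 result.
x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
print(tf.abs(x).numpy())  # [[5.25594901], [6.60492226]], dtype float64
```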
9229,_bucketize,tensorflow/tensorflow/python/ops/math_ops.py,399,function, 9230,DivideDelegateWithName,tensorflow/tensorflow/python/ops/math_ops.py,406,class,Use Python2/Python3 division delegation to implement divide for tensors. 9231,divide,tensorflow/tensorflow/python/ops/math_ops.py,431,function,"Computes Python style division of `x` by `y`. For example: >>> x = tf.constant([16, 12, 11]) >>> y = tf.constant([4, 6, 2]) >>> tf.divide(x,y) Args: x: A `Tensor` y: A `Tensor` name: A name for the operation (optional). Returns: A `Tensor` with same shape as input" 9232,multiply,tensorflow/tensorflow/python/ops/math_ops.py,465,function,"Returns an element-wise x * y. For example: >>> x = tf.constant(([1, 2, 3, 4])) >>> tf.math.multiply(x, x) Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also pass in non-`Tensor` arguments: >>> tf.math.multiply(7,6) If `x.shape` is not the same as `y.shape`, they will be broadcast to a compatible shape. (More about broadcasting [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).) For example: >>> x = tf.ones([1, 2]); >>> y = tf.ones([2, 1]); >>> x * y # Taking advantage of operator overriding Args: x: A Tensor. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. Raises: InvalidArgumentError: When `x` and `y` have incompatible shapes or types." 9233,_mul,tensorflow/tensorflow/python/ops/math_ops.py,516,function, 9234,subtract,tensorflow/tensorflow/python/ops/math_ops.py,526,function, 9235,_sub,tensorflow/tensorflow/python/ops/math_ops.py,537,function, 9236,_neg,tensorflow/tensorflow/python/ops/math_ops.py,551,function,"Computes numerical negative value element-wise. I.e., \(y = -x\). Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`." 9237,scalar_mul,tensorflow/tensorflow/python/ops/math_ops.py,572,function,"Multiplies a scalar times a `Tensor` or `IndexedSlices` object. Intended for use in gradient code which might deal with `IndexedSlices` objects, which are easy to multiply by a scalar but more expensive to multiply with arbitrary tensors. Args: scalar: A 0-D scalar `Tensor`. Must have known shape. x: A `Tensor` or `IndexedSlices` to be scaled. name: A name for the operation (optional). Returns: `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`. Raises: ValueError: if scalar is not a 0-D `scalar`." 9238,scalar_mul_v2,tensorflow/tensorflow/python/ops/math_ops.py,606,function, 9239,pow,tensorflow/tensorflow/python/ops/math_ops.py,613,function,"Computes the power of one value to another. Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for corresponding elements in `x` and `y`. For example: ```python x = tf.constant([[2, 2], [3, 3]]) y = tf.constant([[8, 16], [2, 3]]) tf.pow(x, y) # [[256, 65536], [9, 27]] ``` Args: x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, or `complex128`. y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, or `complex128`. name: A name for the operation (optional). Returns: A `Tensor`."
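A compact sketch exercising the `divide` and `pow` entries above (expected values come from their docstrings; TF2 eager assumed):

```python
import tensorflow as tf

x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
print(tf.pow(x, y).numpy())  # [[256, 65536], [9, 27]]

# tf.divide follows Python 3 semantics: integer inputs yield float output.
print(tf.divide(tf.constant([16, 12, 11]),
                tf.constant([4, 6, 2])).numpy())  # [4.  2.  5.5]
```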
9240,complex,tensorflow/tensorflow/python/ops/math_ops.py,642,function,"Converts two real numbers to a complex number. Given a tensor `real` representing the real part of a complex number, and a tensor `imag` representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \\(a + bj\\), where *a* represents the `real` part and *b* represents the `imag` part. The input tensors `real` and `imag` must have the same shape. For example: ```python real = tf.constant([2.25, 3.25]) imag = tf.constant([4.75, 5.75]) tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]] ``` Args: real: A `Tensor`. Must be one of the following types: `float32`, `float64`. imag: A `Tensor`. Must have the same type as `real`. name: A name for the operation (optional). Returns: A `Tensor` of type `complex64` or `complex128`. Raises: TypeError: If `real` and `imag` are not of the correct types." 9241,sign,tensorflow/tensorflow/python/ops/math_ops.py,687,function,"Returns an element-wise indication of the sign of a number. y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0. For complex numbers, y = sign(x) = x / |x| if x != 0, otherwise y = 0. Example usage: >>> tf.math.sign([0., 2., -3.]) Args: x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, int32, int64, complex64, complex128. name: A name for the operation (optional). Returns: A Tensor. Has the same type as x. If x is a SparseTensor, returns SparseTensor(x.indices, tf.math.sign(x.values, ...), x.dense_shape)." 9242,real,tensorflow/tensorflow/python/ops/math_ops.py,728,function,"Returns the real part of a complex (or real) tensor. Given a tensor `input`, this operation returns a tensor of type `float` that is the real part of each element in `input` considered as a complex number. For example: ```python x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) tf.math.real(x) # [-2.25, 3.25] ``` If `input` is already real, it is returned unchanged. Args: input: A `Tensor`. Must have numeric type. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`." 9243,imag,tensorflow/tensorflow/python/ops/math_ops.py,763,function,"Returns the imaginary part of a complex (or real) tensor. Given a tensor `input`, this operation returns a tensor of type `float` that is the imaginary part of each element in `input` considered as a complex number. If `input` is real, a tensor of all zeros is returned. For example: ```python x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) tf.math.imag(x) # [4.75, 5.75] ``` Args: input: A `Tensor`. Must be one of the following types: `float`, `double`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`." 9244,angle,tensorflow/tensorflow/python/ops/math_ops.py,797,function,"Returns the element-wise argument of a complex (or real) tensor. Given a tensor `input`, this operation returns a tensor of type `float` that is the argument of each element in `input` considered as a complex number. The elements in `input` are considered to be complex numbers of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. If `input` is real then *b* is zero by definition. The argument returned by this function is of the form \\(atan2(b, a)\\). If `input` is real, a tensor of all zeros is returned.
For example: ``` input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64) tf.math.angle(input).numpy() # ==> array([2.0131705, 1.056345 ], dtype=float32) ``` Args: input: A `Tensor`. Must be one of the following types: `float`, `double`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32` or `float64`." 9245,round,tensorflow/tensorflow/python/ops/math_ops.py,840,function,"Rounds the values of a tensor to the nearest integer, element-wise. Rounds half to even. Also known as banker's rounding. If you want to round according to the current system rounding mode, use tf::cint. For example: ```python x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5]) tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ] ``` Args: x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`. name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as `x`." 9246,cast,tensorflow/tensorflow/python/ops/math_ops.py,868,function,"Casts a tensor to a new type. The operation casts `x` (in case of `Tensor`) or `x.values` (in case of `SparseTensor` or `IndexedSlices`) to `dtype`. For example: >>> x = tf.constant([1.8, 2.2], dtype=tf.float32) >>> tf.dtypes.cast(x, tf.int32) The operation supports data types (for `x` and `dtype`) of `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`. In case of casting from complex types (`complex64`, `complex128`) to real types, only the real part of `x` is returned. In case of casting from real types to complex types (`complex64`, `complex128`), the imaginary part of the returned value is set to `0`. The handling of complex types here matches the behavior of numpy. Note that casting nan and inf values to integral types has undefined behavior. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`. dtype: The destination type. The list of supported dtypes is the same as `x`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and same type as `dtype`. Raises: TypeError: If `x` cannot be cast to the `dtype`." 9247,saturate_cast,tensorflow/tensorflow/python/ops/math_ops.py,933,function,"Performs a safe saturating cast of `value` to `dtype`. This function casts the input to `dtype` without applying any scaling. If there is a danger that values would over or underflow in the cast, this op applies the appropriate clamping before the cast. Args: value: A `Tensor`. dtype: The desired output `DType`. name: A name for the operation (optional). Returns: `value` safely cast to `dtype`." 9248,to_float,tensorflow/tensorflow/python/ops/math_ops.py,967,function,"Casts a tensor to type `float32`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `float32`. Raises: TypeError: If `x` cannot be cast to `float32`." 9249,to_double,tensorflow/tensorflow/python/ops/math_ops.py,987,function,"Casts a tensor to type `float64`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `float64`. Raises: TypeError: If `x` cannot be cast to `float64`." 9250,to_int32,tensorflow/tensorflow/python/ops/math_ops.py,1007,function,"Casts a tensor to type `int32`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `int32`. Raises: TypeError: If `x` cannot be cast to `int32`." 9251,to_int64,tensorflow/tensorflow/python/ops/math_ops.py,1027,function,"Casts a tensor to type `int64`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `int64`. Raises: TypeError: If `x` cannot be cast to `int64`." 9252,to_bfloat16,tensorflow/tensorflow/python/ops/math_ops.py,1047,function,"Casts a tensor to type `bfloat16`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `bfloat16`. Raises: TypeError: If `x` cannot be cast to `bfloat16`." 9253,to_complex64,tensorflow/tensorflow/python/ops/math_ops.py,1067,function,"Casts a tensor to type `complex64`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `complex64`. Raises: TypeError: If `x` cannot be cast to `complex64`." 9254,to_complex128,tensorflow/tensorflow/python/ops/math_ops.py,1087,function,"Casts a tensor to type `complex128`. Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with type `complex128`. Raises: TypeError: If `x` cannot be cast to `complex128`." 9255,_OverrideBinaryOperatorHelper,tensorflow/tensorflow/python/ops/math_ops.py,1108,function,"Register operators with different tensor and scalar versions. If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices, sp_values, sp_shape, dense)` and outputs `(new_sp_values)`. Args: func: the operator op_name: name of the operator being overridden clazz_object: class to override for. Either `Tensor` or `SparseTensor`." 9256,_sparse_dense_truediv,tensorflow/tensorflow/python/ops/math_ops.py,1196,function,Internal helper function for 'sp_t / dense_t'. 9257,_truediv_python3,tensorflow/tensorflow/python/ops/math_ops.py,1218,function, 9258,_div_python2,tensorflow/tensorflow/python/ops/math_ops.py,1237,function,"Divide two values using Python 2 semantics. Used for Tensor.__div__. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` returns the quotient of x and y." 9259,truediv,tensorflow/tensorflow/python/ops/math_ops.py,1267,function,"Divides x / y elementwise (using Python 3 division operator semantics). NOTE: Prefer using the Tensor operator or tf.divide which obey Python division operator semantics. This function forces Python 3 division operator semantics where all integer arguments are cast to floating types first. This op is generated by normal `x / y` division in Python 3 and in Python 2.7 with `from __future__ import division`. If you want integer division that rounds down, use `x // y` or `tf.math.floordiv`. `x` and `y` must have the same numeric type.
If the inputs are floating point, the output will have the same type. If the inputs are integral, the inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32` and `int64` (matching the behavior of Numpy). Args: x: `Tensor` numerator of numeric type. y: `Tensor` denominator of numeric type. name: A name for the operation (optional). Returns: `x / y` evaluated in floating point. Raises: TypeError: If `x` and `y` have different dtypes." 9260,div,tensorflow/tensorflow/python/ops/math_ops.py,1303,function,"Divides x / y elementwise (using Python 2 division operator semantics). NOTE: Prefer using the Tensor division operator or tf.divide which obey Python 3 division operator semantics. This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x` and `y` are both integers then the result will be an integer. This is in contrast to Python 3, where division with `/` is always a float while division with `//` is always an integer. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` returns the quotient of x and y." 9261,div_no_nan,tensorflow/tensorflow/python/ops/math_ops.py,1329,function,"Computes a safe divide which returns 0 if y is zero. Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`. y: A `Tensor` whose dtype is compatible with `x`. name: A name for the operation (optional). Returns: The element-wise value of the x divided by y." 9262,multiply_no_nan,tensorflow/tensorflow/python/ops/math_ops.py,1349,function,"Computes the product of x and y and returns 0 if y is zero, even if x is NaN or infinite. Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`. y: A `Tensor` whose dtype is compatible with `x`. name: A name for the operation (optional). Returns: The element-wise value of the x times y." 9263,floordiv,tensorflow/tensorflow/python/ops/math_ops.py,1381,function,"Divides `x / y` elementwise, rounding toward the most negative integer. The same as `tf.compat.v1.div(x,y)` for integers, but uses `tf.floor(tf.compat.v1.div(x,y))` for floating point arguments so that the result is always an integer (though possibly an integer represented as floating point). This op is generated by `x // y` floor division in Python 3 and in Python 2.7 with `from __future__ import division`. `x` and `y` must have the same type, and the result will have the same type as well. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` rounded down. Raises: TypeError: If the inputs are complex." 9264,_add_dispatch,tensorflow/tensorflow/python/ops/math_ops.py,1419,function,"The operation invoked by the `Tensor.__add__` operator. Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for `Tensor.__add__` to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: x: The left-hand side of the `+` operator. y: The right-hand side of the `+` operator. name: an optional name for the operation. Returns: The result of the elementwise `+` operation." 9265,_mul_dispatch,tensorflow/tensorflow/python/ops/math_ops.py,1448,function,"Dispatches cwise mul for ""Dense*Dense"" and ""Dense*Sparse""."
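The `div_no_nan` and `multiply_no_nan` entries above both return 0 where y is zero instead of producing inf/nan. An illustrative check (values invented here):

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 0.0])
y = tf.constant([2.0, 0.0, 0.0])
print(tf.math.divide_no_nan(x, y).numpy())    # [0.5 0.  0. ]
print(tf.math.multiply_no_nan(x, y).numpy())  # [2. 0. 0.]
```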
9266,logical_xor,tensorflow/tensorflow/python/ops/math_ops.py,1481,function,"Logical XOR function. x ^ y = (x | y) & ~(x & y) The operation works for the following input types: - Two single elements of type `bool` - One `tf.Tensor` of type `bool` and one single `bool`, where the result will be calculated by applying logical XOR with the single element to each element in the larger Tensor. - Two `tf.Tensor` objects of type `bool` of the same shape. In this case, the result will be the element-wise logical XOR of the two input tensors. Usage: >>> a = tf.constant([True]) >>> b = tf.constant([False]) >>> tf.math.logical_xor(a, b) >>> c = tf.constant([True]) >>> x = tf.constant([False, True, True, False]) >>> tf.math.logical_xor(c, x) >>> y = tf.constant([False, False, True, True]) >>> z = tf.constant([False, True, False, True]) >>> tf.math.logical_xor(y, z) Args: x: A `tf.Tensor` of type bool. y: A `tf.Tensor` of type bool. name: A name for the operation (optional). Returns: A `tf.Tensor` of type bool with the same size as that of x or y." 9267,logical_and,tensorflow/tensorflow/python/ops/math_ops.py,1529,function,"Logical AND function. The operation works for the following input types: - Two single elements of type `bool` - One `tf.Tensor` of type `bool` and one single `bool`, where the result will be calculated by applying logical AND with the single element to each element in the larger Tensor. - Two `tf.Tensor` objects of type `bool` of the same shape. In this case, the result will be the element-wise logical AND of the two input tensors. Usage: >>> a = tf.constant([True]) >>> b = tf.constant([False]) >>> tf.math.logical_and(a, b) >>> c = tf.constant([True]) >>> x = tf.constant([False, True, True, False]) >>> tf.math.logical_and(c, x) >>> y = tf.constant([False, False, True, True]) >>> z = tf.constant([False, True, False, True]) >>> tf.math.logical_and(y, z) Args: x: A `tf.Tensor` of type bool. y: A `tf.Tensor` of type bool. name: A name for the operation (optional). Returns: A `tf.Tensor` of type bool with the same size as that of x or y." 9268,and_,tensorflow/tensorflow/python/ops/math_ops.py,1569,function, 9269,or_,tensorflow/tensorflow/python/ops/math_ops.py,1575,function, 9270,xor_,tensorflow/tensorflow/python/ops/math_ops.py,1581,function, 9271,invert_,tensorflow/tensorflow/python/ops/math_ops.py,1587,function, 9272,equal,tensorflow/tensorflow/python/ops/math_ops.py,1607,function,"Returns the truth value of (x == y) element-wise. Performs a [broadcast]( https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the arguments and then an element-wise equality comparison, returning a Tensor of boolean values. For example: >>> x = tf.constant([2, 4]) >>> y = tf.constant(2) >>> tf.math.equal(x, y) >>> x = tf.constant([2, 4]) >>> y = tf.constant([2, 4]) >>> tf.math.equal(x, y) Args: x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. name: A name for the operation (optional). Returns: A `tf.Tensor` of type bool with the same size as that of x or y. Raises: `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible" 9273,not_equal,tensorflow/tensorflow/python/ops/math_ops.py,1643,function,"Returns the truth value of (x != y) element-wise. Performs a [broadcast]( https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the arguments and then an element-wise inequality comparison, returning a Tensor of boolean values.
For example: >>> x = tf.constant([2, 4]) >>> y = tf.constant(2) >>> tf.math.not_equal(x, y) >>> x = tf.constant([2, 4]) >>> y = tf.constant([2, 4]) >>> tf.math.not_equal(x, y) Args: x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. name: A name for the operation (optional). Returns: A `tf.Tensor` of type bool with the same size as that of x or y. Raises: `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible" 9274,tensor_equals,tensorflow/tensorflow/python/ops/math_ops.py,1679,function,"The operation invoked by the `Tensor.__eq__` operator. Compares two tensors element-wise for equality if they are broadcast-compatible; or returns False if they are not broadcast-compatible. (Note that this behavior differs from `tf.math.equal`, which raises an exception if the two tensors are not broadcast-compatible.) Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for `Tensor.__eq__` to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: self: The left-hand side of the `==` operator. other: The right-hand side of the `==` operator. Returns: The result of the elementwise `==` operation, or `False` if the arguments are not broadcast-compatible." 9275,tensor_not_equals,tensorflow/tensorflow/python/ops/math_ops.py,1717,function,"The operation invoked by the `Tensor.__ne__` operator. Compares two tensors element-wise for inequality if they are broadcast-compatible; or returns True if they are not broadcast-compatible. (Note that this behavior differs from `tf.math.not_equal`, which raises an exception if the two tensors are not broadcast-compatible.) Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for `Tensor.__ne__` to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: self: The left-hand side of the `!=` operator. other: The right-hand side of the `!=` operator. Returns: The result of the elementwise `!=` operation, or `True` if the arguments are not broadcast-compatible." 9276,range,tensorflow/tensorflow/python/ops/math_ops.py,1757,function,"Creates a sequence of numbers. Creates a sequence of numbers that begins at `start` and extends by increments of `delta` up to but not including `limit`. The dtype of the resulting tensor is inferred from the inputs unless it is provided explicitly. Like the Python builtin `range`, `start` defaults to 0, so that `range(n) = range(0, n)`. For example: >>> start = 3 >>> limit = 18 >>> delta = 3 >>> tf.range(start, limit, delta) >>> start = 3 >>> limit = 1 >>> delta = -0.5 >>> tf.range(start, limit, delta) >>> limit = 5 >>> tf.range(limit) Args: start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit` is not None; otherwise, acts as range limit and first entry defaults to 0. limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None, defaults to the value of `start` while the first entry of the range defaults to 0. delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to 1. dtype: The type of the elements of the resulting tensor. name: A name for the operation. Defaults to ""range"". 
Returns: A 1-D `Tensor` of type `dtype`. @compatibility(numpy) Equivalent to np.arange @end_compatibility" 9277,_range_tensor_conversion_function,tensorflow/tensorflow/python/ops/math_ops.py,1839,function, 9278,_ReductionDims,tensorflow/tensorflow/python/ops/math_ops.py,1850,function,"Returns range(0, rank(x)) if axis is None." 9279,_has_fully_defined_shape,tensorflow/tensorflow/python/ops/math_ops.py,1869,function,Returns true if tensor has a fully defined shape. 9280,_may_reduce_to_scalar,tensorflow/tensorflow/python/ops/math_ops.py,1874,function,Set a reduction's output shape to be a scalar if we are certain. 9281,reduce_sum_v1,tensorflow/tensorflow/python/ops/math_ops.py,1887,function,"Computes the sum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1, 1, 1], [1, 1, 1]]) tf.reduce_sum(x) # 6 tf.reduce_sum(x, 0) # [2, 2, 2] tf.reduce_sum(x, 1) # [3, 3] tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]] tf.reduce_sum(x, [0, 1]) # 6 ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32 to int64 while tensorflow returns the same dtype as the input. @end_compatibility" 9282,reduce_sum,tensorflow/tensorflow/python/ops/math_ops.py,1942,function,"Computes the sum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> # x has a shape of (2, 3) (two rows and three columns): >>> x = tf.constant([[1, 1, 1], [1, 1, 1]]) >>> x.numpy() array([[1, 1, 1], [1, 1, 1]], dtype=int32) >>> # sum all the elements >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6 >>> tf.reduce_sum(x).numpy() 6 >>> # reduce along the first dimension >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2] >>> tf.reduce_sum(x, 0).numpy() array([2, 2, 2], dtype=int32) >>> # reduce along the second dimension >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3] >>> tf.reduce_sum(x, 1).numpy() array([3, 3], dtype=int32) >>> # keep the original dimensions >>> tf.reduce_sum(x, 1, keepdims=True).numpy() array([[3], [3]], dtype=int32) >>> # reduce along both dimensions >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6 >>> # or, equivalently, reduce along rows, then reduce the resultant array >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2] >>> # 2 + 2 + 2 = 6 >>> tf.reduce_sum(x, [0, 1]).numpy() 6 Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce.
If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32 to int64 while tensorflow returns the same dtype as the input. @end_compatibility" 9283,reduce_sum_with_dims,tensorflow/tensorflow/python/ops/math_ops.py,2006,function, 9284,reduce_euclidean_norm,tensorflow/tensorflow/python/ops/math_ops.py,2019,function,"Computes the Euclidean norm of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32 tf.math.reduce_euclidean_norm(x) # returns 4 as dtype is tf.int32 y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32) tf.math.reduce_euclidean_norm(y) # returns 4.1231055 which is sqrt(17) tf.math.reduce_euclidean_norm(y, 0) # [sqrt(2), sqrt(5), sqrt(10)] tf.math.reduce_euclidean_norm(y, 1) # [sqrt(14), sqrt(3)] tf.math.reduce_euclidean_norm(y, 1, keepdims=True) # [[sqrt(14)], [sqrt(3)]] tf.math.reduce_euclidean_norm(y, [0, 1]) # sqrt(17) ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor, of the same dtype as the input_tensor." 9285,count_nonzero,tensorflow/tensorflow/python/ops/math_ops.py,2069,function,"Computes number of nonzero elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. **NOTE** Floating point comparison to zero is done by exact floating point equality check. Small values are **not** rounded to zero for purposes of the nonzero check. For example: ```python x = tf.constant([[0, 1, 0], [1, 1, 0]]) tf.math.count_nonzero(x) # 3 tf.math.count_nonzero(x, 0) # [1, 2, 0] tf.math.count_nonzero(x, 1) # [1, 2] tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]] tf.math.count_nonzero(x, [0, 1]) # 3 ``` **NOTE** Strings are compared against zero-length empty string `""""`. Any string with a size greater than zero is considered nonzero. For example: ```python x = tf.constant(["""", ""a"", "" "", ""b"", """"]) tf.math.count_nonzero(x) # 3, with ""a"", "" "", and ""b"" as nonzero strings. ``` Args: input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or `string`. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. input: Overrides input_tensor. For compatibility. Returns: The reduced tensor (number of nonzero values)." 9286,count_nonzero_v2,tensorflow/tensorflow/python/ops/math_ops.py,2141,function,"Computes number of nonzero elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. **NOTE** Floating point comparison to zero is done by exact floating point equality check. Small values are **not** rounded to zero for purposes of the nonzero check. For example: ```python x = tf.constant([[0, 1, 0], [1, 1, 0]]) tf.math.count_nonzero(x) # 3 tf.math.count_nonzero(x, 0) # [1, 2, 0] tf.math.count_nonzero(x, 1) # [1, 2] tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]] tf.math.count_nonzero(x, [0, 1]) # 3 ``` **NOTE** Strings are compared against zero-length empty string `""""`. Any string with a size greater than zero is considered nonzero. For example: ```python x = tf.constant(["""", ""a"", "" "", ""b"", """"]) tf.math.count_nonzero(x) # 3, with ""a"", "" "", and ""b"" as nonzero strings. ``` Args: input: The tensor to reduce. Should be of numeric type, `bool`, or `string`. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input), rank(input))`. keepdims: If true, retains reduced dimensions with length 1. dtype: The output dtype; defaults to `tf.int64`. name: A name for the operation (optional). Returns: The reduced tensor (number of nonzero values)." 9287,reduce_mean_v1,tensorflow/tensorflow/python/ops/math_ops.py,2209,function,"Computes the mean of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis` by computing the mean of elements across the dimensions in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[1., 1.], [2., 2.]]) >>> tf.reduce_mean(x) >>> tf.reduce_mean(x, 0) >>> tf.reduce_mean(x, 1) Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.mean Please note that `np.mean` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`.
On the other hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`, for example: >>> x = tf.constant([1, 0, 1, 0]) >>> tf.reduce_mean(x) >>> y = tf.constant([1., 0., 1., 0.]) >>> tf.reduce_mean(y) @end_compatibility" 9288,reduce_mean,tensorflow/tensorflow/python/ops/math_ops.py,2276,function,"Computes the mean of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis` by computing the mean of elements across the dimensions in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[1., 1.], [2., 2.]]) >>> tf.reduce_mean(x) >>> tf.reduce_mean(x, 0) >>> tf.reduce_mean(x, 1) Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.mean Please note that `np.mean` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. On the other hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`, for example: >>> x = tf.constant([1, 0, 1, 0]) >>> tf.reduce_mean(x) >>> y = tf.constant([1., 0., 1., 0.]) >>> tf.reduce_mean(y) @end_compatibility" 9289,reduce_variance,tensorflow/tensorflow/python/ops/math_ops.py,2336,function,"Computes the variance of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[1., 2.], [3., 4.]]) >>> tf.math.reduce_variance(x) >>> tf.math.reduce_variance(x, 0) >>> tf.math.reduce_variance(x, 1) Args: input_tensor: The tensor to reduce. Should have real or complex type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name scope for the associated operations (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. Note, for `complex64` or `complex128` input, the returned `Tensor` will be of type `float32` or `float64`, respectively. @compatibility(numpy) Equivalent to np.var Please note `np.var` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. On the other hand, `tf.math.reduce_variance` has aggressive type inference from `input_tensor`. @end_compatibility" 9290,reduce_std,tensorflow/tensorflow/python/ops/math_ops.py,2397,function,"Computes the standard deviation of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. 
If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[1., 2.], [3., 4.]]) >>> tf.math.reduce_std(x) >>> tf.math.reduce_std(x, 0) >>> tf.math.reduce_std(x, 1) Args: input_tensor: The tensor to reduce. Should have real or complex type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name scope for the associated operations (optional). Returns: The reduced tensor, of the same dtype as the input_tensor. Note, for `complex64` or `complex128` input, the returned `Tensor` will be of type `float32` or `float64`, respectively. @compatibility(numpy) Equivalent to np.std Please note `np.std` has a `dtype` parameter that could be used to specify the output type. By default this is `dtype=float64`. On the other hand, `tf.math.reduce_std` has aggressive type inference from `input_tensor`. @end_compatibility" 9291,reduce_prod,tensorflow/tensorflow/python/ops/math_ops.py,2447,function,"Computes the product of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.prod @end_compatibility" 9292,reduce_prod_v1,tensorflow/tensorflow/python/ops/math_ops.py,2486,function,"Computes the product of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.prod @end_compatibility" 9293,reduce_min_v1,tensorflow/tensorflow/python/ops/math_ops.py,2532,function,"Computes the minimum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. 
Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.min @end_compatibility" 9294,reduce_min,tensorflow/tensorflow/python/ops/math_ops.py,2575,function,"Computes the minimum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. For example: >>> a = tf.constant([[1, 2], [3, 4]]) >>> tf.reduce_min(a) @compatibility(numpy) Equivalent to np.min @end_compatibility" 9295,reduce_max_v1,tensorflow/tensorflow/python/ops/math_ops.py,2619,function,"Computes the maximum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.max @end_compatibility" 9296,reduce_max,tensorflow/tensorflow/python/ops/math_ops.py,2662,function,"Computes the maximum of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. 
Usage example: >>> x = tf.constant([5, 1, 2, 4]) >>> print(tf.reduce_max(x)) tf.Tensor(5, shape=(), dtype=int32) >>> x = tf.constant([-5, -1, -2, -4]) >>> print(tf.reduce_max(x)) tf.Tensor(-1, shape=(), dtype=int32) >>> x = tf.constant([4, float('nan')]) >>> print(tf.reduce_max(x)) tf.Tensor(4.0, shape=(), dtype=float32) >>> x = tf.constant([float('nan'), float('nan')]) >>> print(tf.reduce_max(x)) tf.Tensor(-inf, shape=(), dtype=float32) >>> x = tf.constant([float('-inf'), float('inf')]) >>> print(tf.reduce_max(x)) tf.Tensor(inf, shape=(), dtype=float32) See the numpy docs for `np.amax` and `np.nanmax` behavior. Args: input_tensor: The tensor to reduce. Should have real numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor." 9297,reduce_max_with_dims,tensorflow/tensorflow/python/ops/math_ops.py,2708,function, 9298,reduce_all_v1,tensorflow/tensorflow/python/ops/math_ops.py,2724,function,"Computes the ""logical and"" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_all(x) # False tf.reduce_all(x, 0) # [False, False] tf.reduce_all(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.all @end_compatibility" 9299,reduce_all,tensorflow/tensorflow/python/ops/math_ops.py,2776,function,"Computes the ""logical and"" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_all(x) # False tf.reduce_all(x, 0) # [False, False] tf.reduce_all(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.all @end_compatibility" 9300,reduce_any_v1,tensorflow/tensorflow/python/ops/math_ops.py,2824,function,"Computes the ""logical or"" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. 
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_any(x) # True tf.reduce_any(x, 0) # [True, True] tf.reduce_any(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.any @end_compatibility" 9301,reduce_any,tensorflow/tensorflow/python/ops/math_ops.py,2876,function,"Computes the ""logical or"" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: ```python x = tf.constant([[True, True], [False, False]]) tf.reduce_any(x) # True tf.reduce_any(x, 0) # [True, True] tf.reduce_any(x, 1) # [True, False] ``` Args: input_tensor: The boolean tensor to reduce. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.any @end_compatibility" 9302,reduce_logsumexp_v1,tensorflow/tensorflow/python/ops/math_ops.py,2924,function,"Computes log(sum(exp(elements across dimensions of a tensor))). Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: ```python x = tf.constant([[0., 0., 0.], [0., 0., 0.]]) tf.reduce_logsumexp(x) # log(6) tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)] tf.reduce_logsumexp(x, 1) # [log(3), log(3)] tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]] tf.reduce_logsumexp(x, [0, 1]) # log(6) ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced tensor." 
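The stability claim in the `reduce_logsumexp` entries above can be checked directly. The following is a small sketch, not taken from the indexed source, assuming TensorFlow 2.x eager execution; the expected values in the comments follow from log(2) ≈ 0.6931.

```python
import tensorflow as tf

x = tf.constant([[1000., 1000.], [-1000., -1000.]])

# The naive composition overflows (exp(1000) -> inf) and underflows
# (exp(-1000) -> 0), losing both rows:
print(tf.math.log(tf.reduce_sum(tf.exp(x), axis=1)))  # expected: [inf, -inf]

# reduce_logsumexp shifts by the row maximum before exponentiating,
# log(sum(exp(x))) == max(x) + log(sum(exp(x - max(x)))), so it stays finite:
print(tf.reduce_logsumexp(x, axis=1))  # expected: [1000.6931, -999.3069]
```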
9303,reduce_logsumexp,tensorflow/tensorflow/python/ops/math_ops.py,2978,function,"Computes log(sum(exp(elements across dimensions of a tensor))). Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each of the entries in `axis`, which must be unique. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: ```python x = tf.constant([[0., 0., 0.], [0., 0., 0.]]) tf.reduce_logsumexp(x) # log(6) tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)] tf.reduce_logsumexp(x, 1) # [log(3), log(3)] tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]] tf.reduce_logsumexp(x, [0, 1]) # log(6) ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor." 9304,trace,tensorflow/tensorflow/python/ops/math_ops.py,3041,function,"Compute the trace of a tensor `x`. `trace(x)` returns the sum along the main diagonal of each inner-most matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])` For example: ```python x = tf.constant([[1, 2], [3, 4]]) tf.linalg.trace(x) # 5 x = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) tf.linalg.trace(x) # 15 x = tf.constant([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[-1, -2, -3], [-4, -5, -6], [-7, -8, -9]]]) tf.linalg.trace(x) # [15, -15] ``` Args: x: tensor. name: A name for the operation (optional). Returns: The trace of input tensor." 9305,matmul,tensorflow/tensorflow/python/ops/math_ops.py,3084,function,"Multiplies matrix `a` by matrix `b`, producing `a` * `b`. The inputs must, following any transpositions, be tensors of rank >= 2 where the inner 2 dimensions specify valid matrix multiplication dimensions, and any further outer dimensions specify matching batch size. Both matrices must be of the same type. The supported types are: `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`. Either matrix can be transposed or adjointed (conjugated and transposed) on the fly by setting one of the corresponding flags to `True`. These are `False` by default. If one or both of the matrices contain a lot of zeros, a more efficient multiplication algorithm can be used by setting the corresponding `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default. This optimization is only available for plain matrices (rank-2 tensors) with datatypes `bfloat16` or `float32`.
A simple 2-D tensor matrix multiplication: >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) >>> a # 2-D tensor >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) >>> b # 2-D tensor >>> c = tf.matmul(a, b) >>> c # `a` * `b` A batch matrix multiplication with batch shape [2]: >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) >>> a # 3-D tensor >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2]) >>> b # 3-D tensor >>> c = tf.matmul(a, b) >>> c # `a` * `b` Since Python >= 3.5, the `@` operator is supported (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow, it simply calls the `tf.matmul()` function, so the following lines are equivalent: >>> d = a @ b @ [[10], [11]] >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]]) Args: a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128` and rank > 1. b: `tf.Tensor` with same type and rank as `a`. transpose_a: If `True`, `a` is transposed before multiplication. transpose_b: If `True`, `b` is transposed before multiplication. adjoint_a: If `True`, `a` is conjugated and transposed before multiplication. adjoint_b: If `True`, `b` is conjugated and transposed before multiplication. a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this **does not support `tf.sparse.SparseTensor`**, it just makes optimizations that assume most values in `a` are zero. See `tf.sparse.sparse_dense_matmul` for some support for `tf.sparse.SparseTensor` multiplication. b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this **does not support `tf.sparse.SparseTensor`**, it just makes optimizations that assume most values in `b` are zero. See `tf.sparse.sparse_dense_matmul` for some support for `tf.sparse.SparseTensor` multiplication. name: Name for the operation (optional). Returns: A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix is the product of the corresponding matrices in `a` and `b`, e.g. if all transpose or adjoint attributes are `False`: `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`, for all indices `i`, `j`. Note: This is matrix product, not element-wise product. Raises: ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and `adjoint_b` are both set to `True`." 9306,matvec,tensorflow/tensorflow/python/ops/math_ops.py,3279,function,"Multiplies matrix `a` by vector `b`, producing `a` * `b`. The matrix `a` must, following any transpositions, be a tensor of rank >= 2, with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast with `shape(b)[:-1]`. Both `a` and `b` must be of the same type. The supported types are: `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`. Matrix `a` can be transposed or adjointed (conjugated and transposed) on the fly by setting one of the corresponding flags to `True`. These are `False` by default. If one or both of the inputs contain a lot of zeros, a more efficient multiplication algorithm can be used by setting the corresponding `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default. This optimization is only available for plain matrices/vectors (rank-2/1 tensors) with datatypes `bfloat16` or `float32`.
For example: ```python # 2-D tensor `a` # [[1, 2, 3], # [4, 5, 6]] a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) # 1-D tensor `b` # [7, 9, 11] b = tf.constant([7, 9, 11], shape=[3]) # `a` * `b` # [ 58, 139] c = tf.linalg.matvec(a, b) # 3-D tensor `a` # [[[ 1, 2, 3], # [ 4, 5, 6]], # [[ 7, 8, 9], # [10, 11, 12]]] a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) # 2-D tensor `b` # [[13, 14, 15], # [16, 17, 18]] b = tf.constant(np.arange(13, 19, dtype=np.int32), shape=[2, 3]) # `a` * `b` # [[ 86, 212], # [410, 563]] c = tf.linalg.matvec(a, b) ``` Args: a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128` and rank > 1. b: `Tensor` with same type as `a` and compatible dimensions. transpose_a: If `True`, `a` is transposed before multiplication. adjoint_a: If `True`, `a` is conjugated and transposed before multiplication. a_is_sparse: If `True`, `a` is treated as a sparse matrix. b_is_sparse: If `True`, `b` is treated as a sparse matrix. name: Name for the operation (optional). Returns: A `Tensor` of the same type as `a` and `b` where each inner-most vector is the product of the corresponding matrices in `a` and vectors in `b`, e.g. if all transpose or adjoint attributes are `False`: `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i. Note: This is matrix-vector product, not element-wise product. Raises: ValueError: If transpose_a and adjoint_a are both set to True." 9307,_calc_mat_mul_flops,tensorflow/tensorflow/python/ops/math_ops.py,3386,function,Calculates the compute resources needed for MatMul. 9308,_calc_batch_mat_mul_flops,tensorflow/tensorflow/python/ops/math_ops.py,3403,function,Calculates the compute resources needed for BatchMatMul. 9309,_as_indexed_slices,tensorflow/tensorflow/python/ops/math_ops.py,3418,function,"Convert 'x' to IndexedSlices. Convert a dense Tensor to a block-sparse IndexedSlices. Args: x: Either a Tensor object, or an IndexedSlices object. optimize: if true, attempt to optimize the conversion of 'x'. Returns: An IndexedSlices object. Raises: TypeError: If 'x' is not a Tensor or an IndexedSlices object." 9310,_as_indexed_slices_list,tensorflow/tensorflow/python/ops/math_ops.py,3442,function,"Convert all elements of 'inputs' to IndexedSlices. Additionally, homogenize the types of all the indices to either int32 or int64. Args: inputs: List containing either Tensor or IndexedSlices objects. optimize: if true, attempt to optimize the conversion of each input. Returns: A list of IndexedSlices objects. Raises: TypeError: If 'inputs' is not a list or a tuple." 9311,add_n,tensorflow/tensorflow/python/ops/math_ops.py,3479,function,"Adds all input tensors element-wise. `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but it waits for all of its inputs to be ready before beginning to sum. This buffering can result in higher memory consumption when inputs are ready at different times, since the minimum temporary storage required is proportional to the input size rather than the output size. This op does not [broadcast]( https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html) its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator) instead. For example: >>> a = tf.constant([[3, 5], [4, 8]]) >>> b = tf.constant([[1, 6], [2, 9]]) >>> tf.math.add_n([a, b, a]) Args: inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the same shape and type. `tf.IndexedSlices` objects will be converted into dense tensors prior to adding.
name: A name for the operation (optional). Returns: A `tf.Tensor` of the same shape and type as the elements of `inputs`. Raises: ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred." 9312,accumulate_n,tensorflow/tensorflow/python/ops/math_ops.py,3537,function,"Returns the element-wise sum of a list of tensors. Optionally, pass `shape` and `tensor_dtype` for shape and type checking; otherwise, these are inferred. `accumulate_n` performs the same operation as `tf.math.add_n`. For example: ```python a = tf.constant([[1, 2], [3, 4]]) b = tf.constant([[5, 0], [0, 6]]) tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]] # Explicitly pass shape and type tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32) # [[7, 4], # [6, 14]] ``` Args: inputs: A list of `Tensor` objects, each with same shape and type. shape: Expected shape of elements of `inputs` (optional). Also controls the output shape of this op, which may affect type inference in other ops. A value of `None` means ""infer the input shape from the shapes in `inputs`"". tensor_dtype: Expected data type of `inputs` (optional). A value of `None` means ""infer the input dtype from `inputs[0]`"". name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as the elements of `inputs`. Raises: ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred." 9313,_accumulate_n_grad,tensorflow/tensorflow/python/ops/math_ops.py,3607,function,Same as gradient for AddN. Copies the gradient to all inputs. 9314,sigmoid,tensorflow/tensorflow/python/ops/math_ops.py,3615,function,"Computes sigmoid of `x` element-wise. Formula for calculating sigmoid(x): `y = 1 / (1 + exp(-x))`. For x \in (-inf, inf) => sigmoid(x) \in (0, 1) Example Usage: If a positive number is large, then its sigmoid will approach 1, since the formula will be `y = <large_num> / (1 + <large_num>)` >>> x = tf.constant([0.0, 1.0, 50.0, 100.0]) >>> tf.math.sigmoid(x) If a negative number is large, its sigmoid will approach 0, since the formula will be `y = 1 / (1 + <large_num>)` >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0]) >>> tf.math.sigmoid(x) Args: x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or `complex128`. name: A name for the operation (optional). Returns: A Tensor with the same type as `x`. Usage Example: >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32) >>> tf.sigmoid(x) @compatibility(scipy) Equivalent to scipy.special.expit @end_compatibility" 9315,log_sigmoid,tensorflow/tensorflow/python/ops/math_ops.py,3668,function,"Computes log sigmoid of `x` element-wise. Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability, we use `y = -tf.nn.softplus(-x)`. Args: x: A Tensor with type `float32` or `float64`. name: A name for the operation (optional). Returns: A Tensor with the same type as `x`." 9316,cumsum,tensorflow/tensorflow/python/ops/math_ops.py,3688,function,"Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output: For example: >>> # tf.cumsum([a, b, c]) # [a, a + b, a + b + c] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x) >>> # using varying `axis` values >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]]) >>> tf.cumsum(y, axis=0) >>> tf.cumsum(y, axis=1) By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed instead: >>> # tf.cumsum([a, b, c], exclusive=True) # [0, a, a + b] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x, exclusive=True) By setting the `reverse` kwarg to `True`, the cumsum is performed in the opposite direction: >>> # tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x, reverse=True) This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0] >>> x = tf.constant([2, 4, 6, 8]) >>> tf.cumsum(x, exclusive=True, reverse=True) Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. axis: A `Tensor` of type `int32` (default: 0). Must be in the range `[-rank(x), rank(x))`. exclusive: If `True`, perform exclusive cumsum. reverse: A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`." 9317,cumprod,tensorflow/tensorflow/python/ops/math_ops.py,3761,function,"Compute the cumulative product of the tensor `x` along `axis`. By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output: ```python tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c] ``` By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed instead: ```python tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b] ``` By setting the `reverse` kwarg to `True`, the cumprod is performed in the opposite direction: ```python tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c] ``` This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: ```python tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1] ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. axis: A `Tensor` of type `int32` (default: 0). Must be in the range `[-rank(x), rank(x))`. exclusive: If `True`, perform exclusive cumprod. reverse: A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`." 9318,cumulative_logsumexp,tensorflow/tensorflow/python/ops/math_ops.py,3814,function,"Compute the cumulative log-sum-exp of the tensor `x` along `axis`. By default, this op performs an inclusive cumulative log-sum-exp, which means that the first element of the input is identical to the first element of the output. This operation is significantly more numerically stable than the equivalent tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although it computes the same result given infinite numerical precision.
However, note that in some cases, it may be less stable than `tf.math.reduce_logsumexp` for a given element, as it applies the ""log-sum-exp trick"" in a different way. More precisely, where `tf.math.reduce_logsumexp` uses the following trick: ``` log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x) ``` it cannot be directly used here as there is no fast way of applying it to each prefix `x[:i]`. Instead, this function implements a prefix scan using pairwise log-add-exp, which is a commutative and associative (up to floating point precision) operator: ``` log_add_exp(x, y) = log(exp(x) + exp(y)) = log(1 + exp(min(x, y) - max(x, y))) + max(x, y) ``` However, reducing using the above operator leads to a different computation tree (logs are taken repeatedly instead of only at the end), and the maximum is only computed pairwise instead of over the entire prefix. In general, this leads to a different and slightly less precise computation. Args: x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`. axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the range `[-rank(x), rank(x))`. exclusive: If `True`, perform exclusive cumulative log-sum-exp. reverse: If `True`, performs the cumulative log-sum-exp in the reverse direction. name: A name for the operation (optional). Returns: A `Tensor`. Has the same shape and type as `x`." 9319,conj,tensorflow/tensorflow/python/ops/math_ops.py,3871,function,"Returns the complex conjugate of a complex number. Given a tensor `input` of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in `input`. The complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. The complex conjugate returned by this operation is of the form \\(a - bj\\). For example: # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] If `x` is real, it is returned unchanged. Args: x: `Tensor` to conjugate. Must have numeric or variant type. name: A name for the operation (optional). Returns: A `Tensor` that is the conjugate of `x` (with the same type). Raises: TypeError: If `x` is not a numeric tensor." 9320,reduced_shape,tensorflow/tensorflow/python/ops/math_ops.py,3913,function,"Helper function for reduction ops. Args: input_shape: 1-D Tensor, the shape of the Tensor being reduced. axes: 1-D Tensor, the reduction axes. Returns: A 1-D Tensor, the output shape as if keepdims were set to True." 9321,_unsorted_segment_N,tensorflow/tensorflow/python/ops/math_ops.py,3955,function,"Helper function for unsorted_segment_mean/_sqrtN. Computes the number of segment entries with 0-entries set to 1 to allow division by N." 9322,unsorted_segment_mean,tensorflow/tensorflow/python/ops/math_ops.py,3983,function,"Computes the mean along segments of a tensor. Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. This operator is similar to the unsorted segment sum operator found [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). Instead of computing the sum over segments, it computes the mean of all entries belonging to a segment such that: \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of occurrences of id \\(i\\). If there is no entry for a given segment ID `i`, it outputs 0.
If the given segment ID `i` is negative, the value is dropped and will not be added to the sum of the segment. Args: data: A `Tensor` with floating point or complex dtype. segment_ids: An integer tensor whose shape is a prefix of `data.shape`. num_segments: An integer scalar `Tensor`. The number of distinct segment IDs. name: A name for the operation (optional). Returns: A `Tensor`. Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`." 9323,unsorted_segment_sqrt_n,tensorflow/tensorflow/python/ops/math_ops.py,4030,function,"Computes the sum along segments of a tensor divided by the sqrt(N). Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. This operator is similar to the unsorted segment sum operator found [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). In addition to computing the sum over segments, it divides the results by sqrt(N). \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of occurrences of id \\(i\\). If there is no entry for a given segment ID `i`, it outputs 0. Note that this op only supports floating point and complex dtypes, due to tf.sqrt only supporting these types. If the given segment ID `i` is negative, the value is dropped and will not be added to the sum of the segment. Args: data: A `Tensor` with floating point or complex dtype. segment_ids: An integer tensor whose shape is a prefix of `data.shape`. num_segments: An integer scalar `Tensor`. The number of distinct segment IDs. name: A name for the operation (optional). Returns: A `Tensor`. Has same shape as data, except for the first `segment_ids.rank` dimensions, which are replaced with a single dimension which has size `num_segments`." 9324,sparse_segment_sum,tensorflow/tensorflow/python/ops/math_ops.py,4076,function,"Computes the sum along sparse segments of a tensor. Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) # Select two rows, one segment. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) # => [[0 0 0 0]] # Select two rows, two segments. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) # => [[ 1 2 3 4] # [-1 -2 -3 -4]] # With missing segment ids. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]), num_segments=4) # => [[ 1 2 3 4] # [ 0 0 0 0] # [-1 -2 -3 -4] # [ 0 0 0 0]] # Select all rows, two segments. tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) # => [[0 0 0 0] # [5 6 7 8]] # Which is equivalent to: tf.math.segment_sum(c, tf.constant([0, 0, 1])) ``` Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated.
name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. Returns: A `tensor` of the same shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred from the last element in `segment_ids`." 9325,sparse_segment_sum_v2,tensorflow/tensorflow/python/ops/math_ops.py,4152,function,"Computes the sum along sparse segments of a tensor. Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) # Select two rows, one segment. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) # => [[0 0 0 0]] # Select two rows, two segments. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) # => [[ 1 2 3 4] # [-1 -2 -3 -4]] # With missing segment ids. tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]), num_segments=4) # => [[ 1 2 3 4] # [ 0 0 0 0] # [-1 -2 -3 -4] # [ 0 0 0 0]] # Select all rows, two segments. tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) # => [[0 0 0 0] # [5 6 7 8]] # Which is equivalent to: tf.math.segment_sum(c, tf.constant([0, 0, 1])) ``` Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. name: A name for the operation (optional). Returns: A `tensor` of the same shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred from the last element in `segment_ids`." 9326,sparse_segment_mean,tensorflow/tensorflow/python/ops/math_ops.py,4221,function,"Computes the mean along sparse segments of a tensor. Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. Like `tf.math.segment_mean`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. Returns: A `tensor` of the same shape as data, except for dimension 0 which has size `k`, the number of segments specified via `num_segments` or inferred from the last element in `segment_ids`."
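Unlike `sparse_segment_sum`, the `sparse_segment_mean` entry above carries no inline example. The sketch below is not from the indexed source; it assumes the TensorFlow 2.x `tf.sparse.segment_mean` export and illustrates the shared `indices`/`segment_ids` convention, with the expected result worked out by hand in the comments.

```python
import tensorflow as tf

c = tf.constant([[2., 2.], [4., 4.], [6., 6.]])

# Rows 0 and 1 (selected by `indices`) are averaged into segment 0;
# row 2 forms segment 1 on its own.
print(tf.sparse.segment_mean(c,
                             tf.constant([0, 1, 2]),   # indices into c
                             tf.constant([0, 0, 1])))  # output segment ids
# expected: [[3., 3.],
#            [6., 6.]]
```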
9327,sparse_segment_mean_v2,tensorflow/tensorflow/python/ops/math_ops.py,4267,function,"Computes the mean along sparse segments of a tensor. Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. Like `tf.math.segment_mean`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. `segment_ids` is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases `num_segments` is used to determine the size of the output. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `data`, except for dimension 0, which has size `k`, the number of segments specified via `num_segments` or inferred from the last element in `segment_ids`." 9328,sparse_segment_sqrt_n,tensorflow/tensorflow/python/ops/math_ops.py,4306,function,"Computes the sum along sparse segments of a tensor divided by the sqrt(N). `N` is the size of the segment being reduced. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. Returns: A `Tensor` of the same shape as `data`, except for dimension 0, which has size `k`, the number of segments specified via `num_segments` or inferred from the last element in `segment_ids`." 9329,sparse_segment_sqrt_n_v2,tensorflow/tensorflow/python/ops/math_ops.py,4343,function,"Computes the sum along sparse segments of a tensor divided by the sqrt(N). Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. Like `tf.sparse.segment_mean`, but instead of dividing by the size of the segment, `N`, it divides by `sqrt(N)`. Args: data: A `Tensor` with data that will be assembled in the output. indices: A 1-D `Tensor` with indices into `data`. Has same rank as `segment_ids`. segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output `Tensor`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `data`, except for dimension 0, which has size `k`, the number of segments specified via `num_segments` or inferred from the last element in `segment_ids`." 9330,tensordot,tensorflow/tensorflow/python/ops/math_ops.py,4378,function,"Tensor contraction of a and b along specified axes and outer product. Tensordot (also known as tensor contraction) sums the product of elements from `a` and `b` over the indices specified by `a_axes` and `b_axes`. The lists `a_axes` and `b_axes` specify those pairs of axes along which to contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`.
The lists `a_axes` and `b_axes` must have identical length and consist of unique integers that specify valid axes for each of the tensors. Additionally, the outer product is supported by passing `axes=0`. This operation corresponds to `numpy.tensordot(a, b, axes)`. Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1` is equivalent to matrix multiplication. Example 2: When `a` and `b` are matrices (order 2), the case `axes = [[1], [0]]` is equivalent to matrix multiplication. Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives the outer product, a tensor of order 4. Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor \\(c_{jklm}\\) whose entry corresponding to the indices \\((j,k,l,m)\\) is given by: \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\). In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`. Args: a: `Tensor` of type `float32` or `float64`. b: `Tensor` with the same type as `a`. axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k]. If axes is a scalar, sum over the last N axes of a and the first N axes of b in order. If axes is a list or `Tensor` the first and second row contain the set of unique integers specifying axes along which the contraction is computed, for `a` and `b`, respectively. The number of axes for `a` and `b` must be equal. If `axes=0`, computes the outer product between `a` and `b`. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `a`. Raises: ValueError: If the shapes of `a`, `b`, and `axes` are incompatible. IndexError: If the values in axes exceed the rank of the corresponding tensor." 9331,polyval,tensorflow/tensorflow/python/ops/math_ops.py,4565,function,"Computes the elementwise value of a polynomial. If `x` is a tensor and `coeffs` is a list of n + 1 tensors, this function returns the value of the n-th order polynomial p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n evaluated using Horner's method, i.e. p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] + x * coeffs[0])) Usage Example: >>> coefficients = [1.0, 2.5, -4.2] >>> x = 5.0 >>> y = tf.math.polyval(coefficients, x) >>> y Usage Example: >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0) `tf.math.polyval` can also be used in polynomial regression. Taking advantage of this function can facilitate writing a polynomial equation as compared to explicitly writing it out, especially for higher degree polynomials. >>> x = tf.constant(3) >>> theta1 = tf.Variable(2) >>> theta2 = tf.Variable(1) >>> theta3 = tf.Variable(0) >>> tf.math.polyval([theta1, theta2, theta3], x) Args: coeffs: A list of `Tensor` representing the coefficients of the polynomial. x: A `Tensor` representing the variable of the polynomial. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as the expression p(x), with the usual broadcasting rules for element-wise addition and multiplication applied. @compatibility(numpy) Equivalent to numpy.polyval. @end_compatibility" 9332,reciprocal_no_nan,tensorflow/tensorflow/python/ops/math_ops.py,4636,function,"Performs a safe reciprocal operation, element-wise. If a particular element is zero, the reciprocal for that element is also set to zero.
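Editor's note: a short sketch of the `axes` conventions for `tf.tensordot` (row 9330) and the coefficient ordering for `tf.math.polyval` (row 9331); the shapes and values in the comments are what the definitions above imply, not output from a verified run.

```python
import tensorflow as tf

a = tf.reshape(tf.range(6.), [2, 3])
b = tf.reshape(tf.range(12.), [3, 4])

tf.tensordot(a, b, axes=1).shape           # [2, 4]: matrix multiplication
tf.tensordot(a, b, axes=[[1], [0]]).shape  # [2, 4]: same contraction, spelled out
tf.tensordot(a, b, axes=0).shape           # [2, 3, 3, 4]: outer product, order 4

# polyval: coeffs[0] multiplies the highest power (Horner evaluation).
tf.math.polyval([2.0, 1.0, 0.0], 3.0)      # 2*3**2 + 1*3 + 0 = 21.0
```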
For example: ```python x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32) tf.math.reciprocal_no_nan(x) # [ 0.5, 2, 0.0, 1.0 ] ``` Args: x: A `Tensor` of type `float16`, `float32`, `float64`, `complex64` or `complex128`. name: A name for the operation (optional). Returns: A `Tensor` of same shape and type as `x`. Raises: TypeError: x must be of a valid dtype." 9333,xlog1py,tensorflow/tensorflow/python/ops/math_ops.py,4669,function,"Compute x * log1p(y). Given `x` and `y`, compute `x * log1p(y)`. This function safely returns zero when `x = 0`, no matter what the value of `y` is. Example: >>> tf.math.xlog1py(0., 1.) >>> tf.math.xlog1py(1., 1.) >>> tf.math.xlog1py(2., 2.) >>> tf.math.xlog1py(0., -1.) Args: x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: `x * log1p(y)`. @compatibility(scipy) Equivalent to scipy.special.xlog1py @end_compatibility" 9334,erfinv,tensorflow/tensorflow/python/ops/math_ops.py,4706,function,"Compute inverse error function. Given `x`, compute the inverse error function of `x`. This function is the inverse of `tf.math.erf`. Args: x: `Tensor` with type `float` or `double`. name: A name for the operation (optional). Returns: Inverse error function of `x`." 9335,ndtri,tensorflow/tensorflow/python/ops/math_ops.py,4724,function,"Compute quantile of Standard Normal. Args: x: `Tensor` with type `float` or `double`. name: A name for the operation (optional). Returns: The quantile of the standard normal distribution corresponding to `x`." 9336,ceil,tensorflow/tensorflow/python/ops/math_ops.py,4741,function,"Return the ceiling of the input, element-wise. For example: >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) Args: x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `tf.Tensor`. Has the same type as `x`. @compatibility(numpy) Equivalent to np.ceil @end_compatibility" 9337,sqrt,tensorflow/tensorflow/python/ops/math_ops.py,4767,function,"Computes element-wise square root of the input tensor. Note: This operation does not support integer types. >>> x = tf.constant([[4.0], [16.0]]) >>> tf.sqrt(x) >>> y = tf.constant([[-4.0], [16.0]]) >>> tf.sqrt(y) >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128) >>> tf.sqrt(z) Note: In order to support complex numbers, provide an input tensor of `complex64` or `complex128`. Args: x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `tf.Tensor` of same size, type and sparsity as `x`." 9338,exp,tensorflow/tensorflow/python/ops/math_ops.py,4805,function,"Computes exponential of x element-wise. \\(y = e^x\\). This function computes the exponential of the input tensor element-wise, i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor. \\(e\\) denotes Euler's number and is approximately equal to 2.718281. Output is positive for any real input. >>> x = tf.constant(2.0) >>> tf.math.exp(x) >>> x = tf.constant([2.0, 8.0]) >>> tf.math.exp(x) For complex numbers, the exponential value is calculated as \\(e^{x+iy}={e^x}{e^{iy}}={e^x}(\\cos(y)+i\\sin(y))\\) For `1+1j` the value would be computed as: \\(e^1(\\cos(1)+i\\sin(1)) = 2.7182817 \\times (0.5403023+0.84147096j)\\) >>> x = tf.constant(1 + 1j) >>> tf.math.exp(x) Args: x: A `tf.Tensor`.
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `tf.Tensor`. Has the same type as `x`. @compatibility(numpy) Equivalent to np.exp @end_compatibility" 9339,sobol_sample,tensorflow/tensorflow/python/ops/math_ops.py,4853,function,"Generates points from the Sobol sequence. Creates a Sobol sequence with `num_results` samples. Each sample has dimension `dim`. Skips the first `skip` samples. Args: dim: Positive scalar `Tensor` representing each sample's dimension. num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol points to return in the output. skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of initial points of the Sobol sequence to skip. Default value is 0. dtype: (Optional) The `tf.DType` of the sample. One of: `tf.float32` or `tf.float64`. Defaults to `tf.float32`. name: (Optional) Python `str` name prefixed to ops created by this function. Returns: `Tensor` of samples from Sobol sequence with `shape` [num_results, dim]." 9340,rsqrt,tensorflow/tensorflow/python/ops/math_ops.py,4880,function,"Computes reciprocal of square root of x element-wise. For example: >>> x = tf.constant([2., 0., -2.]) >>> tf.math.rsqrt(x) Args: x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `tf.Tensor`. Has the same type as `x`." 9341,LinspaceTest,tensorflow/tensorflow/python/ops/math_ops_linspace_test.py,34,class, 9342,ReduceTest,tensorflow/tensorflow/python/ops/math_ops_test.py,39,class, 9343,LogSumExpTest,tensorflow/tensorflow/python/ops/math_ops_test.py,136,class, 9344,RoundTest,tensorflow/tensorflow/python/ops/math_ops_test.py,216,class, 9345,ModTest,tensorflow/tensorflow/python/ops/math_ops_test.py,231,class, 9346,SquaredDifferenceTest,tensorflow/tensorflow/python/ops/math_ops_test.py,261,class, 9347,ApproximateEqualTest,tensorflow/tensorflow/python/ops/math_ops_test.py,284,class, 9348,ScalarMulTest,tensorflow/tensorflow/python/ops/math_ops_test.py,325,class, 9349,AddNTest,tensorflow/tensorflow/python/ops/math_ops_test.py,363,class, 9350,DivAndModTest,tensorflow/tensorflow/python/ops/math_ops_test.py,445,class, 9351,DivNoNanTest,tensorflow/tensorflow/python/ops/math_ops_test.py,584,class, 9352,MultiplyNoNanTest,tensorflow/tensorflow/python/ops/math_ops_test.py,600,class, 9353,XlogyTest,tensorflow/tensorflow/python/ops/math_ops_test.py,616,class, 9354,Xlog1pyTest,tensorflow/tensorflow/python/ops/math_ops_test.py,649,class, 9355,XdivyTest,tensorflow/tensorflow/python/ops/math_ops_test.py,683,class, 9356,NextAfterTest,tensorflow/tensorflow/python/ops/math_ops_test.py,716,class, 9357,BinaryOpsTest,tensorflow/tensorflow/python/ops/math_ops_test.py,752,class, 9358,SignTest,tensorflow/tensorflow/python/ops/math_ops_test.py,812,class, 9359,ReciprocalNoNanTest,tensorflow/tensorflow/python/ops/math_ops_test.py,825,class, 9360,EqualityTest,tensorflow/tensorflow/python/ops/math_ops_test.py,855,class, 9361,RangeTest,tensorflow/tensorflow/python/ops/math_ops_test.py,866,class, 9362,build_graph,tensorflow/tensorflow/python/ops/matmul_benchmark.py,35,function,"Build a graph containing a sequence of matmul operations. Args: device: String, the device to run on. n: tensor A's first dimension size. m: tensor A's second dimension size. k: tensor B's second dimension size. transpose_a: boolean value to show if tensor A is transposed.
transpose_b: boolean value to show if tensor B is transposed. dtype: numpy data type of the input tensor. Returns: A matmul operation to run()" 9363,MatmulBenchmark,tensorflow/tensorflow/python/ops/matmul_benchmark.py,68,class,Benchmark matmul! 9364,metric_variable,tensorflow/tensorflow/python/ops/metrics_impl.py,41,function,"Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections. If running in a `DistributionStrategy` context, the variable will be ""sync on read"". This means: * The returned object will be a container with separate variables per replica of the model. * When writing to the variable, e.g. using `assign_add` in a metric update, the update will be applied to the variable local to the replica. * To get a metric's result value, we need to sum the variable values across the replicas before computing the final answer. Furthermore, the final answer should be computed once instead of in every replica. Both of these are accomplished by running the computation of the final result value inside `distribution_strategy_context.get_replica_context().merge_call(fn)`. Inside the `merge_call()`, ops are only added to the graph once and access to a sync on read variable in a computation returns the sum across all replicas. Args: shape: Shape of the created variable. dtype: Type of the created variable. validate_shape: (Optional) Whether shape validation is enabled for the created variable. name: (Optional) String name of the created variable. Returns: A (non-trainable) variable initialized to zero, or if inside a `DistributionStrategy` scope a sync on read variable container." 9365,_remove_squeezable_dimensions,tensorflow/tensorflow/python/ops/metrics_impl.py,88,function,"Squeeze or expand last dim if needed. Squeezes last dim of `predictions` or `labels` if their rank differs by 1 (using confusion_matrix.remove_squeezable_dimensions). Squeezes or expands last dim of `weights` if its rank differs by 1 from the new rank of `predictions`. If `weights` is scalar, it is kept scalar. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: predictions: Predicted values, a `Tensor` of arbitrary dimensions. labels: Optional label `Tensor` whose dimensions match `predictions`. weights: Optional weight scalar or `Tensor` whose dimensions match `predictions`. Returns: Tuple of `predictions`, `labels` and `weights`. Each of them possibly has the last dimension squeezed, `weights` could be extended by one dimension." 9366,_maybe_expand_labels,tensorflow/tensorflow/python/ops/metrics_impl.py,164,function,"If necessary, expand `labels` along last dimension to match `predictions`. Args: labels: `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies num_labels=1, in which case the result is an expanded `labels` with shape [D1, ... DN, 1]. predictions: `Tensor` with shape [D1, ... DN, num_classes]. Returns: `labels` with the same rank as `predictions`. Raises: ValueError: if `labels` has invalid shape." 9367,_safe_scalar_div,tensorflow/tensorflow/python/ops/metrics_impl.py,215,function,"Divides two values, returning 0 if the denominator is 0. Args: numerator: A scalar `float64` `Tensor`. denominator: A scalar `float64` `Tensor`. name: Name for the returned op. Returns: 0 if `denominator` == 0, else `numerator` / `denominator`" 9368,_streaming_confusion_matrix,tensorflow/tensorflow/python/ops/metrics_impl.py,231,function,"Calculate a streaming confusion matrix. 
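Editor's note: `build_graph` (row 9362) and `MatmulBenchmark` (row 9363) are TF1-era benchmark utilities. Below is a hypothetical invocation sketched from the documented signature only; the import path mirrors the file column above, the module may not ship in pip wheels, and graph-mode execution is an assumption.

```python
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

from tensorflow.python.ops import matmul_benchmark  # path per the file column

with tf.Graph().as_default():
    # A is n x m, B is m x k, per the Args documented above.
    op = matmul_benchmark.build_graph('/cpu:0', n=256, m=256, k=256,
                                      transpose_a=False, transpose_b=False,
                                      dtype=np.float32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(op)  # one step of the matmul chain
```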
Calculates a confusion matrix. For estimation over a stream of data, the function creates an `update_op` operation. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: total_cm: A `Tensor` representing the confusion matrix. update_op: An operation that increments the confusion matrix." 9369,_aggregate_across_replicas,tensorflow/tensorflow/python/ops/metrics_impl.py,280,function,Aggregate metric value across replicas. 9370,mean,tensorflow/tensorflow/python/ops/metrics_impl.py,316,function,"Computes the (weighted) mean of the given values. The `mean` function creates two local variables, `total` and `count` that are used to compute the average of `values`. This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean`. `update_op` increments `total` with the reduced sum of the product of `values` and `weights`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `Tensor` of arbitrary dimensions. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that `mean` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_value`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9371,accuracy,tensorflow/tensorflow/python/ops/metrics_impl.py,397,function,"Calculates how often `predictions` matches `labels`. The `accuracy` function creates two local variables, `total` and `count` that are used to compute the frequency with which `predictions` matches `labels`. This frequency is ultimately returned as `accuracy`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `accuracy`. Internally, an `is_correct` operation computes a `Tensor` with elements 1.0 where the corresponding elements of `predictions` and `labels` match and 0.0 otherwise. 
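Editor's note: the `(value, update_op)` pair returned by `mean` (row 9370) is the pattern shared by every metric below. A minimal graph-mode sketch via the v1-compat endpoint; eager execution raises, per the Raises sections.

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

values = tf.placeholder(tf.float32, shape=[None])
mean_value, update_op = tf.metrics.mean(values)

with tf.Session() as sess:
    # Metric variables live in the LOCAL collection (see metric_variable above).
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, {values: [1., 2., 3.]})  # total=6, count=3
    sess.run(update_op, {values: [5.]})          # total=11, count=4
    print(sess.run(mean_value))                  # 2.75
```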
Then `update_op` increments `total` with the reduced sum of the product of `weights` and `is_correct`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose shape matches `predictions`. predictions: The predicted values, a `Tensor` of any shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `accuracy` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: accuracy: A `Tensor` representing the accuracy, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `accuracy`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9372,_confusion_matrix_at_thresholds,tensorflow/tensorflow/python/ops/metrics_impl.py,461,function,"Computes true_positives, false_negatives, true_negatives, false_positives. This function creates up to four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives`. `true_positives[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`. `false_negatives[i]` is defined as the total weight of values in `predictions` at most `thresholds[i]` whose corresponding entry in `labels` is `True`. `true_negatives[i]` is defined as the total weight of values in `predictions` at most `thresholds[i]` whose corresponding entry in `labels` is `False`. `false_positives[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `False`. For estimation of these metrics over a stream of data, for each metric the function respectively creates an `update_op` operation that updates the variable and returns its value. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`, default to all four. Returns: values: Dict of variables of shape `[len(thresholds)]`. Keys are from `includes`. update_ops: Dict of operations that increment the `values`. Keys are from `includes`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if `includes` contains invalid keys."
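Editor's note: the same streaming pattern applied to `accuracy` (row 9371), again assuming graph mode.

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])
acc, update_op = tf.metrics.accuracy(labels, predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)   # total += 3 correct, count += 4
    print(sess.run(acc))  # 0.75
```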
9373,_aggregate_variable,tensorflow/tensorflow/python/ops/metrics_impl.py,624,function, 9374,auc,tensorflow/tensorflow/python/ops/metrics_impl.py,633,function,"Computes the approximate AUC via a Riemann sum. The `auc` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the AUC. To discretize the AUC curve, a linearly spaced set of thresholds is used to compute pairs of recall and precision values. The area under the ROC-curve is therefore computed using the height of the recall values by the false positive rate, while the area under the PR-curve is computed using the height of the precision values by the recall. This value is ultimately returned as `auc`, an idempotent operation that computes the area under a discretized curve of precision versus recall values (computed using the aforementioned variables). The `num_thresholds` variable controls the degree of discretization, with larger numbers of thresholds more closely approximating the true AUC. The quality of the approximation may vary dramatically depending on `num_thresholds`. For best results, `predictions` should be distributed approximately uniformly in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC approximation may be poor if this is not the case. Setting `summation_method` to 'minoring' or 'majoring' can help quantify the error in the approximation by providing a lower or upper bound estimate of the AUC. The `thresholds` parameter can be used to manually specify thresholds which split the predictions more evenly. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `auc`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use when discretizing the ROC curve. metrics_collections: An optional list of collections that `auc` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. curve: Specifies the name of the curve to be computed, 'ROC' [default] or 'PR' for the Precision-Recall-curve. name: An optional variable_scope name. summation_method: Specifies the Riemann summation method used (https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that applies the trapezoidal rule; 'careful_interpolation', a variant of it differing only by a more correct interpolation scheme for PR-AUC - interpolating (true/false) positives but not the ratio that is precision; 'minoring' that applies left summation for increasing intervals and right summation for decreasing intervals; 'majoring' that does the opposite. Note that 'careful_interpolation' is strictly preferred to 'trapezoidal' (to be deprecated soon) as it applies the same method for ROC, and a better one (see Davis & Goadrich 2006 for details) for the PR curve. thresholds: An optional list of floating point values to use as the thresholds for discretizing the curve. If set, the `num_thresholds` parameter is ignored. Values should be in [0, 1].
Endpoint thresholds equal to {-epsilon, 1+epsilon} for a small positive epsilon value will be automatically included with these to correctly handle predictions equal to exactly 0 or 1. Returns: auc: A scalar `Tensor` representing the current area-under-curve. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `auc`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9375,mean_absolute_error,tensorflow/tensorflow/python/ops/metrics_impl.py,863,function,"Computes the mean absolute error between the labels and predictions. The `mean_absolute_error` function creates two local variables, `total` and `count` that are used to compute the mean absolute error. This average is weighted by `weights`, and it is ultimately returned as `mean_absolute_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_absolute_error`. Internally, an `absolute_errors` operation computes the absolute value of the differences between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `absolute_errors`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_absolute_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_absolute_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_absolute_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9376,mean_cosine_distance,tensorflow/tensorflow/python/ops/metrics_impl.py,924,function,"Computes the cosine distance between the labels and predictions. The `mean_cosine_distance` function creates two local variables, `total` and `count` that are used to compute the average cosine distance between `predictions` and `labels`. This average is weighted by `weights`, and it is ultimately returned as `mean_distance`, which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_distance`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of arbitrary shape.
predictions: A `Tensor` of the same shape as `labels`. dim: The dimension along which the cosine distance is computed. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Also, dimension `dim` must be `1`. metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: mean_distance: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9377,mean_per_class_accuracy,tensorflow/tensorflow/python/ops/metrics_impl.py,998,function,"Calculates the mean of the per-class accuracies. Calculates the accuracy for each class, then takes the mean of that. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates the accuracy of each class and returns them. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since two variables with shape = [num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_per_class_accuracy` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_accuracy: A `Tensor` representing the mean per class accuracy. update_op: An operation that updates the accuracy tensor. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9378,mean_iou,tensorflow/tensorflow/python/ops/metrics_impl.py,1103,function,"Calculate per-step mean Intersection-Over-Union (mIOU). Mean Intersection-Over-Union is a common evaluation metric for semantic image segmentation, which first computes the IOU for each semantic class and then computes the average over classes. IOU is defined as follows: IOU = true_positive / (true_positive + false_positive + false_negative). The predictions are accumulated in a confusion matrix, weighted by `weights`, and mIOU is then calculated from it.
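Editor's note: a toy sketch for `auc` (row 9374) above; the ~0.75 figure is what the ROC definition gives for this tiny input, subject to the thresholding approximation the docstring warns about.

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([0, 0, 1, 1])
predictions = tf.constant([0.1, 0.4, 0.35, 0.8])

auc_value, update_op = tf.metrics.auc(labels, predictions,
                                      num_thresholds=200, curve='ROC')
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)         # accumulate tp/fn/tn/fp at each threshold
    print(sess.run(auc_value))  # ~0.75 for this toy example
```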
For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_iou`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of ground truth labels with shape [batch size] and of type `int32` or `int64`. The tensor will be flattened if its rank > 1. predictions: A `Tensor` of prediction results for semantic labels, whose shape is [batch size] and type `int32` or `int64`. The tensor will be flattened if its rank > 1. num_classes: The possible number of labels the prediction task can have. This value must be provided, since a confusion matrix of dimension = [num_classes, num_classes] will be allocated. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_iou` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_iou: A `Tensor` representing the mean intersection-over-union. update_op: An operation that increments the confusion matrix. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9379,mean_relative_error,tensorflow/tensorflow/python/ops/metrics_impl.py,1206,function,"Computes the mean relative error by normalizing with the given values. The `mean_relative_error` function creates two local variables, `total` and `count` that are used to compute the mean relative absolute error. This average is weighted by `weights`, and it is ultimately returned as `mean_relative_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_relative_error`. Internally, a `relative_errors` operation divides the absolute value of the differences between `predictions` and `labels` by the `normalizer`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `relative_errors`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. normalizer: A `Tensor` of the same shape as `predictions`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_relative_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_relative_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_relative_error`.
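Editor's note: a sketch for `mean_iou` (row 9378); the per-class IOU arithmetic in the comments applies the definition given above.

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([0, 0, 1, 1])
predictions = tf.constant([0, 1, 1, 1])

miou, update_op = tf.metrics.mean_iou(labels, predictions, num_classes=2)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)    # accumulates the 2x2 confusion matrix
    # class 0: 1 / (1 + 0 + 1) = 0.5; class 1: 2 / (2 + 1 + 0) ~ 0.667
    print(sess.run(miou))  # ~0.583
```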
Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9380,mean_squared_error,tensorflow/tensorflow/python/ops/metrics_impl.py,1275,function,"Computes the mean squared error between the labels and predictions. The `mean_squared_error` function creates two local variables, `total` and `count` that are used to compute the mean squared error. This average is weighted by `weights`, and it is ultimately returned as `mean_squared_error`: an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean_squared_error`. Internally, a `squared_error` operation computes the element-wise square of the difference between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `squared_error`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `mean_squared_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean_squared_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_squared_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9381,mean_tensor,tensorflow/tensorflow/python/ops/metrics_impl.py,1336,function,"Computes the element-wise (weighted) mean of the given tensors. In contrast to the `mean` function which returns a scalar with the mean, this function returns an average tensor with the same shape as the input tensors. The `mean_tensor` function creates two local variables, `total_tensor` and `count_tensor` that are used to compute the average of `values`. This average is ultimately returned as `mean` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `mean`. `update_op` increments `total` with the reduced sum of the product of `values` and `weights`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `Tensor` of arbitrary dimensions. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). 
metrics_collections: An optional list of collections that `mean` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: mean: A float `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `mean_value`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9382,percentage_below,tensorflow/tensorflow/python/ops/metrics_impl.py,1421,function,"Computes the percentage of values less than the given threshold. The `percentage_below` function creates two local variables, `total` and `count` that are used to compute the percentage of `values` that fall below `threshold`. This rate is weighted by `weights`, and it is ultimately returned as `percentage` which is an idempotent operation that simply divides `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `percentage`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A numeric `Tensor` of arbitrary size. threshold: A scalar threshold. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: percentage: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9383,_count_condition,tensorflow/tensorflow/python/ops/metrics_impl.py,1475,function,"Sums the weights of cases where the given values are True. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: values: A `bool` `Tensor` of arbitrary size. weights: Optional `Tensor` whose rank is either 0, or the same rank as `values`, and must be broadcastable to `values` (i.e., all dimensions must be either `1`, or the same as the corresponding `values` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple." 9384,false_negatives,tensorflow/tensorflow/python/ops/metrics_impl.py,1522,function,"Computes the total number of false negatives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. 
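Editor's note: `percentage_below` (row 9382) in the same two-op style; a sketch under the usual graph-mode assumption.

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

values = tf.constant([0.2, 0.8, 1.5, 3.0])
pct, update_op = tf.metrics.percentage_below(values, threshold=1.0)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    print(sess.run(pct))  # 2 of 4 values fall below 1.0 -> 0.5
```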
Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `values`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9385,false_negatives_at_thresholds,tensorflow/tensorflow/python/ops/metrics_impl.py,1574,function,"Computes false negatives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `false_negatives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: false_negatives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `false_negatives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9386,false_positives,tensorflow/tensorflow/python/ops/metrics_impl.py,1630,function,"Sum the weights of false positives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. 
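Editor's note: a sketch of the thresholded counting described by `false_negatives_at_thresholds` (row 9385); the expected counts in the comment follow the "at most `thresholds[i]`" rule stated for `_confusion_matrix_at_thresholds` above.

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

labels = tf.constant([True, True, False, False])
predictions = tf.constant([0.9, 0.3, 0.6, 0.1])

fn, fn_update = tf.metrics.false_negatives_at_thresholds(
    labels, predictions, thresholds=[0.25, 0.5, 0.75])

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(fn_update)
    # A positive counts as a false negative once its score is at or below
    # the threshold: [0., 1., 1.] for thresholds [0.25, 0.5, 0.75].
    print(sess.run(fn))
```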
Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9387,false_positives_at_thresholds,tensorflow/tensorflow/python/ops/metrics_impl.py,1683,function,"Computes false positives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `false_positives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: false_positives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `false_positives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9388,true_negatives,tensorflow/tensorflow/python/ops/metrics_impl.py,1739,function,"Sum the weights of true_negatives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9389,true_negatives_at_thresholds,tensorflow/tensorflow/python/ops/metrics_impl.py,1792,function,"Computes true negatives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. 
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `true_negatives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: true_negatives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `true_negatives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9390,true_positives,tensorflow/tensorflow/python/ops/metrics_impl.py,1848,function,"Sum the weights of true_positives. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A `Tensor` representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9391,true_positives_at_thresholds,tensorflow/tensorflow/python/ops/metrics_impl.py,1901,function,"Computes true positives at provided threshold values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: A `Tensor` whose shape matches `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `true_positives` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: true_positives: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that updates the `true_positives` variable and returns its current value. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. 
RuntimeError: If eager execution is enabled." 9392,precision,tensorflow/tensorflow/python/ops/metrics_impl.py,1957,function,"Computes the precision of the predictions with respect to the labels. The `precision` function creates two local variables, `true_positives` and `false_positives`, that are used to compute the precision. This value is ultimately returned as `precision`, an idempotent operation that simply divides `true_positives` by the sum of `true_positives` and `false_positives`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision`. `update_op` weights each prediction by the corresponding value in `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `precision` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: precision: Scalar float `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately and whose value matches `precision`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9393,precision_at_thresholds,tensorflow/tensorflow/python/ops/metrics_impl.py,2052,function,"Computes precision values for different `thresholds` on `predictions`. The `precision_at_thresholds` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` for various values of thresholds. `precision[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`, divided by the total weight of values in `predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] + false_positives[i])`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `auc` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. 
name: An optional variable_scope name. Returns: precision: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables that are used in the computation of `precision`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9394,recall,tensorflow/tensorflow/python/ops/metrics_impl.py,2133,function,"Computes the recall of the predictions with respect to the labels. The `recall` function creates two local variables, `true_positives` and `false_negatives`, that are used to compute the recall. This value is ultimately returned as `recall`, an idempotent operation that simply divides `true_positives` by the sum of `true_positives` and `false_negatives`. For estimation of the metric over a stream of data, the function creates an `update_op` that updates these variables and returns the `recall`. `update_op` weights each prediction by the corresponding value in `weights`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will be cast to `bool`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `recall` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: recall: Scalar float `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_negatives`. update_op: `Operation` that increments `true_positives` and `false_negatives` variables appropriately and whose value matches `recall`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9395,_at_k_name,tensorflow/tensorflow/python/ops/metrics_impl.py,2225,function, 9396,_select_class_id,tensorflow/tensorflow/python/ops/metrics_impl.py,2235,function,"Filter all but `selected_id` out of `ids`. Args: ids: `int64` `Tensor` or `SparseTensor` of IDs. selected_id: Int id to select. Returns: `SparseTensor` of same dimensions as `ids`. This contains only the entries equal to `selected_id`." 9397,_maybe_select_class_id,tensorflow/tensorflow/python/ops/metrics_impl.py,2269,function,"If class ID is specified, filter all other classes. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. selected_id: Int id to select. 
Returns: Tuple of `labels` and `predictions_idx`, possibly with classes removed." 9398,_sparse_true_positive_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,2292,function,"Calculates true positives for recall@k and precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of operation. Returns: A [D1, ... DN] `Tensor` of true positive counts." 9399,_streaming_sparse_true_positive_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,2337,function,"Calculates weighted per step true positives for recall@k and precision@k. If `class_id` is specified, calculate binary true positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape." 9400,_sparse_false_negative_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,2388,function,"Calculates false negatives for recall@k. If `class_id` is specified, calculate binary false negatives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`.
class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: A [D1, ... DN] `Tensor` of false negative counts." 9401,_streaming_sparse_false_negative_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,2432,function,"Calculates weighted per step false negatives for recall@k. If `class_id` is specified, calculate binary false negatives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape." 9402,recall_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,2484,function,"Computes recall@k of the predictions with respect to sparse labels. If `class_id` is specified, we calculate recall by considering only the entries in the batch for which `class_id` is in the label, and computing the fraction of them for which `class_id` is in the top-k `predictions`. If `class_id` is not specified, we'll calculate recall as how often on average a class among the labels of a batch entry is in the top-k `predictions`. `recall_at_k` creates two local variables, `true_positive_at_` and `false_negative_at_`, that are used to compute the recall_at_k frequency. This frequency is ultimately returned as `recall_at_`: an idempotent operation that simply divides `true_positive_at_` by total (`true_positive_at_` + `false_negative_at_`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `recall_at_`. Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false negatives weighted by `weights`. Then `update_op` increments `true_positive_at_` and `false_negative_at_` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range always count towards `false_negative_at_`. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes]. The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. If class_id is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: recall: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_negatives`. update_op: `Operation` that increments `true_positives` and `false_negatives` variables appropriately, and whose value matches `recall`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9403,recall_at_top_k,tensorflow/tensorflow/python/ops/metrics_impl.py,2577,function,"Computes recall@k of top-k predictions with respect to sparse labels. Differs from `recall_at_k` in that predictions must be in the form of top `k` class indices, whereas `recall_at_k` expects logits. Refer to `recall_at_k` for more details. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range always count towards `false_negative_at_`. predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and predictions has shape [batch size, k]. The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. Only used for the default op name. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. If class_id is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. 
Returns: recall: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_negatives`. update_op: `Operation` that increments `true_positives` and `false_negatives` variables appropriately, and whose value matches `recall`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple." 9404,recall_at_thresholds,tensorflow/tensorflow/python/ops/metrics_impl.py,2661,function,"Computes various recall values for different `thresholds` on `predictions`. The `recall_at_thresholds` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` for various values of thresholds. `recall[i]` is defined as the total weight of values in `predictions` above `thresholds[i]` whose corresponding entry in `labels` is `True`, divided by the total weight of `True` values in `labels` (`true_positives[i] / (true_positives[i] + false_negatives[i])`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `recall`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. thresholds: A python list or tuple of float thresholds in `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `recall` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: recall: A float `Tensor` of shape `[len(thresholds)]`. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables that are used in the computation of `recall`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9405,root_mean_squared_error,tensorflow/tensorflow/python/ops/metrics_impl.py,2739,function,"Computes the root mean squared error between the labels and predictions. The `root_mean_squared_error` function creates two local variables, `total` and `count` that are used to compute the root mean squared error. This average is weighted by `weights`, and it is ultimately returned as `root_mean_squared_error`: an idempotent operation that takes the square root of the division of `total` by `count`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `root_mean_squared_error`. Internally, a `squared_error` operation computes the element-wise square of the difference between `predictions` and `labels`. Then `update_op` increments `total` with the reduced sum of the product of `weights` and `squared_error`, and it increments `count` with the reduced sum of `weights`. If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values. Args: labels: A `Tensor` of the same shape as `predictions`. predictions: A `Tensor` of arbitrary shape. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that `root_mean_squared_error` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: root_mean_squared_error: A `Tensor` representing the current mean, the value of `total` divided by `count`. update_op: An operation that increments the `total` and `count` variables appropriately and whose value matches `root_mean_squared_error`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9406,sensitivity_at_specificity,tensorflow/tensorflow/python/ops/metrics_impl.py,2810,function,"Computes the sensitivity at a given specificity. The `sensitivity_at_specificity` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the sensitivity at the given specificity value. The threshold for the given specificity value is computed and used to evaluate the corresponding sensitivity. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` counts with the weight of each case found in the `predictions` and `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. For additional information about specificity and sensitivity, see the following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. specificity: A scalar value in range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use for matching the given specificity. metrics_collections: An optional list of collections that `sensitivity` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: sensitivity: A scalar `Tensor` representing the sensitivity at the given `specificity` value. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `sensitivity`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, if `weights` is not `None` and its shape doesn't match `predictions`, or if `specificity` is not between 0 and 1, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled."
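These streaming metrics all share the two-part contract described above: an idempotent value tensor plus an `update_op` that folds in one batch of data. A minimal usage sketch, assuming TF1-style graph execution via `tf.compat.v1` (the metrics raise `RuntimeError` under eager execution); the feed values are illustrative:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

labels = tf.placeholder(tf.float32, shape=[None])
predictions = tf.placeholder(tf.float32, shape=[None])
# Returns (value_tensor, update_op); the value tensor is idempotent.
rmse, update_op = tf.metrics.root_mean_squared_error(labels, predictions)

with tf.Session() as sess:
    # Metric state lives in *local* variables (`total` and `count`).
    sess.run(tf.local_variables_initializer())
    for batch_labels, batch_preds in [([1., 2.], [1., 3.]), ([0., 4.], [1., 4.])]:
        sess.run(update_op, {labels: batch_labels, predictions: batch_preds})
    print(sess.run(rmse))  # RMSE accumulated over both batches
```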
9407,_expand_and_tile,tensorflow/tensorflow/python/ops/metrics_impl.py,2911,function,"Slice `tensor` shape in 2, then tile along the sliced dimension. A new dimension is inserted in shape of `tensor` before `dim`, then values are tiled `multiple` times along the new dimension. Args: tensor: Input `Tensor` or `SparseTensor`. multiple: Integer, number of times to tile. dim: Integer, dimension along which to tile. name: Name of operation. Returns: `Tensor` result of expanding and tiling `tensor`. Raises: ValueError: if `multiple` is less than 1, or `dim` is not in `[-rank(tensor), rank(tensor)]`." 9408,_num_relevant,tensorflow/tensorflow/python/ops/metrics_impl.py,2965,function,"Computes number of relevant values for each row in labels. For labels with shape [D1, ... DN, num_labels], this is the minimum of `num_labels` and `k`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. k: Integer, k for @k metric. Returns: Integer `Tensor` of shape [D1, ... DN], where each value is the number of relevant values for that row. Raises: ValueError: if inputs have invalid dtypes or values." 9409,_sparse_average_precision_at_top_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3003,function,"Computes average precision@k of predictions with respect to sparse labels. From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula for each row is: AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items A ""row"" is the elements in dimension [D1, ... DN] of `predictions_idx`, `labels`, and the result `Tensors`. In the common case, this is [batch_size]. Each row of the results contains the average precision for that row. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. Values should be non-negative. Negative values are ignored. predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final dimension must be set and contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. Values should be in range [0, num_classes). Returns: `float64` `Tensor` of shape [D1, ... DN], where each value is the average precision for that row. Raises: ValueError: if the last dimension of predictions_idx is not set." 9410,_streaming_sparse_average_precision_at_top_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3092,function,"Computes average precision@k of predictions with respect to sparse labels. `sparse_average_precision_at_top_k` creates two local variables, `average_precision_at_/total` and `average_precision_at_/max`, that are used to compute the frequency. This frequency is ultimately returned as `average_precision_at_`: an idempotent operation that simply divides `average_precision_at_/total` by `average_precision_at_/max`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. 
Then `update_op` increments `true_positive_at_` and `false_positive_at_` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. Values should be non-negative. Negative values are ignored. predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. Values should be in range [0, num_classes). weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: mean_average_precision: Scalar `float64` `Tensor` with the mean average precision values. update: `Operation` that increments variables appropriately, and whose value matches `metric`." 9411,_clean_out_of_range_indices,tensorflow/tensorflow/python/ops/metrics_impl.py,3185,function,"Replaces large out-of-range labels by small out-of-range labels. Replaces any value in `labels` that is greater than or equal to `num_classes` by -1. Do this conditionally for efficiency in case there are no such values. Args: labels: `int64` `Tensor` or `SparseTensor`. num_classes: `int64` scalar `Tensor`. Returns: An `int64` `Tensor` or `SparseTensor` as `labels` with values greater than or equal to num_classes replaced by -1." 9412,sparse_average_precision_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3228,function,"Renamed to `average_precision_at_k`, please use that method instead." 9413,average_precision_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3247,function,"Computes average precision@k of predictions with respect to sparse labels. `average_precision_at_k` creates two local variables, `average_precision_at_/total` and `average_precision_at_/max`, that are used to compute the frequency. This frequency is ultimately returned as `average_precision_at_`: an idempotent operation that simply divides `average_precision_at_/total` by `average_precision_at_/max`. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_`. Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_` and `false_positive_at_` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ...
DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and `predictions` has shape [batch size, num_classes]. The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. This will calculate an average precision for range `[1,k]`, as documented above. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: mean_average_precision: Scalar `float64` `Tensor` with the mean average precision values. update: `Operation` that increments variables appropriately, and whose value matches `metric`. Raises: ValueError: if k is invalid. RuntimeError: If eager execution is enabled." 9414,_sparse_false_positive_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3331,function,"Calculates false positives for precision@k. If `class_id` is specified, calculate binary false positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). Returns: A [D1, ... DN] `Tensor` of false positive counts." 9415,_streaming_sparse_false_positive_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3375,function,"Calculates weighted per step false positives for precision@k. If `class_id` is specified, calculate binary false positives for `class_id` only. If `class_id` is not specified, calculate metrics for `k` predicted vs `n` label classes, where `n` is the 2nd dimension of `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`. predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`, top `k` predicted classes. For rank `n`, the first `n-1` dimensions must match `labels`. k: Integer, k for @k metric. This is only used for default op name. class_id: Class for which we want binary metrics. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`.
If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). name: Name of new variable, and namespace for other dependent ops. Returns: A tuple of `Variable` and update `Operation`. Raises: ValueError: If `weights` is not `None` and has an incompatible shape." 9416,precision_at_top_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3427,function,"Computes precision@k of the predictions with respect to sparse labels. Differs from `precision_at_k` in that predictions must be in the form of top `k` class indices, whereas `precision_at_k` expects logits. Refer to `precision_at_k` for more details. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1. Commonly, N=1 and predictions has shape [batch size, k]. The final dimension contains the top `k` predicted class indices. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. Only used for the default op name. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. If `class_id` is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: precision: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately, and whose value matches `precision`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9417,sparse_precision_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3517,function,"Renamed to `precision_at_k`, please use that method instead." 9418,precision_at_k,tensorflow/tensorflow/python/ops/metrics_impl.py,3538,function,"Computes precision@k of the predictions with respect to sparse labels. If `class_id` is specified, we calculate precision by considering only the entries in the batch for which `class_id` is in the top-k highest `predictions`, and computing the fraction of them for which `class_id` is indeed a correct label. If `class_id` is not specified, we'll calculate precision as how often on average a class among the top-k classes with the highest predicted values of a batch entry is correct and can be found in the label for that entry.
`precision_at_k` creates two local variables, `true_positive_at_` and `false_positive_at_`, that are used to compute the precision@k frequency. This frequency is ultimately returned as `precision_at_`: an idempotent operation that simply divides `true_positive_at_` by total (`true_positive_at_` + `false_positive_at_`). For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `precision_at_`. Internally, a `top_k` operation computes a `Tensor` indicating the top `k` `predictions`. Set operations applied to `top_k` and `labels` calculate the true positives and false positives weighted by `weights`. Then `update_op` increments `true_positive_at_` and `false_positive_at_` using these values. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. Args: labels: `int64` `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies num_labels=1. N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. Values outside this range are ignored. predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes]. The final dimension contains the logit values for each class. [D1, ... DN] must match `labels`. k: Integer, k for @k metric. class_id: Integer class ID for which we want binary metrics. This should be in range [0, num_classes), where num_classes is the last dimension of `predictions`. If `class_id` is outside this range, the method returns NAN. weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of `labels`. If the latter, it must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). metrics_collections: An optional list of collections that values should be added to. updates_collections: An optional list of collections that updates should be added to. name: Name of new update operation, and namespace for other dependent ops. Returns: precision: Scalar `float64` `Tensor` with the value of `true_positives` divided by the sum of `true_positives` and `false_positives`. update_op: `Operation` that increments `true_positives` and `false_positives` variables appropriately, and whose value matches `precision`. Raises: ValueError: If `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9419,specificity_at_sensitivity,tensorflow/tensorflow/python/ops/metrics_impl.py,3632,function,"Computes the specificity at a given sensitivity. The `specificity_at_sensitivity` function creates four local variables, `true_positives`, `true_negatives`, `false_positives` and `false_negatives` that are used to compute the specificity at the given sensitivity value. The threshold for the given sensitivity value is computed and used to evaluate the corresponding specificity. For estimation of the metric over a stream of data, the function creates an `update_op` operation that updates these variables and returns the `specificity`.
`update_op` increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` counts with the weight of each case found in the `predictions` and `labels`. If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. For additional information about specificity and sensitivity, see the following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity Args: labels: The ground truth values, a `Tensor` whose dimensions must match `predictions`. Will be cast to `bool`. predictions: A floating point `Tensor` of arbitrary shape and whose values are in the range `[0, 1]`. sensitivity: A scalar value in range `[0, 1]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `labels` dimension). num_thresholds: The number of thresholds to use for matching the given sensitivity. metrics_collections: An optional list of collections that `specificity` should be added to. updates_collections: An optional list of collections that `update_op` should be added to. name: An optional variable_scope name. Returns: specificity: A scalar `Tensor` representing the specificity at the given `sensitivity` value. update_op: An operation that increments the `true_positives`, `true_negatives`, `false_positives` and `false_negatives` variables appropriately and whose value matches `specificity`. Raises: ValueError: If `predictions` and `labels` have mismatched shapes, if `weights` is not `None` and its shape doesn't match `predictions`, or if `sensitivity` is not between 0 and 1, or if either `metrics_collections` or `updates_collections` are not a list or tuple. RuntimeError: If eager execution is enabled." 9420,all_sum,tensorflow/tensorflow/python/ops/nccl_ops.py,33,function,"Returns a list of tensors with the all-reduce sum across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to sum; must be assigned to GPU devices. Returns: List of tensors, each with the sum of the input tensors, where tensor i has the same device as `tensors[i]`." 9421,_all_sum_grad,tensorflow/tensorflow/python/ops/nccl_ops.py,51,function,"The gradients for `all_sum`. Args: op: The `all_sum` `Operation` that we are differentiating. grad: Gradient with respect to the output of the `all_sum` op. Returns: The gradient with respect to the output of `all_sum`. Raises: LookupError: If `reduction` is not `sum`." 9422,all_prod,tensorflow/tensorflow/python/ops/nccl_ops.py,79,function,"Returns a list of tensors with the all-reduce product across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to multiply; must be assigned to GPU devices. Returns: List of tensors, each with the product of the input tensors, where tensor i has the same device as `tensors[i]`." 9423,all_min,tensorflow/tensorflow/python/ops/nccl_ops.py,96,function,"Returns a list of tensors with the all-reduce min across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. 
Returns: List of tensors, each with the minimum of the input tensors, where tensor i has the same device as `tensors[i]`." 9424,all_max,tensorflow/tensorflow/python/ops/nccl_ops.py,113,function,"Returns a list of tensors with the all-reduce max across `tensors`. The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. Returns: List of tensors, each with the maximum of the input tensors, where tensor i has the same device as `tensors[i]`." 9425,reduce_sum,tensorflow/tensorflow/python/ops/nccl_ops.py,130,function,"Returns a tensor with the reduce sum across `tensors`. The computation is done with a reduce operation, so only one tensor is returned. Args: tensors: The input tensors across which to sum; must be assigned to GPU devices. Returns: A tensor containing the sum of the input tensors. Raises: LookupError: If context is not currently using a GPU device." 9426,_reduce_sum_grad,tensorflow/tensorflow/python/ops/nccl_ops.py,150,function,"The gradients for input `Operation` of `reduce_sum`. Args: op: The `sum send` `Operation` that we are differentiating. grad: Gradient with respect to the output of the `reduce_sum` op. Returns: The gradient with respect to the input of `reduce_sum` op. Raises: LookupError: If the reduction attribute of op is not `sum`." 9427,broadcast,tensorflow/tensorflow/python/ops/nccl_ops.py,173,function,"Returns a tensor that can be efficiently transferred to other devices. Args: tensor: The tensor to send; must be assigned to a GPU device. Returns: A tensor with the value of `tensor`, which can be used as input to ops on other GPU devices." 9428,_broadcast_grad,tensorflow/tensorflow/python/ops/nccl_ops.py,190,function,"The gradients for input `Operation` of `broadcast`. Args: op: The `broadcast send` `Operation` that we are differentiating. accumulated_grad: Accumulated gradients with respect to the output of the `broadcast` op. Returns: Gradients with respect to the input of `broadcast`." 9429,_apply_all_reduce,tensorflow/tensorflow/python/ops/nccl_ops.py,210,function,Helper function for all_* functions. 9430,_apply_reduce,tensorflow/tensorflow/python/ops/nccl_ops.py,239,function,Helper function for reduce_* functions. 9431,_get_shared_name,tensorflow/tensorflow/python/ops/nccl_ops.py,254,function, 9432,_check_device,tensorflow/tensorflow/python/ops/nccl_ops.py,263,function, 9433,_DeviceTensors,tensorflow/tensorflow/python/ops/nccl_ops_test.py,33,function, 9434,_NcclAllReduce,tensorflow/tensorflow/python/ops/nccl_ops_test.py,41,function, 9435,_NcclReduce,tensorflow/tensorflow/python/ops/nccl_ops_test.py,45,function, 9436,_NcclBroadcast,tensorflow/tensorflow/python/ops/nccl_ops_test.py,51,function, 9437,NcclTestCase,tensorflow/tensorflow/python/ops/nccl_ops_test.py,59,class, 9438,AllReduceTest,tensorflow/tensorflow/python/ops/nccl_ops_test.py,131,class, 9439,SingleReduceTest,tensorflow/tensorflow/python/ops/nccl_ops_test.py,150,class, 9440,BroadcastTest,tensorflow/tensorflow/python/ops/nccl_ops_test.py,160,class, 9441,CombinedTest,tensorflow/tensorflow/python/ops/nccl_ops_test.py,184,class,Test all-reduce vs. single-reduce plus broadcast in one session.run.
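The all-reduce entries above share one operational caveat worth making concrete: every returned tensor must be fetched in the same run, or the collective hangs. A hedged sketch using the internal `nccl_ops` module indexed above (assumes a machine with at least two GPUs; this module is not a stable public API):

```python
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import nccl_ops

tf.disable_eager_execution()

# One input tensor per participating GPU device.
tensors = []
for device in ['/gpu:0', '/gpu:1']:
    with tf.device(device):
        tensors.append(tf.constant([1.0, 2.0]))

summed = nccl_ops.all_sum(tensors)  # one output tensor per input device

with tf.Session() as sess:
    # Fetch *all* outputs together: evaluating only summed[0] would hang,
    # because every device must enter the all-reduce.
    print(sess.run(summed))
```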
9442,BatchNormalizationTest,tensorflow/tensorflow/python/ops/nn_batchnorm_test.py,38,class, 9443,SufficientStatisticsTest,tensorflow/tensorflow/python/ops/nn_batchnorm_test.py,352,class, 9444,NormalizeMomentsTest,tensorflow/tensorflow/python/ops/nn_batchnorm_test.py,412,class, 9445,MomentsTest,tensorflow/tensorflow/python/ops/nn_batchnorm_test.py,455,class, 9446,WeightedMomentsTest,tensorflow/tensorflow/python/ops/nn_batchnorm_test.py,597,class,"Tests for nn.weighted_moments. Note that this test inherits from MomentsTest, inheriting all its test methods! It modifies MomentsTest in two ways: a) By overriding _unweighted_moments, all the codepaths in MomentsTest are executed, but with calls to tf.nn.moments() replaced by calls to tf.nn.weighted_moments() with a constant weight of 1. b) By overriding RunMomentTest and RunMomentTestWithDynamicShape, this test adds multiple additional calls to RunWeightedMomentsTest() to exercise correctness with non-constant weights and varying broadcasting situations. (It also continues to call MomentsTest.Run(Weighted)?MomentsTest as well.)" 9447,BatchNormalizationTest,tensorflow/tensorflow/python/ops/nn_fused_batchnorm_test.py,35,class, 9448,_Conv2DBackpropInputGrad,tensorflow/tensorflow/python/ops/nn_grad.py,31,function,"The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. the input and the filter" 9449,_Conv2DBackpropFilterGrad,tensorflow/tensorflow/python/ops/nn_grad.py,68,function, 9450,_DepthwiseConv2dNativeBackpropInputGrad,tensorflow/tensorflow/python/ops/nn_grad.py,95,function,"The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. the input and the filter" 9451,_DepthwiseConv2dNativeBackpropFilterGrad,tensorflow/tensorflow/python/ops/nn_grad.py,128,function, 9452,_Conv3DGrad,tensorflow/tensorflow/python/ops/nn_grad.py,151,function, 9453,_Conv3DBackpropInputGrad,tensorflow/tensorflow/python/ops/nn_grad.py,174,function, 9454,_Conv3DBackpropFilterGrad,tensorflow/tensorflow/python/ops/nn_grad.py,197,function, 9455,_AvgPool3DGrad,tensorflow/tensorflow/python/ops/nn_grad.py,219,function, 9456,_AvgPool3DGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,230,function, 9457,_MaxPool3DGrad,tensorflow/tensorflow/python/ops/nn_grad.py,241,function, 9458,_MaxPool3DGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,253,function, 9459,_MaxPool3DGradGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,269,function, 9460,_SoftmaxGrad,tensorflow/tensorflow/python/ops/nn_grad.py,285,function,"The derivative of the softmax nonlinearity. We assume that probs is of shape [batch_size * dim] The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax'). This matrix is diagonal minus a rank one matrix, so it is easy to implement as follows: grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax Args: op: the Softmax op. grad_softmax: the tensor representing the gradient w.r.t. the softmax output. Returns: gradient w.r.t. the input to the softmax" 9461,_LogSoftmaxGrad,tensorflow/tensorflow/python/ops/nn_grad.py,310,function,"The gradient for log_softmax. log_softmax = input - log(sum(exp(input))) dlog_softmax/dinput = diag - softmax(input) Args: op: The log softmax op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input."
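The rank-one identity in the `_SoftmaxGrad` docstring can be checked numerically. A small NumPy sketch (the vectors `x` and `g` are arbitrary illustrative values):

```python
import numpy as np

x = np.array([0.5, -1.0, 2.0])   # pre-softmax input
g = np.array([0.1, 0.2, 0.3])    # upstream gradient w.r.t. the softmax output
s = np.exp(x) / np.exp(x).sum()  # softmax(x)

# Rank-one-update form quoted in the docstring:
grad_x = g * s - (g * s).sum() * s

# Full Jacobian form: J = diag(s) - s s^T, so grad_x = J^T g.
J = np.diag(s) - np.outer(s, s)
np.testing.assert_allclose(grad_x, J.T @ g)
```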
9462,_BiasAddGrad,tensorflow/tensorflow/python/ops/nn_grad.py,328,function,"Return the gradients for the 2 inputs of bias_op. The first input of unused_bias_op is the tensor t, and its gradient is just the gradient the unused_bias_op received. The second input of unused_bias_op is the bias vector which has one fewer dimension than ""received_grad"" (the batch dimension). Its gradient is the received gradient summed on the batch dimension, which is the first dimension. Args: op: The BiasOp for which we need to generate gradients. received_grad: Tensor. The gradients passed to the BiasOp. Returns: Two tensors, the first one for the ""tensor"" input of the BiasOp, the second one for the ""bias"" input of the BiasOp." 9463,_BiasAddGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,356,function,"Gradient for the BiasAddGrad op. Args: op: BiasAddGrad op for which we are calculating gradients. received_grad: The gradients passed to the BiasAddGrad op. Returns: A single gradient Tensor for the input to BiasAddGrad (which is the gradient of the bias term in BiasAdd)" 9464,_BiasAddGradV1,tensorflow/tensorflow/python/ops/nn_grad.py,392,function,"Return the gradients for the 2 inputs of bias_op. The first input of unused_bias_op is the tensor t, and its gradient is just the gradient the unused_bias_op received. The second input of unused_bias_op is the bias vector which has one fewer dimension than ""received_grad"" (the batch dimension). Its gradient is the received gradient summed on the batch dimension, which is the first dimension. Args: unused_bias_op: The BiasOp for which we need to generate gradients. received_grad: Tensor. The gradients passed to the BiasOp. Returns: Two tensors, the first one for the ""tensor"" input of the BiasOp, the second one for the ""bias"" input of the BiasOp." 9465,_ReluGrad,tensorflow/tensorflow/python/ops/nn_grad.py,416,function, 9466,_EluGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,421,function, 9467,_SeluGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,429,function, 9468,_Relu6Grad,tensorflow/tensorflow/python/ops/nn_grad.py,437,function, 9469,_Relu6GradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,442,function, 9470,_LeakyReluGrad,tensorflow/tensorflow/python/ops/nn_grad.py,449,function, 9471,_LeakyReluGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,456,function, 9472,_EluGrad,tensorflow/tensorflow/python/ops/nn_grad.py,464,function, 9473,_SeluGrad,tensorflow/tensorflow/python/ops/nn_grad.py,469,function, 9474,_SoftplusGrad,tensorflow/tensorflow/python/ops/nn_grad.py,474,function, 9475,_SoftplusGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,479,function, 9476,_SoftsignGrad,tensorflow/tensorflow/python/ops/nn_grad.py,492,function, 9477,_ReluGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,497,function, 9478,_BroadcastMul,tensorflow/tensorflow/python/ops/nn_grad.py,503,function,"Multiply after broadcasting vec to match dimensions of mat. Args: vec: A 1-D tensor of dimension [D0] mat: A 2-D tensor of dimension [D0, D1] Returns: A tensor of dimension [D0, D1], the result of vec * mat" 9479,_SoftmaxCrossEntropyWithLogitsGrad,tensorflow/tensorflow/python/ops/nn_grad.py,519,function,Gradient function for SoftmaxCrossEntropyWithLogits. 9480,_SparseSoftmaxCrossEntropyWithLogitsGrad,tensorflow/tensorflow/python/ops/nn_grad.py,544,function,Gradient function for SparseSoftmaxCrossEntropyWithLogits. 9481,_Conv2DGrad,tensorflow/tensorflow/python/ops/nn_grad.py,570,function,Gradient function for Conv2D.
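The `_BiasAddGrad` entries state that the bias gradient is the incoming gradient reduced over the non-bias dimensions. A quick eager-mode check of that rule for the 2-D case (shapes are illustrative only):

```python
import tensorflow as tf

x = tf.random.normal([4, 3])  # [batch, channels]
bias = tf.zeros([3])

with tf.GradientTape() as tape:
    tape.watch(bias)  # bias is a plain tensor, so watch it explicitly
    y = tf.nn.bias_add(x, bias)
    loss = tf.reduce_sum(y * y)

grad_bias = tape.gradient(loss, bias)
# The upstream gradient is 2*y; the bias gradient sums it over the batch dim.
tf.debugging.assert_near(grad_bias, tf.reduce_sum(2.0 * y, axis=0))
```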
9482,_DepthwiseConv2dNativeGrad,tensorflow/tensorflow/python/ops/nn_grad.py,611,function, 9483,_Dilation2DGrad,tensorflow/tensorflow/python/ops/nn_grad.py,635,function, 9484,_LRNGrad,tensorflow/tensorflow/python/ops/nn_grad.py,649,function, 9485,_AvgPoolGrad,tensorflow/tensorflow/python/ops/nn_grad.py,661,function, 9486,_AvgPoolGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,672,function, 9487,_MaxPoolGrad,tensorflow/tensorflow/python/ops/nn_grad.py,683,function, 9488,_MaxPoolGradV2,tensorflow/tensorflow/python/ops/nn_grad.py,695,function, 9489,_MaxPoolGradWithArgmax,tensorflow/tensorflow/python/ops/nn_grad.py,709,function, 9490,_MaxPoolGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,722,function, 9491,_MaxPoolGradGradV2,tensorflow/tensorflow/python/ops/nn_grad.py,738,function, 9492,_MaxPoolGradGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,756,function, 9493,_FractionalMaxPoolGrad,tensorflow/tensorflow/python/ops/nn_grad.py,772,function,"Returns gradient for FractionalMaxPool. Since FractionalMaxPool has three outputs, there are three gradients passed in for each of the outputs. Only the first one is useful, the other two gradients are empty. Args: op: The FractionalMaxPoolOp. grad_0: Gradient with respect to op.outputs[0] unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty. unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty. Returns: Input backprop for FractionalMaxPool op." 9494,_FractionalAvgPoolGrad,tensorflow/tensorflow/python/ops/nn_grad.py,794,function,"Returns gradient for FractionalAvgPool. Since FractionalAvgPool has three outputs, there are three gradients passed in for each of the outputs. Only the first one is useful, the other two gradients are empty. Args: op: The FractionalAvgPoolOp. grad_0: Gradient with respect to op.outputs[0] unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty. unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty. Returns: Input backprop for FractionalAvgPool op." 9495,_BatchNormWithGlobalNormalizationGrad,tensorflow/tensorflow/python/ops/nn_grad.py,816,function,"Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization. We do not backprop anything for the mean and var intentionally as they are not being trained with backprop in the operation. Args: op: The BatchNormOp for which we need to generate gradients. grad: Tensor. The gradients passed to the BatchNormOp. Returns: dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon))) dm: Backprop for mean, which is sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon)) dv: Backprop for variance, which is sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2) db: Backprop for beta, which is grad reduced in all except the last dimension. dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))" 9496,_BaseFusedBatchNormGrad,tensorflow/tensorflow/python/ops/nn_grad.py,842,function,"Return the gradients for the 3 inputs of BatchNorm. Args: op: The BatchNormOp for which we need to compute gradients. version: Integer indicating which version to use of the fused batch norm gradient. *grad: An argument list for tensors of gradients wrt the outputs with grad[0] as grad_y. Returns: grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) * [grad_y - mean(grad_y) - (x - mean(x)) * mean(grad_y * (x - mean(x))) / (variance + epsilon)] in training mode; grad_y * scale * rsqrt(pop_variance + epsilon) in freeze mode. 
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) * rsqrt(variance + epsilon)) in training mode; sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon)) in freeze mode. grad_offset: gradient for offset, which is sum(grad_y) in training mode; sum(grad_y) in freeze mode." 9497,_FusedBatchNormGrad,tensorflow/tensorflow/python/ops/nn_grad.py,918,function, 9498,_FusedBatchNormV2Grad,tensorflow/tensorflow/python/ops/nn_grad.py,923,function, 9499,_FusedBatchNormV3Grad,tensorflow/tensorflow/python/ops/nn_grad.py,928,function, 9500,_BatchNormGrad,tensorflow/tensorflow/python/ops/nn_grad.py,932,function,"Returns the gradients for the 3 inputs of BatchNorm. Args: grad_y: A `Tensor` of 4 dimensions for gradient for y. x: A `Tensor` of 4 dimensions for x. scale: A `Tensor` of 1 dimension for scaling. pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when is_training=False. pop_var: A `Tensor` of 1 dimension for the population variance. Only used when is_training=False. epsilon: A small float number added to the variance of x. data_format: The data format for input. Either b""NHWC"" or b""NCHW"". is_training: A bool value to indicate the operation is for training (default) or inference. Returns: A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient for x, grad_scale the gradient for scale, and grad_offset the gradient for offset." 9501,_FusedBatchNormGradGrad,tensorflow/tensorflow/python/ops/nn_grad.py,1012,function,"Returns the gradients for the 3 inputs of FusedBatchNormGrad. Args: op: The FusedBatchNormGradOp for which we need to compute gradients. *grad: An argument list for tensors of gradients wrt the outputs with grad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as grad_grad_offset. Returns: A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y is the gradient for grad_y, grad_x the gradient for x, grad_scale the gradient for scale." 9502,_FusedBatchNormGradGradV2,tensorflow/tensorflow/python/ops/nn_grad.py,1050,function, 9503,_FusedBatchNormGradGradV3,tensorflow/tensorflow/python/ops/nn_grad.py,1055,function, 9504,_L2LossGrad,tensorflow/tensorflow/python/ops/nn_grad.py,1061,function,"Return the gradients for L2Loss. Args: op: The L2LossOp for which we need to generate gradients. grad: Tensor containing a single number. Returns: The gradient, which is (x * grad)." 9505,_TopKGrad,tensorflow/tensorflow/python/ops/nn_grad.py,1076,function,"Return the gradients for TopK. Args: op: The TopKOp for which we need to generate gradients. grad: Tensor. The gradients passed to the TopKOp. Returns: A list of two tensors, the first being the gradient w.r.t. the input of TopK, and the second being the gradient w.r.t. the indices (all zero)." 9506,_NthElementGrad,tensorflow/tensorflow/python/ops/nn_grad.py,1121,function,"Return the gradients for NthElement. Args: op: The NthElementOp for which we need to generate gradients. grad: Tensor. The gradients passed to the NthElementOp. Returns: A list of two tensors, the first being the gradient w.r.t. the input, the second being the gradient w.r.t. the N (None)."
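The `_L2LossGrad` rule above, "(x * grad)", is easy to verify: `l2_loss(x) = sum(x**2) / 2`, so with an upstream gradient of 1 the gradient is exactly `x`. A minimal eager-mode sketch:

```python
import tensorflow as tf

x = tf.constant([1.0, -2.0, 3.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    loss = tf.nn.l2_loss(x)  # sum(x**2) / 2

tf.debugging.assert_near(tape.gradient(loss, x), x)
```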
9507,SoftmaxOpTest,tensorflow/tensorflow/python/ops/nn_grad_test.py,38,class, 9508,Relu6OpTest,tensorflow/tensorflow/python/ops/nn_grad_test.py,61,class, 9509,Conv2dOpTest,tensorflow/tensorflow/python/ops/nn_grad_test.py,80,class, 9510,DepthwiseConv2dTest,tensorflow/tensorflow/python/ops/nn_grad_test.py,131,class, 9511,EluGradOpTest,tensorflow/tensorflow/python/ops/nn_grad_test.py,186,class, 9512,SeluGradOpTest,tensorflow/tensorflow/python/ops/nn_grad_test.py,223,class, 9513,log_poisson_loss,tensorflow/tensorflow/python/ops/nn_impl.py,49,function,"Computes log Poisson loss given `log_input`. Gives the log-likelihood loss between the prediction and the target under the assumption that the target has a Poisson distribution. Caveat: By default, this is not the exact loss, but the loss minus a constant term [log(z!)]. That has no effect for optimization, but does not play well with relative loss comparisons. To compute an approximation of the log factorial term, specify compute_full_loss=True to enable Stirling's Approximation. For brevity, let `c = log(x) = log_input`, `z = targets`. The log Poisson loss is -log(exp(-x) * (x^z) / z!) = -log(exp(-x) * (x^z)) + log(z!) ~ -log(exp(-x)) - log(x^z) [+ z * log(z) - z + 0.5 * log(2 * pi * z)] [ Note the second term is the Stirling's Approximation for log(z!). It is invariant to x and does not affect optimization, though important for correct relative loss comparisons. It is only computed when compute_full_loss == True. ] = x - z * log(x) [+ z * log(z) - z + 0.5 * log(2 * pi * z)] = exp(c) - z * c [+ z * log(z) - z + 0.5 * log(2 * pi * z)] Args: targets: A `Tensor` of the same type and shape as `log_input`. log_input: A `Tensor` of type `float32` or `float64`. compute_full_loss: whether to compute the full loss. If false, a constant term is dropped in favor of more efficient optimization. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `log_input` with the componentwise log Poisson losses. Raises: ValueError: If `log_input` and `targets` do not have the same shape." 9514,sigmoid_cross_entropy_with_logits,tensorflow/tensorflow/python/ops/nn_impl.py,115,function,"Computes sigmoid cross entropy given `logits`. Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time. For brevity, let `x = logits`, `z = labels`. The logistic loss is z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) For x < 0, to avoid overflow in exp(-x), we reformulate the above x - x * z + log(1 + exp(-x)) = log(exp(x)) - x * z + log(1 + exp(-x)) = - x * z + log(1 + exp(x)) Hence, to ensure stability and avoid overflow, the implementation uses this equivalent formulation max(x, 0) - x * z + log(1 + exp(-abs(x))) `logits` and `labels` must have the same type and shape. Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: A `Tensor` of the same type and shape as `logits`. logits: A `Tensor` of type `float32` or `float64`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise logistic losses.
Raises: ValueError: If `logits` and `labels` do not have the same shape." 9515,sigmoid_cross_entropy_with_logits_v2,tensorflow/tensorflow/python/ops/nn_impl.py,198,function,"Computes sigmoid cross entropy given `logits`. Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time. For brevity, let `x = logits`, `z = labels`. The logistic loss is z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) For x < 0, to avoid overflow in exp(-x), we reformulate the above x - x * z + log(1 + exp(-x)) = log(exp(x)) - x * z + log(1 + exp(-x)) = - x * z + log(1 + exp(x)) Hence, to ensure stability and avoid overflow, the implementation uses this equivalent formulation max(x, 0) - x * z + log(1 + exp(-abs(x))) `logits` and `labels` must have the same type and shape. Args: labels: A `Tensor` of the same type and shape as `logits`. logits: A `Tensor` of type `float32` or `float64`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise logistic losses. Raises: ValueError: If `logits` and `labels` do not have the same shape." 9516,weighted_cross_entropy_with_logits_v2,tensorflow/tensorflow/python/ops/nn_impl.py,249,function,"Computes a weighted cross entropy. This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight` allows one to trade off recall and precision by up- or down-weighting the cost of a positive error relative to a negative error. The usual cross-entropy cost is defined as: labels * -log(sigmoid(logits)) + (1 - labels) * -log(1 - sigmoid(logits)) A value `pos_weight > 1` decreases the false negative count, hence increasing the recall. Conversely setting `pos_weight < 1` decreases the false positive count and increases the precision. This can be seen from the fact that `pos_weight` is introduced as a multiplicative coefficient for the positive labels term in the loss expression: labels * -log(sigmoid(logits)) * pos_weight + (1 - labels) * -log(1 - sigmoid(logits)) For brevity, let `x = logits`, `z = labels`, `q = pos_weight`. The loss is: qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))) = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x)) = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow, the implementation uses (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) `logits` and `labels` must have the same type and shape. Args: labels: A `Tensor` of the same type and shape as `logits`. logits: A `Tensor` of type `float32` or `float64`. pos_weight: A coefficient to use on the positive examples. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise weighted logistic losses. Raises: ValueError: If `logits` and `labels` do not have the same shape."
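The sigmoid cross-entropy entries above derive the numerically stable form `max(x, 0) - x * z + log(1 + exp(-abs(x)))`. A short sketch (assuming TensorFlow 2.x; the logits and labels are illustrative only) confirming that this formulation matches `tf.nn.sigmoid_cross_entropy_with_logits`:

```python
import tensorflow as tf

x = tf.constant([-50.0, -1.0, 0.0, 1.0, 50.0])  # logits
z = tf.constant([0.0, 1.0, 1.0, 0.0, 1.0])      # labels

# Stable formulation quoted in the docstring above.
stable = tf.maximum(x, 0.0) - x * z + tf.math.log1p(tf.exp(-tf.abs(x)))
builtin = tf.nn.sigmoid_cross_entropy_with_logits(labels=z, logits=x)
print(tf.reduce_max(tf.abs(stable - builtin)).numpy())  # ~0.0
```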
9517,weighted_cross_entropy_with_logits,tensorflow/tensorflow/python/ops/nn_impl.py,329,function,"Computes a weighted cross entropy. This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight` allows one to trade off recall and precision by up- or down-weighting the cost of a positive error relative to a negative error. The usual cross-entropy cost is defined as: labels * -log(sigmoid(logits)) + (1 - labels) * -log(1 - sigmoid(logits)) A value `pos_weight > 1` decreases the false negative count, hence increasing the recall. Conversely setting `pos_weight < 1` decreases the false positive count and increases the precision. This can be seen from the fact that `pos_weight` is introduced as a multiplicative coefficient for the positive labels term in the loss expression: labels * -log(sigmoid(logits)) * pos_weight + (1 - labels) * -log(1 - sigmoid(logits)) For brevity, let `x = logits`, `z = labels`, `q = pos_weight`. The loss is: qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))) = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x)) = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow, the implementation uses (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) `logits` and `labels` must have the same type and shape. Args: labels: A `Tensor` of the same type and shape as `logits`. logits: A `Tensor` of type `float32` or `float64`. pos_weight: A coefficient to use on the positive examples. name: A name for the operation (optional). targets: Deprecated alias for labels. Returns: A `Tensor` of the same shape as `logits` with the componentwise weighted logistic losses. Raises: ValueError: If `logits` and `labels` do not have the same shape." 9518,compute_average_loss,tensorflow/tensorflow/python/ops/nn_impl.py,393,function,"Scales per-example losses with sample_weights and computes their average. Usage with distribution strategy and custom training loop: ```python with strategy.scope(): def compute_loss(labels, predictions, sample_weight=None): # If you are using a `Loss` class instead, set reduction to `NONE` so that # we can do the reduction afterwards and divide by global batch size. per_example_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, predictions) # Compute loss that is scaled by sample_weight and by global batch size. return tf.nn.compute_average_loss( per_example_loss, sample_weight=sample_weight, global_batch_size=GLOBAL_BATCH_SIZE) ``` Args: per_example_loss: Per-example loss. sample_weight: Optional weighting for each example. global_batch_size: Optional global batch size value. Defaults to (size of first dimension of `losses`) * (number of replicas). Returns: Scalar loss value." 9519,scale_regularization_loss,tensorflow/tensorflow/python/ops/nn_impl.py,451,function,"Scales the sum of the given regularization losses by number of replicas. Usage with distribution strategy and custom training loop: ```python with strategy.scope(): def compute_loss(self, label, predictions): per_example_loss = tf.keras.losses.sparse_categorical_crossentropy( labels, predictions) # Compute loss that is scaled by sample_weight and by global batch size.
loss = tf.nn.compute_average_loss( per_example_loss, sample_weight=sample_weight, global_batch_size=GLOBAL_BATCH_SIZE) # Add scaled regularization losses. loss += tf.nn.scale_regularization_loss(tf.nn.l2_loss(weights)) return loss ``` Args: regularization_loss: Regularization loss. Returns: Scalar loss value." 9520,relu_layer,tensorflow/tensorflow/python/ops/nn_impl.py,490,function,"Computes Relu(x * weight + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified ""nn_relu_layer"" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units." 9521,swish,tensorflow/tensorflow/python/ops/nn_impl.py,515,function,"Computes the SiLU or Swish activation function: `x * sigmoid(x)`. The SiLU activation function was introduced in ""Gaussian Error Linear Units (GELUs)"" [Hendrycks et al. 2016](https://arxiv.org/abs/1606.08415) and ""Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning"" [Elfwing et al. 2017](https://arxiv.org/abs/1702.03118) and was independently discovered (and called swish) in ""Searching for Activation Functions"" [Ramachandran et al. 2017](https://arxiv.org/abs/1710.05941) Args: features: A `Tensor` representing preactivation values. name: A name for the operation (optional). Returns: The activation value." 9522,normalize,tensorflow/tensorflow/python/ops/nn_impl.py,557,function,"Normalizes `tensor` along dimension `axis` using specified norm. This uses `tf.linalg.norm` to compute the norm along `axis`. This function can compute several different vector norms (the 1-norm, the Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and matrix norms (Frobenius, 1-norm, 2-norm and inf-norm). Args: tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128` ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`, `1`, `2`, `np.inf` and any positive real number yielding the corresponding p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if `tensor` is a matrix and equivalent to 2-norm for vectors. Some restrictions apply: a) The Frobenius norm `'fro'` is not defined for vectors, b) If axis is a 2-tuple (matrix norm), only `'euclidean'`, '`fro'`, `1`, `2`, `np.inf` are supported. See the description of `axis` on how to compute norms for a batch of vectors or matrices stored in a tensor. axis: If `axis` is `None` (the default), the input is considered a vector and a single vector norm is computed over the entire set of values in the tensor, i.e. `norm(tensor, ord=ord)` is equivalent to `norm(reshape(tensor, [-1]), ord=ord)`. If `axis` is a Python integer, the input is considered a batch of vectors, and `axis` determines the axis in `tensor` over which to compute vector norms. If `axis` is a 2-tuple of Python integers it is considered a batch of matrices and `axis` determines the axes in `tensor` over which to compute a matrix norm. Negative indices are supported. Example: If you are passing a tensor that can be either a matrix or a batch of matrices at runtime, pass `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are computed. name: The name of the op. Returns: normalized: A normalized `Tensor` with the same shape as `tensor`. norm: The computed norms with the same shape and dtype `tensor` but the final axis is 1 instead. 
Same as running `tf.cast(tf.linalg.norm(tensor, ord, axis, keepdims=True), tensor.dtype)`. Raises: ValueError: If `ord` or `axis` is invalid." 9523,l2_normalize,tensorflow/tensorflow/python/ops/nn_impl.py,611,function,"Normalizes along dimension `axis` using an L2 norm. For a 1-D tensor with `axis = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `axis`. Args: x: A `Tensor`. axis: Dimension along which to normalize. A scalar or a vector of integers. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). dim: Deprecated alias for axis. Returns: A `Tensor` with the same shape as `x`." 9524,l2_normalize_v2,tensorflow/tensorflow/python/ops/nn_impl.py,639,function,"Normalizes along dimension `axis` using an L2 norm. For a 1-D tensor with `axis = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `axis`. Args: x: A `Tensor`. axis: Dimension along which to normalize. A scalar or a vector of integers. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`." 9525,_count_nonzero,tensorflow/tensorflow/python/ops/nn_impl.py,676,function,"Same as math_ops.count_nonzero. The reduction is done in dtype, which can be faster for 32-bit dtypes. Args: input_tensor: numeric tensor dtype: reduction dtype Returns: number of nonzero values with type dtype" 9526,zero_fraction,tensorflow/tensorflow/python/ops/nn_impl.py,699,function,"Returns the fraction of zeros in `value`. If `value` is empty, the result is `nan`. This is useful in summaries to measure and report sparsity. For example, ```python z = tf.nn.relu(...) summ = tf.compat.v1.summary.scalar('sparsity', tf.nn.zero_fraction(z)) ``` Args: value: A tensor of numeric type. name: A name for the operation (optional). Returns: The fraction of zeros in `value`, with type `float32`." 9527,depthwise_conv2d,tensorflow/tensorflow/python/ops/nn_impl.py,742,function,"Depthwise 2-D convolution. Given a 4D input tensor ('NHWC' or 'NCHW' data formats) and a filter tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]` containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. The output has `in_channels * channel_multiplier` channels. In detail, with the default NHWC format, output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di, strides[2] * j + rate[1] * dj, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. If any value in `rate` is greater than 1, we perform atrous depthwise convolution, in which case all values in the `strides` tensor must be equal to 1. Usage Example: >>> x = np.array([ ... [1., 2.], ... [3., 4.], ... [5., 6.] ... ], dtype=np.float32).reshape((1, 3, 2, 1)) >>> kernel = np.array([ ... [1., 2.], ... [3., 4] ... ], dtype=np.float32).reshape((2, 1, 1, 2)) >>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], ...
padding='VALID').numpy() array([[[[10., 14.], [14., 20.]], [[18., 26.], [22., 32.]]]], dtype=float32) >>> tf.compat.v1.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], ... padding=[[0, 0], [1, 0], [1, 0], [0, 0]] ... ).numpy() array([[[[ 0., 0.], [ 3., 4.], [ 6., 8.]], [[ 0., 0.], [10., 14.], [14., 20.]], [[ 0., 0.], [18., 26.], [22., 32.]]]], dtype=float32) Args: input: 4-D with shape according to `data_format`. filter: 4-D with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. strides: 1-D of size 4. The stride of the sliding window for each dimension of `input`. padding: Controls how to pad the image before applying the convolution. Can be the string `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. rate: 1-D of size 2. The dilation rate in which we sample input values across the `height` and `width` dimensions in atrous convolution. If it is greater than 1, then all values of strides must be 1. name: A name for this operation (optional). data_format: The data format for input. Either ""NHWC"" (default) or ""NCHW"". dilations: Alias of rate. Returns: A 4-D `Tensor` with shape according to `data_format`. E.g., for ""NHWC"" format, shape is `[batch, out_height, out_width, in_channels * channel_multiplier].`" 9528,depthwise_conv2d_v2,tensorflow/tensorflow/python/ops/nn_impl.py,871,function,"Depthwise 2-D convolution. Given a 4D input tensor ('NHWC' or 'NCHW' data formats) and a filter tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]` containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. The output has `in_channels * channel_multiplier` channels. In detail, with the default NHWC format, output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di, strides[2] * j + rate[1] * dj, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. If any value in `rate` is greater than 1, we perform atrous depthwise convolution, in which case all values in the `strides` tensor must be equal to 1. Usage Example: >>> x = np.array([ ... [1., 2.], ... [3., 4.], ... [5., 6.] ... ], dtype=np.float32).reshape((1, 3, 2, 1)) >>> kernel = np.array([ ... [1., 2.], ... [3., 4] ... ], dtype=np.float32).reshape((2, 1, 1, 2)) >>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], ... padding='VALID').numpy() array([[[[10., 14.], [14., 20.]], [[18., 26.], [22., 32.]]]], dtype=float32) >>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], ... padding=[[0, 0], [1, 0], [1, 0], [0, 0]]).numpy() array([[[[ 0., 0.], [ 3., 4.], [ 6., 8.]], [[ 0., 0.], [10., 14.], [14., 20.]], [[ 0., 0.], [18., 26.], [22., 32.]]]], dtype=float32) Args: input: 4-D with shape according to `data_format`. filter: 4-D with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. strides: 1-D of size 4.
The stride of the sliding window for each dimension of `input`. padding: Controls how to pad the image before applying the convolution. Can be the string `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: The data format for input. Either ""NHWC"" (default) or ""NCHW"". dilations: 1-D of size 2. The dilation rate in which we sample input values across the `height` and `width` dimensions in atrous convolution. If it is greater than 1, then all values of strides must be 1. name: A name for this operation (optional). Returns: A 4-D `Tensor` with shape according to `data_format`. E.g., for ""NHWC"" format, shape is `[batch, out_height, out_width, in_channels * channel_multiplier].`" 9529,separable_conv2d,tensorflow/tensorflow/python/ops/nn_impl.py,969,function,"2-D convolution with separable filters. Performs a depthwise convolution that acts separately on channels followed by a pointwise convolution that mixes channels. Note that this is separability between dimensions `[1, 2]` and `3`, not spatial separability between dimensions `1` and `2`. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q, r} input[b, strides[1] * i + di, strides[2] * j + dj, q] * depthwise_filter[di, dj, q, r] * pointwise_filter[0, 0, q * channel_multiplier + r, k] `strides` controls the strides for the depthwise convolution only, since the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. If any value in `rate` is greater than 1, we perform atrous depthwise convolution, in which case all values in the `strides` tensor must be equal to 1. Args: input: 4-D `Tensor` with shape according to `data_format`. depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. Contains `in_channels` convolutional filters of depth 1. pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise filter to mix channels after `depthwise_filter` has convolved spatially. strides: 1-D of size 4. The strides for the depthwise convolution for each dimension of `input`. padding: Controls how to pad the image before applying the depthwise convolution. Can be the string `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a Python list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. rate: 1-D of size 2. The dilation rate in which we sample input values across the `height` and `width` dimensions in atrous convolution. If it is greater than 1, then all values of strides must be 1. name: A name for this operation (optional). data_format: The data format for input. Either ""NHWC"" (default) or ""NCHW"".
dilations: Alias of rate. Returns: A 4-D `Tensor` with shape according to 'data_format'. For example, with data_format=""NHWC"", shape is [batch, out_height, out_width, out_channels]." 9530,separable_conv2d_v2,tensorflow/tensorflow/python/ops/nn_impl.py,1077,function,"2-D convolution with separable filters. Performs a depthwise convolution that acts separately on channels followed by a pointwise convolution that mixes channels. Note that this is separability between dimensions `[1, 2]` and `3`, not spatial separability between dimensions `1` and `2`. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q, r} input[b, strides[1] * i + di, strides[2] * j + dj, q] * depthwise_filter[di, dj, q, r] * pointwise_filter[0, 0, q * channel_multiplier + r, k] `strides` controls the strides for the depthwise convolution only, since the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. If any value in `rate` is greater than 1, we perform atrous depthwise convolution, in which case all values in the `strides` tensor must be equal to 1. Args: input: 4-D `Tensor` with shape according to `data_format`. depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. Contains `in_channels` convolutional filters of depth 1. pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise filter to mix channels after `depthwise_filter` has convolved spatially. strides: 1-D of size 4. The strides for the depthwise convolution for each dimension of `input`. padding: Controls how to pad the image before applying the depthwise convolution. Can be the string `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a Python list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: The data format for input. Either ""NHWC"" (default) or ""NCHW"". dilations: 1-D of size 2. The dilation rate in which we sample input values across the `height` and `width` dimensions in atrous convolution. If it is greater than 1, then all values of strides must be 1. name: A name for this operation (optional). Returns: A 4-D `Tensor` with shape according to 'data_format'. For example, with data_format=""NHWC"", shape is [batch, out_height, out_width, out_channels]." 9531,sufficient_statistics,tensorflow/tensorflow/python/ops/nn_impl.py,1153,function,"Calculate the sufficient statistics for the mean and variance of `x`. These sufficient statistics are computed using the one pass algorithm on an input that's optionally shifted. See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data For example: >>> t = [[1, 2, 3], [4, 5, 6]] >>> sufficient_statistics(t, [1]) (<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,), dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,), dtype=int32, numpy=array([14, 77], dtype=int32)>, None) >>> sufficient_statistics(t, [-1]) (<tf.Tensor: shape=(), dtype=int32, numpy=3>, <tf.Tensor: shape=(2,), dtype=int32, numpy=array([ 6, 15], dtype=int32)>, <tf.Tensor: shape=(2,), dtype=int32, numpy=array([14, 77], dtype=int32)>, None) Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. As in Python, the axes can also be negative numbers. A negative axis is interpreted as counting from the end of the rank, i.e., axis + rank(values)-th dimension.
shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keep_dims: produce statistics with the same dimensionality as the input. name: Name used to scope the operations that compute the sufficient stats. keepdims: Alias for keep_dims. Returns: Four `Tensor` objects of the same type as `x`: * the count (number of elements to average over). * the (possibly shifted) sum of the elements in the array. * the (possibly shifted) sum of squares of the elements in the array. * the shift by which the mean must be corrected or None if `shift` is None." 9532,sufficient_statistics_v2,tensorflow/tensorflow/python/ops/nn_impl.py,1228,function,"Calculate the sufficient statistics for the mean and variance of `x`. These sufficient statistics are computed using the one pass algorithm on an input that's optionally shifted. See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keepdims: produce statistics with the same dimensionality as the input. name: Name used to scope the operations that compute the sufficient stats. Returns: Four `Tensor` objects of the same type as `x`: * the count (number of elements to average over). * the (possibly shifted) sum of the elements in the array. * the (possibly shifted) sum of squares of the elements in the array. * the shift by which the mean must be corrected or None if `shift` is None." 9533,normalize_moments,tensorflow/tensorflow/python/ops/nn_impl.py,1258,function,"Calculate the mean and variance based on the sufficient statistics. Args: counts: A `Tensor` containing the total count of the data (one value). mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A `Tensor` containing the variance sufficient statistics: the (possibly shifted) squared sum of the data to compute the variance over. shift: A `Tensor` containing the value by which the data is shifted for numerical stability, or `None` if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`." 9534,moments,tensorflow/tensorflow/python/ops/nn_impl.py,1291,function,"Calculate the mean and variance of `x`. The mean and variance are calculated by aggregating the contents of `x` across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and variance of a vector. Note: shift is currently not used; the true mean is computed and used. When using these moments for batch normalization (see `tf.nn.batch_normalization`): * for so-called ""global normalization"", used with convolutional filters with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`. * for simple batch normalization pass `axes=[0]` (batch only). Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: Not used in the current implementation. name: Name used to scope the operations that compute the moments. keep_dims: produce moments with the same dimensionality as the input. keepdims: Alias for keep_dims.
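Illustrating the `moments` entry above, a minimal sketch (assuming TensorFlow 2.x; the input values are arbitrary) where `axes=[0]` produces per-column batch statistics:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
mean, variance = tf.nn.moments(x, axes=[0])  # aggregate over the batch axis
print(mean.numpy())      # [3. 4.]
print(variance.numpy())  # [2.6666667 2.6666667], i.e. 8/3 per column
```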
Returns: Two `Tensor` objects: `mean` and `variance`." 9535,moments_v2,tensorflow/tensorflow/python/ops/nn_impl.py,1357,function,"Calculates the mean and variance of `x`. The mean and variance are calculated by aggregating the contents of `x` across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and variance of a vector. Note: shift is currently not used; the true mean is computed and used. When using these moments for batch normalization (see `tf.nn.batch_normalization`): * for so-called ""global normalization"", used with convolutional filters with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`. * for simple batch normalization pass `axes=[0]` (batch only). Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: Not used in the current implementation. keepdims: produce moments with the same dimensionality as the input. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`." 9536,weighted_moments,tensorflow/tensorflow/python/ops/nn_impl.py,1394,function,"Returns the frequency-weighted mean and variance of `x`. Args: x: A tensor. axes: 1-d tensor of int32 values; these are the axes along which to compute mean and variance. frequency_weights: A tensor of positive weights which can be broadcast with x. name: Name used to scope the operation. keep_dims: Produce moments with the same dimensionality as the input. keepdims: Alias of keep_dims. Returns: Two tensors: `weighted_mean` and `weighted_variance`." 9537,weighted_moments_v2,tensorflow/tensorflow/python/ops/nn_impl.py,1473,function,"Returns the frequency-weighted mean and variance of `x`. Args: x: A tensor. axes: 1-d tensor of int32 values; these are the axes along which to compute mean and variance. frequency_weights: A tensor of positive weights which can be broadcast with x. keepdims: Produce moments with the same dimensionality as the input. name: Name used to scope the operation. Returns: Two tensors: `weighted_mean` and `weighted_variance`." 9538,batch_normalization,tensorflow/tensorflow/python/ops/nn_impl.py,1498,function,"Batch normalization. Normalizes a tensor by `mean` and `variance`, and applies (optionally) a `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\): \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\) `mean`, `variance`, `offset` and `scale` are all expected to be of one of two shapes: * In all generality, they can have the same number of dimensions as the input `x`, with identical sizes as `x` for the dimensions that are not normalized over (the 'depth' dimension(s)), and dimension 1 for the others which are being normalized over. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keepdims=True)` during training, or running averages thereof during inference. * In the common case where the 'depth' dimension is the last dimension in the input tensor `x`, they may be one dimensional tensors of the same size as the 'depth' dimension. This is the case for example for the common `[batch, depth]` layout of fully-connected layers, and `[batch, height, width, depth]` for convolutions. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keepdims=False)` during training, or running averages thereof during inference. See equation 11 in Algorithm 2 of source: [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy] (http://arxiv.org/abs/1502.03167). 
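Tying the `moments` and `batch_normalization` entries together, a minimal sketch (assuming TensorFlow 2.x; the gamma/beta values are illustrative only) of feeding `tf.nn.moments` statistics into `tf.nn.batch_normalization`:

```python
import tensorflow as tf

x = tf.random.normal([8, 4])                 # [batch, depth]
mean, variance = tf.nn.moments(x, axes=[0])  # per-depth statistics
beta = tf.zeros([4])                         # offset
gamma = tf.ones([4])                         # scale
y = tf.nn.batch_normalization(x, mean, variance, beta, gamma,
                              variance_epsilon=1e-3)
# Each column of y now has roughly zero mean and unit variance.
print(tf.nn.moments(y, axes=[0])[0].numpy())
```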
Args: x: Input `Tensor` of arbitrary dimensionality. mean: A mean `Tensor`. variance: A variance `Tensor`. offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or None. If present, will be added to the normalized tensor. scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or `None`. If present, the scale is applied to the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. name: A name for this operation (optional). Returns: the normalized, scaled, offset tensor. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://arxiv.org/abs/1502.03167) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))" 9539,fused_batch_norm,tensorflow/tensorflow/python/ops/nn_impl.py,1569,function,"Batch normalization. See Source: [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift; S. Ioffe, C. Szegedy] (http://arxiv.org/abs/1502.03167). Args: x: Input `Tensor` of 4 dimensions. scale: A `Tensor` of 1 dimension for scaling. offset: A `Tensor` of 1 dimension for bias. mean: A `Tensor` of 1 dimension for population mean. The shape and meaning of this argument depends on the value of is_training and exponential_avg_factor as follows: is_training==False (inference): Mean must be a `Tensor` of the same shape as scale containing the estimated population mean computed during training. is_training==True and exponential_avg_factor == 1.0: Mean must be None. is_training==True and exponential_avg_factor != 1.0: Mean must be a `Tensor` of the same shape as scale containing the exponential running mean. variance: A `Tensor` of 1 dimension for population variance. The shape and meaning of this argument depends on the value of is_training and exponential_avg_factor as follows: is_training==False (inference): Variance must be a `Tensor` of the same shape as scale containing the estimated population variance computed during training. is_training==True and exponential_avg_factor == 1.0: Variance must be None. is_training==True and exponential_avg_factor != 1.0: Variance must be a `Tensor` of the same shape as scale containing the exponential running variance. epsilon: A small float number added to the variance of x. data_format: The data format for x. Either ""NHWC"" (default) or ""NCHW"". is_training: A bool value to specify if the operation is used for training or inference. name: A name for this operation (optional). exponential_avg_factor: A float number (usually between 0 and 1) used for controlling the decay of the running population average of mean and variance. If set to 1.0, the current batch average is returned. Returns: y: A 4D Tensor for the normalized, scaled, offsetted x. running_mean: A 1D Tensor for the exponential running mean of x. The output value is (1 - exponential_avg_factor) * mean + exponential_avg_factor * batch_mean, where batch_mean is the mean of the current batch in x. running_var: A 1D Tensor for the exponential running variance. The output value is (1 - exponential_avg_factor) * variance + exponential_avg_factor * batch_variance, where batch_variance is the variance of the current batch in x.
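A sketch of a training-mode call to the fused op described above (via the `tf.compat.v1.nn.fused_batch_norm` endpoint, assuming TensorFlow 2.x): with `is_training=True` and the default `exponential_avg_factor=1.0`, `mean` and `variance` are omitted and the op returns the batch statistics alongside the normalized output.

```python
import tensorflow as tf

x = tf.random.normal([2, 4, 4, 3])  # NHWC input
scale = tf.ones([3])
offset = tf.zeros([3])
y, batch_mean, batch_var = tf.compat.v1.nn.fused_batch_norm(
    x, scale, offset, epsilon=1e-3, is_training=True)
print(y.shape, batch_mean.numpy(), batch_var.numpy())
```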
References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))" 9540,batch_norm_with_global_normalization,tensorflow/tensorflow/python/ops/nn_impl.py,1675,function,"Batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. Args: t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If ""scale_after_normalization"" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma. name: A name for this operation (optional). input: Alias for t. mean: Alias for m. variance: Alias for v. Returns: A batch-normalized `t`. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))" 9541,batch_norm_with_global_normalization_v2,tensorflow/tensorflow/python/ops/nn_impl.py,1730,function,"Batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. Args: input: A 4D input Tensor. mean: A 1D mean Tensor with size matching the last dimension of input. This is the first output from tf.nn.moments, or a saved moving average thereof. variance: A 1D variance Tensor with size matching the last dimension of input. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of input. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of input. If ""scale_after_normalization"" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulting tensor needs to be multiplied with gamma. name: A name for this operation (optional). Returns: A batch-normalized `input`. References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html) ([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))" 9542,_sum_rows,tensorflow/tensorflow/python/ops/nn_impl.py,1780,function,Returns a vector summing up each row of the matrix x. 9543,_compute_sampled_logits,tensorflow/tensorflow/python/ops/nn_impl.py,1793,function,"Helper function for nce_loss and sampled_softmax_loss functions. Computes sampled output training logits and labels suitable for implementing e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see sampled_softmax_loss). Note: In the case where num_true > 1, we assign to each target class the target probability 1 / num_true so that the target probabilities sum to 1 per-example.
Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape `[num_classes, dim]`. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The (possibly-partitioned) class biases. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) subtract_log_q: A `bool`. Whether to subtract the log expected count of the labels in the sample to get the logits of the true labels. Default is True. Turn off for Negative Sampling. remove_accidental_hits: A `bool`. Whether to remove ""accidental hits"" where a sampled class equals one of the target classes. Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `""div""` and `""mod""` are supported. Default is `""mod""`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). seed: random seed for candidate sampling. Defaults to None, which doesn't set the op-level random seed for candidate sampling. Returns: out_logits: `Tensor` object with shape `[batch_size, num_true + num_sampled]`, for passing to either `nn.sigmoid_cross_entropy_with_logits` (NCE) or `nn.softmax_cross_entropy_with_logits` (sampled softmax). out_labels: A Tensor object with the same shape as `out_logits`." 9544,nce_loss_v2,tensorflow/tensorflow/python/ops/nn_impl.py,1980,function,"Computes and returns the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). Also see our [Candidate Sampling Algorithms Reference](https://www.tensorflow.org/extras/candidate_sampling.pdf) A common use case is to use this method for training, and calculate the full sigmoid loss for evaluation or inference as in the following example: ```python if mode == ""train"": loss = tf.nn.nce_loss( weights=weights, biases=biases, labels=labels, inputs=inputs, ...) elif mode == ""eval"": logits = tf.matmul(inputs, tf.transpose(weights)) logits = tf.nn.bias_add(logits, biases) labels_one_hot = tf.one_hot(labels, n_classes) loss = tf.nn.sigmoid_cross_entropy_with_logits( labels=labels_one_hot, logits=logits) loss = tf.reduce_sum(loss, axis=1) ``` Note: when doing embedding lookup on `weights` and `bias`, ""div"" partition strategy will be used. Support for other partition strategies will be added later. Note: By default this uses a log-uniform (Zipfian) distribution for sampling, so your labels must be sorted in order of decreasing frequency to achieve good results. For more details, see `tf.random.log_uniform_candidate_sampler`. Note: In the case where `num_true` > 1, we assign to each target class the target probability 1 / `num_true` so that the target probabilities sum to 1 per-example.
Note: It would be useful to allow a variable number of target classes per example. We hope to provide this functionality in a future release. For now, if you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of negative classes to randomly sample per batch. This single sample of negative classes is evaluated for each element in the batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. Whether to remove ""accidental hits"" where a sampled class equals one of the target classes. If set to `True`, this is a ""Sampled Logistic"" loss instead of NCE, and we are learning to generate log-odds instead of log probabilities. See our [Candidate Sampling Algorithms Reference] (https://www.tensorflow.org/extras/candidate_sampling.pdf). Default is False. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example NCE losses." 9545,nce_loss,tensorflow/tensorflow/python/ops/nn_impl.py,2085,function,"Computes and returns the noise-contrastive estimation training loss. A common use case is to use this method for training, and calculate the full sigmoid loss for evaluation or inference. In this case, you must set `partition_strategy=""div""` for the two losses to be consistent, as in the following example: ```python if mode == ""train"": loss = tf.nn.nce_loss( weights=weights, biases=biases, labels=labels, inputs=inputs, ..., partition_strategy=""div"") elif mode == ""eval"": logits = tf.matmul(inputs, tf.transpose(weights)) logits = tf.nn.bias_add(logits, biases) labels_one_hot = tf.one_hot(labels, n_classes) loss = tf.nn.sigmoid_cross_entropy_with_logits( labels=labels_one_hot, logits=logits) loss = tf.reduce_sum(loss, axis=1) ``` Note: By default this uses a log-uniform (Zipfian) distribution for sampling, so your labels must be sorted in order of decreasing frequency to achieve good results. For more details, see `tf.random.log_uniform_candidate_sampler`. Note: In the case where `num_true` > 1, we assign to each target class the target probability 1 / `num_true` so that the target probabilities sum to 1 per-example. Note: It would be useful to allow a variable number of target classes per example. We hope to provide this functionality in a future release. For now, if you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. 
labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of negative classes to randomly sample per batch. This single sample of negative classes is evaluated for each element in the batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. Whether to remove ""accidental hits"" where a sampled class equals one of the target classes. If set to `True`, this is a ""Sampled Logistic"" loss instead of NCE, and we are learning to generate log-odds instead of log probabilities. See our Candidate Sampling Algorithms Reference ([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)). Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `""div""` and `""mod""` are supported. Default is `""mod""`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example NCE losses. References: Noise-contrastive estimation - A new estimation principle for unnormalized statistical models: [Gutmann et al., 2010](http://proceedings.mlr.press/v9/gutmann10a) ([pdf](http://proceedings.mlr.press/v9/gutmann10a/gutmann10a.pdf))" 9546,sampled_softmax_loss_v2,tensorflow/tensorflow/python/ops/nn_impl.py,2197,function,"Computes and returns the sampled softmax training loss. This is a faster way to train a softmax classifier over a huge number of classes. This operation is for training only. It is generally an underestimate of the full softmax loss. A common use case is to use this method for training, and calculate the full softmax loss for evaluation or inference as in the following example: ```python if mode == ""train"": loss = tf.nn.sampled_softmax_loss( weights=weights, biases=biases, labels=labels, inputs=inputs, ...) elif mode == ""eval"": logits = tf.matmul(inputs, tf.transpose(weights)) logits = tf.nn.bias_add(logits, biases) labels_one_hot = tf.one_hot(labels, n_classes) loss = tf.nn.softmax_cross_entropy_with_logits( labels=labels_one_hot, logits=logits) ``` See our [Candidate Sampling Algorithms Reference] (https://www.tensorflow.org/extras/candidate_sampling.pdf) Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007) ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math. Note: when doing embedding lookup on `weights` and `bias`, ""div"" partition strategy will be used. Support for other partition strategies will be added later. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-sharded) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`.
The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. whether to remove ""accidental hits"" where a sampled class equals one of the target classes. Default is True. seed: random seed for candidate sampling. Default to None, which doesn't set the op-level random seed for candidate sampling. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example sampled softmax losses." 9547,sampled_softmax_loss,tensorflow/tensorflow/python/ops/nn_impl.py,2289,function,"Computes and returns the sampled softmax training loss. This is a faster way to train a softmax classifier over a huge number of classes. This operation is for training only. It is generally an underestimate of the full softmax loss. A common use case is to use this method for training, and calculate the full softmax loss for evaluation or inference. In this case, you must set `partition_strategy=""div""` for the two losses to be consistent, as in the following example: ```python if mode == ""train"": loss = tf.nn.sampled_softmax_loss( weights=weights, biases=biases, labels=labels, inputs=inputs, ..., partition_strategy=""div"") elif mode == ""eval"": logits = tf.matmul(inputs, tf.transpose(weights)) logits = tf.nn.bias_add(logits, biases) labels_one_hot = tf.one_hot(labels, n_classes) loss = tf.nn.softmax_cross_entropy_with_logits( labels=labels_one_hot, logits=logits) ``` See our Candidate Sampling Algorithms Reference ([pdf](https://www.tensorflow.org/extras/candidate_sampling.pdf)). Also see Section 3 of (Jean et al., 2014) for the math. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-sharded) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. whether to remove ""accidental hits"" where a sampled class equals one of the target classes. Default is True. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `""div""` and `""mod""` are supported. Default is `""mod""`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). seed: random seed for candidate sampling. Default to None, which doesn't set the op-level random seed for candidate sampling. Returns: A `batch_size` 1-D tensor of per-example sampled softmax losses. 
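The nce_loss and sampled_softmax_loss entries above default to a log-uniform (Zipfian) candidate sampler, which is why class ids should be ordered by decreasing frequency. A small sketch (assuming TensorFlow 2.x; the class ids are illustrative only) of calling the sampler directly:

```python
import tensorflow as tf

true_classes = tf.constant([[0], [3]], dtype=tf.int64)  # [batch_size, num_true]
sampled, true_expected, sampled_expected = tf.random.log_uniform_candidate_sampler(
    true_classes=true_classes, num_true=1, num_sampled=5,
    unique=True, range_max=1000)
print(sampled.numpy())           # 5 class ids, skewed toward small (frequent) ids
print(sampled_expected.numpy())  # expected counts under the Zipfian distribution
```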
References: On Using Very Large Target Vocabulary for Neural Machine Translation: [Jean et al., 2014] (https://aclanthology.coli.uni-saarland.de/papers/P15-1001/p15-1001) ([pdf](http://aclweb.org/anthology/P15-1001))" 9548,LossUtilitiesTest,tensorflow/tensorflow/python/ops/nn_loss_scaling_utilities_test.py,35,class, 9549,_get_sequence,tensorflow/tensorflow/python/ops/nn_ops.py,68,function,Formats a value input for gen_nn_ops. 9550,_non_atrous_convolution,tensorflow/tensorflow/python/ops/nn_ops.py,107,function,"Computes sums of N-D convolutions (actually cross correlation). It is required that 1 <= N <= 3. This is used to implement the more generic `convolution` function, which extends the interface of this function with a `dilation_rate` parameter. Args: input: Rank N+2 tensor of type T of shape `[batch_size] + input_spatial_shape + [in_channels]` if `data_format` does not start with `""NC""`, or `[batch_size, in_channels] + input_spatial_shape` if `data_format` starts with `""NC""`. filter: Rank N+2 tensor of type T of shape `filter_spatial_shape + [in_channels, out_channels]`. Rank of either `input` or `filter` must be known. padding: Padding method to use, must be either ""VALID"" or ""SAME"". data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with ""NC""), or the second dimension (if `data_format` starts with ""NC""). For N=1, the valid values are ""NWC"" (default) and ""NCW"". For N=2, the valid values are ""NHWC"" (default) and ""NCHW"". For N=3, the valid values are ""NDHWC"" (default) and ""NCDHW"". strides: Sequence of N positive integers, defaults to `[1] * N`. name: Name prefix to use. Returns: Rank N+2 tensor of type T of shape `[batch_size] + output_spatial_shape + [out_channels]`, where if padding == ""SAME"": output_spatial_shape = input_spatial_shape if padding == ""VALID"": output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1. Raises: ValueError: if ranks are incompatible." 9551,_NonAtrousConvolution,tensorflow/tensorflow/python/ops/nn_ops.py,168,class,"Helper class for _non_atrous_convolution. Note that this class assumes that shapes of input and filter passed to `__call__` are compatible with `input_shape` and filter_shape passed to the constructor. Arguments: input_shape: static input shape, i.e. input.shape. filter_shape: static filter shape, i.e. filter.shape. padding: see _non_atrous_convolution. data_format: see _non_atrous_convolution. strides: see _non_atrous_convolution. name: see _non_atrous_convolution. num_batch_dims: (Optional.) The number of batch dimensions in the input; if not provided, the default of `1` is used." 9552,squeeze_batch_dims,tensorflow/tensorflow/python/ops/nn_ops.py,277,function,"Returns `unsqueeze_batch(op(squeeze_batch(inp)))`. Where `squeeze_batch` reshapes `inp` to shape `[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]` and `unsqueeze_batch` does the reverse reshape but on the output. Args: inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape` is length `inner_rank`. op: A callable that takes a single input tensor and returns a single. output tensor. inner_rank: A python integer. name: A string. Returns: `unsqueeze_batch_op(squeeze_batch(inp))`." 9553,dilation2d_v2,tensorflow/tensorflow/python/ops/nn_ops.py,328,function,"Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors. 
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default ""NHWC"" `data_format`. In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with `conv2d`, we use unmirrored filters): output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filters[dy, dx, c] Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros. Note on duality: The dilation of `input` by the `filters` is equal to the negation of the erosion of `-input` by the reflected `filters`. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D with shape `[batch, in_height, in_width, depth]`. filters: A `Tensor`. Must have the same type as `input`. 3-D with shape `[filter_height, filter_width, depth]`. strides: A list of `ints` that has length `>= 4`. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. padding: A `string` from: `""SAME"", ""VALID""`. The type of padding algorithm to use. data_format: A `string`, only `""NHWC""` is currently supported. dilations: A list of `ints` that has length `>= 4`. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`." 9554,dilation2d_v1,tensorflow/tensorflow/python/ops/nn_ops.py,396,function, 9555,with_space_to_batch,tensorflow/tensorflow/python/ops/nn_ops.py,415,function,"Performs `op` on the space-to-batch representation of `input`. This has the effect of transforming sliding window operations into the corresponding ""atrous"" operation in which the input is sampled at the specified `dilation_rate`. 
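A minimal sketch of the grayscale dilation described above (`dilation2d_v2`): with an all-zero structuring function the op reduces to max-pooling, as the docstring notes. Shapes are hypothetical, and the explicit `data_format`/`dilations` arguments reflect the TF 2 `tf.nn.dilation2d` entry point:

```python
import tensorflow as tf

x = tf.random.normal([1, 10, 10, 3])  # [batch, in_height, in_width, depth]
k = tf.zeros([3, 3, 3])               # all-zero filter: dilation == 3x3 max-pool

# Grayscale dilation; tf.nn.dilation2d requires data_format and dilations.
y = tf.nn.dilation2d(x, k, strides=[1, 1, 1, 1], padding="SAME",
                     data_format="NHWC", dilations=[1, 1, 1, 1])
```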
In the special case that `dilation_rate` is uniformly 1, this simply returns: op(input, num_spatial_dims, padding) Otherwise, it returns: batch_to_space_nd( op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings), num_spatial_dims, ""VALID""), adjusted_dilation_rate, adjusted_crops), where: adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)], adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2] defined as follows: We first define two int64 tensors `paddings` and `crops` of shape `[num_spatial_dims, 2]` based on the value of `padding` and the spatial dimensions of the `input`: If `padding = ""VALID""`, then: paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate) If `padding = ""SAME""`, then: dilated_filter_shape = filter_shape + (filter_shape - 1) * (dilation_rate - 1) paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate, [(dilated_filter_shape - 1) // 2, dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2]) Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial dimensions are contiguous starting at the second dimension, but the specified `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and `crops` in order to be usable with these operations. For a given dimension, if the block size is 1, and both the starting and ending padding and crop amounts are 0, then space_to_batch_nd effectively leaves that dimension alone, which is what is needed for dimensions not part of `spatial_dims`. Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case efficiently for any number of leading and trailing dimensions. For 0 <= i < len(spatial_dims), we assign: adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i] adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :] adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :] All unassigned values of `adjusted_dilation_rate` default to 1, while all unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0. Note in the case that `dilation_rate` is not uniformly 1, specifying ""VALID"" padding is equivalent to specifying `padding = ""SAME""` with a filter_shape of `[1]*N`. Advanced usage. Note the following optimization: A sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters and ""VALID"" padding net = with_space_to_batch(net, dilation_rate, ""VALID"", op_1) ... net = with_space_to_batch(net, dilation_rate, ""VALID"", op_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, ""VALID"") ... result = op_k(result, num_spatial_dims, ""VALID"") net = with_space_to_batch(net, dilation_rate, ""VALID"", combined_op) This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and `batch_to_space_nd`. Similarly, a sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters, ""SAME"" padding, and odd filter dimensions net = with_space_to_batch(net, dilation_rate, ""SAME"", op_1, filter_shape_1) ... net = with_space_to_batch(net, dilation_rate, ""SAME"", op_k, filter_shape_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, ""SAME"") ...
result = op_k(result, num_spatial_dims, ""SAME"") net = with_space_to_batch(net, dilation_rate, ""VALID"", combined_op) Args: input: Tensor of rank > max(spatial_dims). dilation_rate: int32 Tensor of *known* shape [num_spatial_dims]. padding: str constant equal to ""VALID"" or ""SAME"". op: Function that maps (input, num_spatial_dims, padding) -> output filter_shape: If padding = ""SAME"", specifies the shape of the convolution kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims]. If padding = ""VALID"", filter_shape is ignored and need not be specified. spatial_dims: Monotonically increasing sequence of `num_spatial_dims` integers (which are >= 1) specifying the spatial dimensions of `input` and output. Defaults to: `range(1, num_spatial_dims+1)`. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with ""NC""), or the second dimension (if `data_format` starts with ""NC""). For N=1, the valid values are ""NWC"" (default) and ""NCW"". For N=2, the valid values are ""NHWC"" (default) and ""NCHW"". For N=3, the valid values are ""NDHWC"" (default) and ""NCDHW"". Returns: The output Tensor as described above, dimensions will vary based on the op provided. Raises: ValueError: if `padding` is invalid or the arguments are incompatible. ValueError: if `spatial_dims` are invalid." 9556,_WithSpaceToBatch,tensorflow/tensorflow/python/ops/nn_ops.py,574,class,"Helper class for with_space_to_batch. Note that this class assumes that shapes of input and filter passed to `__call__` are compatible with `input_shape`, `filter_shape`, and `spatial_dims` passed to the constructor. Arguments: input_shape: static shape of input. i.e. input.shape. dilation_rate: see `with_space_to_batch`. padding: see `with_space_to_batch`. build_op: Function that maps (num_spatial_dims, paddings) -> (function that maps (input, filter) -> output). filter_shape: see `with_space_to_batch`. spatial_dims: see `with_space_to_batch`. data_format: see `with_space_to_batch`. num_batch_dims: (Optional). Number of batch dims in `input_shape`." 9557,_with_space_to_batch_base_paddings,tensorflow/tensorflow/python/ops/nn_ops.py,745,function,Helper function to compute base_paddings. 9558,_with_space_to_batch_adjust,tensorflow/tensorflow/python/ops/nn_ops.py,762,function,"Returns an `adjusted` version of `orig` based on `spatial_dims`. Tensor of the same type as `orig` and with shape `[max(spatial_dims), ...]` where: adjusted[spatial_dims[i] - 1, ...] = orig[i, ...] for 0 <= i < len(spatial_dims), and adjusted[j, ...] = fill_value for j != spatial_dims[i] - 1 for some i. If `orig` is a constant value, then the result will be a constant value. Args: orig: Tensor of rank > max(spatial_dims). fill_value: Numpy scalar (of same data type as `orig`) specifying the fill value for non-spatial dimensions. spatial_dims: See with_space_to_batch. Returns: `adjusted` tensor." 9559,_get_strides_and_dilation_rate,tensorflow/tensorflow/python/ops/nn_ops.py,821,function,"Helper function for verifying strides and dilation_rate arguments. This is used by `convolution` and `pool`. Args: num_spatial_dims: int strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1.
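As a minimal sketch of the `with_space_to_batch` contract described above (hypothetical shapes; `conv_op` is an illustrative name, and `op` receives whichever padding the wrapper chooses), assuming the `tf.nn.with_space_to_batch` export:

```python
import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])   # NHWC input
w = tf.random.normal([3, 3, 3, 16])  # 3x3 filter, 3 -> 16 channels

# `op` is called as op(converted_input, num_spatial_dims, padding); in the
# dilated case the wrapper passes "VALID" after space_to_batch padding.
def conv_op(converted_input, num_spatial_dims, padding):
    return tf.nn.conv2d(converted_input, w, strides=[1, 1, 1, 1], padding=padding)

# Equivalent to a rate-2 atrous convolution of x with w.
y = tf.nn.with_space_to_batch(x, dilation_rate=[2, 2], padding="SAME",
                              op=conv_op, filter_shape=[3, 3])
```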
Returns: Normalized (strides, dilation_rate) as int32 numpy arrays of shape [num_spatial_dims]. Raises: ValueError: if the parameters are invalid." 9560,convolution,tensorflow/tensorflow/python/ops/nn_ops.py,866,function,"Computes sums of N-D convolutions (actually cross-correlation). This also supports either output striding via the optional `strides` parameter or atrous convolution (also known as convolution with holes or dilated convolution, based on the French word ""trous"" meaning holes in English) via the optional `dilation_rate` parameter. Currently, however, output striding is not supported for atrous convolutions. Specifically, in the case that `data_format` does not start with ""NC"", given a rank (N+2) `input` Tensor of shape [num_batches, input_spatial_shape[0], ..., input_spatial_shape[N-1], num_input_channels], a rank (N+2) `filter` Tensor of shape [spatial_filter_shape[0], ..., spatial_filter_shape[N-1], num_input_channels, num_output_channels], an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N) specifying the filter upsampling/input downsampling rate, and an optional list of N `strides` (defaulting to [1]*N), this computes for each N-D spatial output position (x[0], ..., x[N-1]): ``` output[b, x[0], ..., x[N-1], k] = sum_{z[0], ..., z[N-1], q} filter[z[0], ..., z[N-1], q, k] * padded_input[b, x[0]*strides[0] + dilation_rate[0]*z[0], ..., x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1], q] ``` where b is the index into the batch, k is the output channel number, q is the input channel number, and z is the N-D spatial offset within the filter. Here, `padded_input` is obtained by zero padding the input using an effective spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and output striding `strides` as described in the [comment here](https://tensorflow.org/api_guides/python/nn#Convolution). In the case that `data_format` does start with `""NC""`, the `input` and output (but not the `filter`) are simply transposed as follows: convolution(input, data_format, **kwargs) = tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) It is required that 1 <= N <= 3. Args: input: An (N+2)-D `Tensor` of type `T`, of shape `[batch_size] + input_spatial_shape + [in_channels]` if data_format does not start with ""NC"" (default), or `[batch_size, in_channels] + input_spatial_shape` if data_format starts with ""NC"". filter: An (N+2)-D `Tensor` with the same type as `input` and shape `spatial_filter_shape + [in_channels, out_channels]`. padding: A string, either `""VALID""` or `""SAME""`. The padding algorithm. `""valid""` means no padding. `""same""` results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. strides: Optional. Sequence of N ints >= 1. Specifies the output stride. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter upsampling/input downsampling rate. In the literature, the same parameter is sometimes called `input stride` or `dilation`. The effective filter size used for the convolution will be `spatial_filter_shape + (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting (dilation_rate[i]-1) zeros between consecutive elements of the original filter in each spatial dimension i. If any value of dilation_rate is > 1, then all values of strides must be 1.
name: Optional name for the returned tensor. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with ""NC""), or the second dimension (if `data_format` starts with ""NC""). For N=1, the valid values are ""NWC"" (default) and ""NCW"". For N=2, the valid values are ""NHWC"" (default) and ""NCHW"". For N=3, the valid values are ""NDHWC"" (default) and ""NCDHW"". Returns: A `Tensor` with the same type as `input` of shape `[batch_size] + output_spatial_shape + [out_channels]` if data_format is None or does not start with ""NC"", or `[batch_size, out_channels] + output_spatial_shape` if data_format starts with ""NC"", where `output_spatial_shape` depends on the value of `padding`. If padding == ""SAME"": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding == ""VALID"": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (spatial_filter_shape[i]-1) * dilation_rate[i]) / strides[i]). Raises: ValueError: If input/output depth does not match `filter` shape, if padding is other than `""VALID""` or `""SAME""`, or if data_format is invalid." 9561,convolution_v2,tensorflow/tensorflow/python/ops/nn_ops.py,1005,function, 9562,convolution_internal,tensorflow/tensorflow/python/ops/nn_ops.py,1029,function,"Internal function which performs rank-agnostic convolution. Args: input: See `convolution`. filters: See `convolution`. strides: See `convolution`. padding: See `convolution`. data_format: See `convolution`. dilations: See `convolution`. name: See `convolution`. call_from_convolution: See `convolution`. num_spatial_dims: (Optional.) An integer describing the rank of the spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions, the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively. This argument is only required to disambiguate the rank of `batch_shape` when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For backwards compatibility, if `num_spatial_dims is None` and `filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be `1` (i.e., the input is expected to be `[batch_size, num_channels] + input_spatial_shape` or `[batch_size] + input_spatial_shape + [num_channels]`). Returns: A tensor of shape and dtype matching that of `input`. Raises: ValueError: If input and filter both have unknown shapes, or if `num_spatial_dims` is provided and incompatible with the value estimated from `filters.shape`." 9563,Convolution,tensorflow/tensorflow/python/ops/nn_ops.py,1171,class,"Helper class for convolution. Note that this class assumes that shapes of input and filter passed to `__call__` are compatible with `input_shape`, `filter_shape`, and `num_spatial_dims` passed to the constructor. Arguments: input_shape: static shape of input. i.e. input.shape. It has the form `batch_shape + input_spatial_shape + [num_channels]` if `data_format` does not start with `NC`, or `batch_shape + [num_channels] + input_spatial_shape` if `data_format` starts with `NC`. filter_shape: static shape of the filter. i.e. filter.shape. padding: The padding algorithm, must be ""SAME"" or ""VALID"". strides: see convolution. dilation_rate: see convolution. name: see convolution. data_format: A string or `None`. Specifies whether the channel dimension of the `input` and output is the last dimension (if `data_format` is `None` or does not start with `NC`), or the first post-batch dimension (i.e. if `data_format` starts with `NC`).
num_spatial_dims: (Usually optional.) Python integer, the rank of the spatial and channel dimensions. For `1-D`, `2-D` and `3-D` convolutions, the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively. This argument is only required to disambiguate the rank of `batch_shape` when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For backwards compatibility, if `num_spatial_dims is None` and `filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be `1` (i.e., the input is expected to be `[batch_size, num_channels] + input_spatial_shape` or `[batch_size] + input_spatial_shape + [num_channels]`)." 9564,pool,tensorflow/tensorflow/python/ops/nn_ops.py,1332,function,"Performs an N-D pooling operation. In the case that `data_format` does not start with ""NC"", computes for 0 <= b < batch_size, 0 <= x[i] < output_spatial_shape[i], 0 <= c < num_channels: ``` output[b, x[0], ..., x[N-1], c] = REDUCE_{z[0], ..., z[N-1]} input[b, x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0], ... x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1], c], ``` where the reduction function REDUCE depends on the value of `pooling_type`, and pad_before is defined based on the value of `padding` as described in the ""returns"" section of `tf.nn.convolution` for details. The reduction never includes out-of-bounds positions. In the case that `data_format` starts with `""NC""`, the `input` and output are simply transposed as follows: ``` pool(input, data_format, **kwargs) = tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) ``` Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if data_format does not start with ""NC"" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with ""NC"". Pooling happens over the spatial dimensions only. window_shape: Sequence of N ints >= 1. pooling_type: Specifies pooling operation, must be ""AVG"" or ""MAX"". padding: The padding algorithm, must be ""SAME"" or ""VALID"". See the ""returns"" section of `tf.nn.convolution` for details. dilation_rate: Optional. Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. name: Optional. Name of the op. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with ""NC""), or the second dimension (if `data_format` starts with ""NC""). For N=1, the valid values are ""NWC"" (default) and ""NCW"". For N=2, the valid values are ""NHWC"" (default) and ""NCHW"". For N=3, the valid values are ""NDHWC"" (default) and ""NCDHW"". dilations: Alias for dilation_rate. Returns: Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] if data_format is None or does not start with ""NC"", or [batch_size, num_channels] + output_spatial_shape if data_format starts with ""NC"", where `output_spatial_shape` depends on the value of padding: If padding = ""SAME"": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = ""VALID"": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i]) / strides[i]). Raises: ValueError: if arguments are invalid."
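A minimal sketch exercising the `convolution` and `pool` entries above through their TF 2 endpoints `tf.nn.convolution` and `tf.nn.pool`; all shapes are hypothetical:

```python
import tensorflow as tf

x = tf.random.normal([1, 9, 9, 3])  # NHWC input
w = tf.random.normal([3, 3, 3, 8])  # spatial_filter_shape + [in, out]

# Dilated 2-D convolution; strides must remain 1 when dilations > 1.
y = tf.nn.convolution(x, w, padding="SAME", dilations=[2, 2])

# 2x2 average pooling with stride 2 over the spatial dimensions.
p = tf.nn.pool(x, window_shape=[2, 2], pooling_type="AVG",
               strides=[2, 2], padding="VALID")
```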
9565,pool_v2,tensorflow/tensorflow/python/ops/nn_ops.py,1507,function,"Performs an N-D pooling operation. In the case that `data_format` does not start with ""NC"", computes for 0 <= b < batch_size, 0 <= x[i] < output_spatial_shape[i], 0 <= c < num_channels: ``` output[b, x[0], ..., x[N-1], c] = REDUCE_{z[0], ..., z[N-1]} input[b, x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0], ... x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1], c], ``` where the reduction function REDUCE depends on the value of `pooling_type`, and pad_before is defined based on the value of `padding` as described in the ""returns"" section of `tf.nn.convolution` for details. The reduction never includes out-of-bounds positions. In the case that `data_format` starts with `""NC""`, the `input` and output are simply transposed as follows: ``` pool(input, data_format, **kwargs) = tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]), **kwargs), [0, N+1] + range(1, N+1)) ``` Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if data_format does not start with ""NC"" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with ""NC"". Pooling happens over the spatial dimensions only. window_shape: Sequence of N ints >= 1. pooling_type: Specifies pooling operation, must be ""AVG"" or ""MAX"". strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1. padding: The padding algorithm, must be ""SAME"" or ""VALID"". Defaults to ""SAME"". See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with ""NC""), or the second dimension (if `data_format` starts with ""NC""). For N=1, the valid values are ""NWC"" (default) and ""NCW"". For N=2, the valid values are ""NHWC"" (default) and ""NCHW"". For N=3, the valid values are ""NDHWC"" (default) and ""NCDHW"". dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1. name: Optional. Name of the op. Returns: Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels] if data_format is None or does not start with ""NC"", or [batch_size, num_channels] + output_spatial_shape if data_format starts with ""NC"", where `output_spatial_shape` depends on the value of padding: If padding = ""SAME"": output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i]) If padding = ""VALID"": output_spatial_shape[i] = ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i]) / strides[i]). Raises: ValueError: if arguments are invalid." 9566,atrous_conv2d,tensorflow/tensorflow/python/ops/nn_ops.py,1607,function,"Atrous convolution (a.k.a. convolution with holes or dilated convolution). This function is a simpler wrapper around the more general `tf.nn.convolution`, and exists only for backwards compatibility. You can use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution. Computes a 2-D atrous convolution, also known as convolution with holes or dilated convolution, given 4-D `value` and `filters` tensors. If the `rate` parameter is equal to one, it performs regular 2-D convolution. 
If the `rate` parameter is greater than one, it performs convolution with holes, sampling the input values every `rate` pixels in the `height` and `width` dimensions. This is equivalent to convolving the input with a set of upsampled filters, produced by inserting `rate - 1` zeros between two consecutive values of the filters along the `height` and `width` dimensions, hence the name atrous convolution or convolution with holes (the French word trous means holes in English). More specifically: ``` output[batch, height, width, out_channel] = sum_{dheight, dwidth, in_channel} ( filters[dheight, dwidth, in_channel, out_channel] * value[batch, height + rate*dheight, width + rate*dwidth, in_channel] ) ``` Atrous convolution allows us to explicitly control how densely to compute feature responses in fully convolutional networks. Used in conjunction with bilinear interpolation, it offers an alternative to `conv2d_transpose` in dense prediction tasks such as semantic image segmentation, optical flow computation, or depth estimation. It also allows us to effectively enlarge the field of view of filters without increasing the number of parameters or the amount of computation. For a description of atrous convolution and how it can be used for dense feature extraction, please see: (Chen et al., 2015). The same operation is investigated further in (Yu et al., 2016). Previous works that effectively use atrous convolution in different ways are, among others, (Sermanet et al., 2014) and (Giusti et al., 2013). Atrous convolution is also closely related to the so-called noble identities in multi-rate signal processing. There are many different ways to implement atrous convolution (see the refs above). The implementation here reduces ```python atrous_conv2d(value, filters, rate, padding=padding) ``` to the following three operations: ```python paddings = ... net = space_to_batch(value, paddings, block_size=rate) net = conv2d(net, filters, strides=[1, 1, 1, 1], padding=""VALID"") crops = ... net = batch_to_space(net, crops, block_size=rate) ``` Advanced usage. Note the following optimization: A sequence of `atrous_conv2d` operations with identical `rate` parameters, 'SAME' `padding`, and filters with odd heights/widths: ```python net = atrous_conv2d(net, filters1, rate, padding=""SAME"") net = atrous_conv2d(net, filters2, rate, padding=""SAME"") ... net = atrous_conv2d(net, filtersK, rate, padding=""SAME"") ``` can be performed equivalently, and more cheaply in terms of computation and memory, as: ```python pad = ... # padding so that the input dims are multiples of rate net = space_to_batch(net, paddings=pad, block_size=rate) net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding=""SAME"") net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding=""SAME"") ... net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding=""SAME"") net = batch_to_space(net, crops=pad, block_size=rate) ``` because a pair of consecutive `space_to_batch` and `batch_to_space` ops with the same `block_size` cancel out when their respective `paddings` and `crops` inputs are identical. Args: value: A 4-D `Tensor` of type `float`. It needs to be in the default ""NHWC"" format. Its shape is `[batch, in_height, in_width, in_channels]`. filters: A 4-D `Tensor` with the same type as `value` and shape `[filter_height, filter_width, in_channels, out_channels]`. `filters`' `in_channels` dimension must match that of `value`.
Atrous convolution is equivalent to standard convolution with upsampled filters with effective height `filter_height + (filter_height - 1) * (rate - 1)` and effective width `filter_width + (filter_width - 1) * (rate - 1)`, produced by inserting `rate - 1` zeros along consecutive elements across the `filters`' spatial dimensions. rate: A positive int32. The stride with which we sample input values across the `height` and `width` dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the `height` and `width` dimensions. In the literature, the same parameter is sometimes called `input stride` or `dilation`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Output shape with `'VALID'` padding is: [batch, height - rate * (filter_height - 1), width - rate * (filter_width - 1), out_channels]. Output shape with `'SAME'` padding is: [batch, height, width, out_channels]. Raises: ValueError: If input/output depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`. References: Multi-Scale Context Aggregation by Dilated Convolutions: [Yu et al., 2016](https://arxiv.org/abs/1511.07122) ([pdf](https://arxiv.org/pdf/1511.07122.pdf)) Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs: [Chen et al., 2015](http://arxiv.org/abs/1412.7062) ([pdf](https://arxiv.org/pdf/1412.7062)) OverFeat - Integrated Recognition, Localization and Detection using Convolutional Networks: [Sermanet et al., 2014](https://arxiv.org/abs/1312.6229) ([pdf](https://arxiv.org/pdf/1312.6229.pdf)) Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks: [Giusti et al., 2013](https://ieeexplore.ieee.org/abstract/document/6738831) ([pdf](https://arxiv.org/pdf/1302.1700.pdf))" 9567,convert_padding,tensorflow/tensorflow/python/ops/nn_ops.py,1755,function,"Converts Python padding to C++ padding for ops which take EXPLICIT padding. Args: padding: the `padding` argument for a Python op which supports EXPLICIT padding. Returns: (padding, explicit_paddings) pair, which should be passed as attributes to a C++ op. Raises: ValueError: If padding is invalid." 9568,conv1d,tensorflow/tensorflow/python/ops/nn_ops.py,1805,function,"Computes a 1-D convolution of input with rank `>=3` and a `3-D` filter. Given an input tensor of shape `batch_shape + [in_width, in_channels]` if `data_format` is `""NWC""`, or `batch_shape + [in_channels, in_width]` if `data_format` is `""NCW""`, and a filter / kernel tensor of shape `[filter_width, in_channels, out_channels]`, this op reshapes the arguments to pass them to `conv2d` to perform the equivalent convolution operation. Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`. For example, if `data_format` does not start with ""NC"", a tensor of shape `batch_shape + [in_width, in_channels]` is reshaped to `batch_shape + [1, in_width, in_channels]`, and the filter is reshaped to `[1, filter_width, in_channels, out_channels]`. The result is then reshaped back to `batch_shape + [out_width, out_channels]` \(where out_width is a function of the stride and padding as in conv2d\) and returned to the caller. Args: value: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or `float64`. filters: A Tensor of rank at least 3. Must have the same type as `value`. stride: An int or list of `ints` that has length `1` or `3`.
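The atrous convolution described above can also be reproduced through the generic entry point, as this minimal sketch shows (hypothetical shapes; the two results should agree up to floating-point error):

```python
import tensorflow as tf

x = tf.random.normal([1, 16, 16, 3])
w = tf.random.normal([3, 3, 3, 8])

y1 = tf.nn.atrous_conv2d(x, w, rate=2, padding="SAME")

# atrous_conv2d is a wrapper; the same op via tf.nn.convolution:
y2 = tf.nn.convolution(x, w, padding="SAME", dilations=[2, 2])
```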
The number of entries by which the filter is moved right at each step. padding: 'SAME' or 'VALID' use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from `""NWC"", ""NCW""`. Defaults to `""NWC""`, the data is stored in the order of `batch_shape + [in_width, in_channels]`. The `""NCW""` format stores data as `batch_shape + [in_channels, in_width]`. name: A name for the operation (optional). input: Alias for value. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. Returns: A `Tensor`. Has the same type as input. Raises: ValueError: if `data_format` is invalid." 9569,conv1d_v2,tensorflow/tensorflow/python/ops/nn_ops.py,1911,function,"Computes a 1-D convolution given 3-D input and filter tensors. Given an input tensor of shape `batch_shape + [in_width, in_channels]` if `data_format` is `""NWC""`, or `batch_shape + [in_channels, in_width]` if `data_format` is `""NCW""`, and a filter / kernel tensor of shape `[filter_width, in_channels, out_channels]`, this op reshapes the arguments to pass them to `conv2d` to perform the equivalent convolution operation. Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`. For example, if `data_format` does not start with `""NC""`, a tensor of shape `batch_shape + [in_width, in_channels]` is reshaped to `batch_shape + [1, in_width, in_channels]`, and the filter is reshaped to `[1, filter_width, in_channels, out_channels]`. The result is then reshaped back to `batch_shape + [out_width, out_channels]` \(where out_width is a function of the stride and padding as in conv2d\) and returned to the caller. Args: input: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or `float64`. filters: A Tensor of rank at least 3. Must have the same type as `input`. stride: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: 'SAME' or 'VALID' data_format: An optional `string` from `""NWC"", ""NCW""`. Defaults to `""NWC""`, the data is stored in the order of `batch_shape + [in_width, in_channels]`. The `""NCW""` format stores data as `batch_shape + [in_channels, in_width]`. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as input. Raises: ValueError: if `data_format` is invalid." 9570,conv1d_transpose,tensorflow/tensorflow/python/ops/nn_ops.py,1979,function,"The transpose of `conv1d`. This operation is sometimes called ""deconvolution"" after (Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d` rather than an actual deconvolution. Args: input: A 3-D `Tensor` of type `float` and shape `[batch, in_width, in_channels]` for `NWC` data format or `[batch, in_channels, in_width]` for `NCW` data format. filters: A 3-D `Tensor` with the same type as `input` and shape `[filter_width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `input`. 
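A minimal sketch of the 1-D pair documented here, `conv1d` and `conv1d_transpose` (described just below); shapes are hypothetical and follow the `NWC` layout, and note the transposed filter layout `[filter_width, output_channels, in_channels]`:

```python
import tensorflow as tf

x = tf.random.normal([4, 100, 8])  # [batch, in_width, in_channels]
w = tf.random.normal([5, 8, 16])   # [filter_width, in_channels, out_channels]

y = tf.nn.conv1d(x, w, stride=2, padding="SAME")       # -> [4, 50, 16]

# Transposed filter layout is [filter_width, output_channels, in_channels].
wt = tf.random.normal([5, 8, 16])
z = tf.nn.conv1d_transpose(y, wt, output_shape=[4, 100, 8],
                           strides=2, padding="SAME")  # -> [4, 100, 8]
```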
output_shape: A 1-D `Tensor`, containing three elements, representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1` or `3`. The number of entries by which the filter is moved right at each step. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. `'NWC'` and `'NCW'` are supported. dilations: An int or list of `ints` that has length `1` or `3` which defaults to 1. The dilation factor for each dimension of input. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. Dilations in the batch and depth dimensions must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `input`. Raises: ValueError: If input/output depth does not match `filter`'s shape, if `output_shape` is not a 3-element vector, if `padding` is other than `'VALID'` or `'SAME'`, or if `data_format` is invalid. References: Deconvolutional Networks: [Zeiler et al., 2010](https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))" 9571,conv2d_v2,tensorflow/tensorflow/python/ops/nn_ops.py,2068,function,"Computes a 2-D convolution given `input` and 4-D `filters` tensors. The `input` tensor may have rank `4` or higher, where shape dimensions `[:-3]` are considered batch dimensions (`batch_shape`). Given an input tensor of shape `batch_shape + [in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following: 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`. 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`. 3. For each patch, right-multiplies the filter matrix and the image patch vector. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Usage Example: >>> x_in = np.array([[ ... [[2], [1], [2], [0], [1]], ... [[1], [3], [2], [2], [3]], ... [[1], [1], [3], [3], [0]], ... [[2], [2], [0], [1], [1]], ... [[0], [0], [3], [1], [2]], ]]) >>> kernel_in = np.array([ ... [ [[2, 0.1]], [[3, 0.2]] ], ... [ [[0, 0.3]], [[1, 0.4]] ], ]) >>> x = tf.constant(x_in, dtype=tf.float32) >>> kernel = tf.constant(kernel_in, dtype=tf.float32) >>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID') Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. A Tensor of rank at least 4. The dimension order is interpreted according to the value of `data_format`; with the all-but-inner-3 dimensions acting as batch dimensions. See below for details. filters: A `Tensor`. Must have the same type as `input`. A 4-D tensor of shape `[filter_height, filter_width, in_channels, out_channels]` strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1.
The dimension order is determined by the value of `data_format`, see below for details. padding: Either the `string` `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `""NHWC"", ""NCHW""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. With the default format ""NHWC"", the data is stored in the order of: `batch_shape + [height, width, channels]`. Alternatively, the format could be ""NCHW"", the data storage order of: `batch_shape + [channels, height, width]`. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions of a 4-D tensor must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input` and the same outer batch shape." 9572,conv2d,tensorflow/tensorflow/python/ops/nn_ops.py,2173,function,"Computes a 2-D convolution given 4-D `input` and `filter` tensors. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, out_channels]`, this op performs the following: 1. Flattens the filter to a 2-D matrix with shape `[filter_height * filter_width * in_channels, output_channels]`. 2. Extracts image patches from the input tensor to form a *virtual* tensor of shape `[batch, out_height, out_width, filter_height * filter_width * in_channels]`. 3. For each patch, right-multiplies the filter matrix and the image patch vector. In detail, with the default NHWC format, output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. A 4-D tensor. The dimension order is interpreted according to the value of `data_format`, see below for details. filter: A `Tensor`. Must have the same type as `input`. A 4-D tensor of shape `[filter_height, filter_width, in_channels, out_channels]` strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: Either the `string` `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension.
When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `""NHWC"", ""NCHW""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. With the default format ""NHWC"", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be ""NCHW"", the data storage order of: [batch, channels, height, width]. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions of a 4-D tensor must be 1. name: A name for the operation (optional). filters: Alias for filter. Returns: A `Tensor`. Has the same type as `input`." 9573,conv2d_backprop_filter,tensorflow/tensorflow/python/ops/nn_ops.py,2295,function,"Computes the gradients of convolution with respect to the filter. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[batch, in_height, in_width, in_channels]`. filter_sizes: A `Tensor` of type `int32`. An integer vector representing the tensor shape of `filter`, where `filter` is a 4-D `[filter_height, filter_width, in_channels, out_channels]` tensor. out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the `string` `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `""NHWC"", ""NCHW""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. With the default format ""NHWC"", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be ""NCHW"", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional).
Returns: A `Tensor`. Has the same type as `input`." 9574,conv2d_backprop_input,tensorflow/tensorflow/python/ops/nn_ops.py,2356,function,"Computes the gradients of convolution with respect to the input. Args: input_sizes: A `Tensor` of type `int32`. An integer vector representing the shape of `input`, where `input` is a 4-D `[batch, height, width, channels]` tensor. filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with shape `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. Must be in the same order as the dimension specified with format. padding: Either the `string` `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. data_format: An optional `string` from: `""NHWC"", ""NCHW""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. With the default format ""NHWC"", the data is stored in the order of: [batch, in_height, in_width, in_channels]. Alternatively, the format could be ""NCHW"", the data storage order of: [batch, in_channels, in_height, in_width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). filters: Alias for filter. Returns: A `Tensor`. Has the same type as `filter`." 9575,conv2d_transpose,tensorflow/tensorflow/python/ops/nn_ops.py,2421,function,"The transpose of `conv2d`. This operation is sometimes called ""deconvolution"" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d` rather than an actual deconvolution. Args: value: A 4-D `Tensor` of type `float` and shape `[batch, height, width, in_channels]` for `NHWC` data format or `[batch, in_channels, height, width]` for `NCHW` data format. filter: A 4-D `Tensor` with the same type as `value` and shape `[height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string.
'NHWC' and 'NCHW' are supported. name: Optional name for the returned tensor. input: Alias for value. filters: Alias for filter. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions of a 4-D tensor must be 1. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. References: Deconvolutional Networks: [Zeiler et al., 2010](https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))" 9576,conv2d_transpose_v2,tensorflow/tensorflow/python/ops/nn_ops.py,2498,function,"The transpose of `conv2d`. This operation is sometimes called ""deconvolution"" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d` rather than an actual deconvolution. Args: input: A 4-D `Tensor` of type `float` and shape `[batch, height, width, in_channels]` for `NHWC` data format or `[batch, in_channels, height, width]` for `NCHW` data format. filters: A 4-D `Tensor` with the same type as `input` and shape `[height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `input`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. dilations: An int or list of `ints` that has length `1`, `2` or `4`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions of a 4-D tensor must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `input`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. References: Deconvolutional Networks: [Zeiler et al., 2010](https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))" 9577,_conv2d_expanded_batch,tensorflow/tensorflow/python/ops/nn_ops.py,2574,function,Helper function for `convolution_internal`; handles expanded batches. 9578,atrous_conv2d_transpose,tensorflow/tensorflow/python/ops/nn_ops.py,2611,function,"The transpose of `atrous_conv2d`.
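A minimal sketch of the `conv2d_transpose` shape contract documented above (hypothetical sizes; with `'SAME'` padding and stride 2 the spatial dimensions double):

```python
import tensorflow as tf

x = tf.random.normal([1, 7, 7, 32])   # NHWC input
w = tf.random.normal([3, 3, 16, 32])  # [height, width, output_channels, in_channels]

y = tf.nn.conv2d_transpose(x, w, output_shape=[1, 14, 14, 16], strides=2)
```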
This operation is sometimes called ""deconvolution"" after (Zeiler et al., 2010), but is really the transpose (gradient) of `atrous_conv2d` rather than an actual deconvolution. Args: value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC` format. Its shape is `[batch, in_height, in_width, in_channels]`. filters: A 4-D `Tensor` with the same type as `value` and shape `[filter_height, filter_width, out_channels, in_channels]`. `filters`' `in_channels` dimension must match that of `value`. Atrous convolution is equivalent to standard convolution with upsampled filters with effective height `filter_height + (filter_height - 1) * (rate - 1)` and effective width `filter_width + (filter_width - 1) * (rate - 1)`, produced by inserting `rate - 1` zeros along consecutive elements across the `filters`' spatial dimensions. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. rate: A positive int32. The stride with which we sample input values across the `height` and `width` dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the `height` and `width` dimensions. In the literature, the same parameter is sometimes called `input stride` or `dilation`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filters`' shape, or if padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less than one, or if the output_shape is not a tensor with 4 elements. References: Deconvolutional Networks: [Zeiler et al., 2010](https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))" 9579,depthwise_conv2d_native,tensorflow/tensorflow/python/ops/nn_ops.py,2771,function,"Computes a 2-D depthwise convolution. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter / kernel tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]`, containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. Thus, the output has `in_channels * channel_multiplier` channels. ``` for k in 0..in_channels-1 for q in 0..channel_multiplier-1 output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] ``` Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. filter: A `Tensor`. Must have the same type as `input`. strides: A list of `ints`. 1-D of length 4. The stride of the sliding window for each dimension of `input`. padding: Controls how to pad the image before applying the convolution. Can be the string `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`.
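The depthwise convolution described above is usually reached through the higher-level `tf.nn.depthwise_conv2d` wrapper, which shares the same shape contract as the native op; a minimal sketch with hypothetical sizes:

```python
import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])  # NHWC input
w = tf.random.normal([3, 3, 3, 2])  # channel_multiplier = 2

# Output channels = in_channels * channel_multiplier = 6.
y = tf.nn.depthwise_conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
```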
When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `""NHWC"", ""NCHW""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. With the default format ""NHWC"", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be ""NCHW"", the data storage order of: [batch, channels, height, width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`." 9580,depthwise_conv2d_native_backprop_input,tensorflow/tensorflow/python/ops/nn_ops.py,2851,function,"Computes the gradients of depthwise convolution with respect to the input. Args: input_sizes: A `Tensor` of type `int32`. An integer vector representing the shape of `input`, based on `data_format`. For example, if `data_format` is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor. filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape `[filter_height, filter_width, in_channels, depthwise_multiplier]`. out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. padding: Controls how to pad the image before applying the convolution. Can be the string `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `""NHWC"", ""NCHW""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. With the default format ""NHWC"", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be ""NCHW"", the data storage order of: [batch, channels, height, width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `filter`." 9581,depthwise_conv2d_native_backprop_filter,tensorflow/tensorflow/python/ops/nn_ops.py,2921,function,"Computes the gradients of depthwise convolution with respect to the filter. Args: input: A `Tensor`.
Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, in_width, in_channels]` tensor. filter_sizes: A `Tensor` of type `int32`. An integer vector representing the tensor shape of `filter`, where `filter` is a 4-D `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape based on `data_format`. For example, if `data_format` is 'NHWC' then out_backprop shape is `[batch, out_height, out_width, out_channels]`. Gradients w.r.t. the output of the convolution. strides: A list of `ints`. The stride of the sliding window for each dimension of the input of the convolution. padding: Controls how to pad the image before applying the convolution. Can be the string `""SAME""` or `""VALID""` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is `""NHWC""`, this should be in the form `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used and data_format is `""NCHW""`, this should be in the form `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`. data_format: An optional `string` from: `""NHWC"", ""NCHW""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. With the default format ""NHWC"", the data is stored in the order of: [batch, height, width, channels]. Alternatively, the format could be ""NCHW"", the data storage order of: [batch, channels, height, width]. dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D tensor of length 4. The dilation factor for each dimension of `input`. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. Dilations in the batch and depth dimensions must be 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`." 9582,_conv3d_expanded_batch,tensorflow/tensorflow/python/ops/nn_ops.py,2984,function,Helper function for `conv3d`; handles expanded batches. 9583,conv3d_v2,tensorflow/tensorflow/python/ops/nn_ops.py,3026,function, 9584,conv3d_v1,tensorflow/tensorflow/python/ops/nn_ops.py,3041,function, 9585,conv3d_transpose,tensorflow/tensorflow/python/ops/nn_ops.py,3062,function,"The transpose of `conv3d`. This operation is sometimes called ""deconvolution"" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d` rather than an actual deconvolution. Args: value: A 5-D `Tensor` of type `float` and shape `[batch, depth, height, width, in_channels]`. filter: A 5-D `Tensor` with the same type as `value` and shape `[depth, height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `value`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: A list of ints. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string, either `'NDHWC'` or `'NCDHW'` specifying the layout of the input and output tensors. Defaults to `'NDHWC'`. name: Optional name for the returned tensor. input: Alias of value. 
filters: Alias of filter. dilations: An int or list of `ints` that has length `1`, `3` or `5`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. If a 5-d tensor is given, dilations in the batch and depth dimensions must be 1. Returns: A `Tensor` with the same type as `value`. Raises: ValueError: If input/output depth does not match `filter`'s shape, or if padding is other than `'VALID'` or `'SAME'`. References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))" 9586,conv3d_transpose_v2,tensorflow/tensorflow/python/ops/nn_ops.py,3134,function,"The transpose of `conv3d`. This operation is sometimes called ""deconvolution"" after (Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d` rather than an actual deconvolution. Args: input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height, width, in_channels]` for `NDHWC` data format or `[batch, in_channels, depth, height, width]` for `NCDHW` data format. filters: A 5-D `Tensor` with the same type as `input` and shape `[depth, height, width, output_channels, in_channels]`. `filter`'s `in_channels` dimension must match that of `input`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. 'NDHWC' and 'NCDHW' are supported. dilations: An int or list of `ints` that has length `1`, `3` or `5`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the `D`, `H` and `W` dimension. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. If a 5-d tensor is given, dilations in the batch and depth dimensions must be 1. name: Optional name for the returned tensor. Returns: A `Tensor` with the same type as `input`. References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))" 9587,conv_transpose,tensorflow/tensorflow/python/ops/nn_ops.py,3214,function,"The transpose of `convolution`. This operation is sometimes called ""deconvolution"" after (Zeiler et al., 2010), but is really the transpose (gradient) of `convolution` rather than an actual deconvolution. 
Args: input: An N+2 dimensional `Tensor` of shape `[batch_size] + input_spatial_shape + [in_channels]` if data_format does not start with ""NC"" (default), or `[batch_size, in_channels] + input_spatial_shape` if data_format starts with ""NC"". It must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. filters: An N+2 dimensional `Tensor` with the same type as `input` and shape `spatial_filter_shape + [in_channels, out_channels]`. output_shape: A 1-D `Tensor` representing the output shape of the deconvolution op. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of `input`. If a single value is given it is replicated in the spatial dimensions. By default the `N` and `C` dimensions are set to 1. The dimension order is determined by the value of `data_format`, see below for details. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with ""NC""), or the second dimension (if `data_format` starts with ""NC""). For N=1, the valid values are ""NWC"" (default) and ""NCW"". For N=2, the valid values are ""NHWC"" (default) and ""NCHW"". For N=3, the valid values are ""NDHWC"" (default) and ""NCDHW"". dilations: An int or list of `ints` that has length `1`, `N` or `N+2`, defaults to 1. The dilation factor for each dimension of `input`. If a single value is given it is replicated in the spatial dimensions. By default the `N` and `C` dimensions are set to 1. If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension. The dimension order is determined by the value of `data_format`, see above for details. name: A name for the operation (optional). If not specified ""conv_transpose"" is used. Returns: A `Tensor` with the same type as `input`. References: Deconvolutional Networks: [Zeiler et al., 2010] (https://ieeexplore.ieee.org/abstract/document/5539957) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))" 9588,_tf_deterministic_ops,tensorflow/tensorflow/python/ops/nn_ops.py,3297,function, 9589,bias_add,tensorflow/tensorflow/python/ops/nn_ops.py,3312,function,"Adds `bias` to `value`. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the case where both types are quantized. Args: value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, or `complex128`. bias: A 1-D `Tensor` with size matching the channel dimension of `value`. Must be the same type as `value` unless `value` is a quantized type, in which case a different quantized type may be used. data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the default) is specified then 'N...C' is assumed. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `value`. 
Raises: ValueError if data format is unrecognized, if `value` has less than two dimensions when `data_format` is 'N...C'/`None`, or `value` has less than three dimensions when `data_format` is 'NC...', if `bias` does not have exactly one dimension (i.e. is not a vector), or if the size of `bias` does not match the size of the channel dimension of `value`." 9590,bias_add_v1,tensorflow/tensorflow/python/ops/nn_ops.py,3373,function,"Adds `bias` to `value`. This is a deprecated version of bias_add and will soon be removed. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the case where both types are quantized. Args: value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, or `complex128`. bias: A 1-D `Tensor` with size matching the last dimension of `value`. Must be the same type as `value` unless `value` is a quantized type, in which case a different quantized type may be used. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `value`." 9591,crelu,tensorflow/tensorflow/python/ops/nn_ops.py,3402,function,"Computes Concatenated ReLU. Concatenates a ReLU which selects only the positive part of the activation with a ReLU which selects only the *negative* part of the activation. Note that as a result this non-linearity doubles the depth of the activations. Source: [Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units. W. Shang, et al.](https://arxiv.org/abs/1603.05201) Args: features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. name: A name for the operation (optional). axis: The axis that the output values are concatenated along. Default is -1. Returns: A `Tensor` with the same type as `features`. References: Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units: [Shang et al., 2016](http://proceedings.mlr.press/v48/shang16) ([pdf](http://proceedings.mlr.press/v48/shang16.pdf))" 9592,crelu_v2,tensorflow/tensorflow/python/ops/nn_ops.py,3435,function, 9593,relu6,tensorflow/tensorflow/python/ops/nn_ops.py,3442,function,"Computes Rectified Linear 6: `min(max(features, 0), 6)`. Args: features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`. name: A name for the operation (optional). Returns: A `Tensor` with the same type as `features`. References: Convolutional Deep Belief Networks on CIFAR-10: Krizhevsky et al., 2010 ([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf))" 9594,leaky_relu,tensorflow/tensorflow/python/ops/nn_ops.py,3465,function,"Computes the Leaky ReLU activation function. Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models. AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013] (https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). Args: features: A `Tensor` representing preactivation values. Must be one of the following types: `float16`, `float32`, `float64`, `int32`, `int64`. alpha: Slope of the activation function at x < 0. name: A name for the operation (optional). Returns: The activation value. 
References: Rectifier Nonlinearities Improve Neural Network Acoustic Models: [Maas et al., 2013] (http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422) ([pdf] (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf))" 9595,_flatten_outer_dims,tensorflow/tensorflow/python/ops/nn_ops.py,3496,function,Flattens logits' outer dimensions and keeps its last dimension. 9596,_softmax,tensorflow/tensorflow/python/ops/nn_ops.py,3523,function,"Helper function for softmax and log_softmax. It reshapes and transposes the input logits into a 2-D Tensor and then invokes the tf.nn._softmax or tf.nn._log_softmax function. The output is then transposed and reshaped back. Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax dim: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `dim` is beyond the last dimension of `logits`." 9597,softmax,tensorflow/tensorflow/python/ops/nn_ops.py,3605,function,"Computes softmax activations. This function performs the equivalent of softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis) See: https://en.wikipedia.org/wiki/Softmax_function Example usage: >>> tf.nn.softmax([-1, 0., 1.]) Args: logits: A non-empty `Tensor`, or an object whose type has a registered `Tensor` conversion function. Must be one of the following types: `half`, `float32`, `float64`. See also `convert_to_tensor`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for `axis`. Returns: A `Tensor`. Has the same type and shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`. TypeError: If no conversion function is registered for `logits` to Tensor. RuntimeError: If a registered conversion function returns an invalid value." 9598,softmax_v2,tensorflow/tensorflow/python/ops/nn_ops.py,3649,function,"Computes softmax activations. This function performs the equivalent of softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type and shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`." 9599,log_softmax,tensorflow/tensorflow/python/ops/nn_ops.py,3678,function,"Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for `axis`. Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`." 
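Not part of the indexed source: a minimal runnable sketch checking the softmax and log_softmax formulas quoted in the entries above (the tensor values are illustrative only).

```python
import tensorflow as tf

logits = tf.constant([[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]])

# softmax = exp(logits) / sum(exp(logits), axis), per the softmax docstring.
manual = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True)
assert tf.reduce_max(tf.abs(tf.nn.softmax(logits) - manual)) < 1e-6

# log_softmax = logits - log(sum(exp(logits), axis)); the fused op is the
# numerically safer way to compute log(softmax(logits)).
assert tf.reduce_max(tf.abs(
    tf.nn.log_softmax(logits) - tf.math.log(tf.nn.softmax(logits)))) < 1e-5
```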
9600,log_softmax_v2,tensorflow/tensorflow/python/ops/nn_ops.py,3708,function,"Computes log softmax activations. For each batch `i` and class `j` we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `logits`. Same shape as `logits`. Raises: InvalidArgumentError: if `logits` is empty or `axis` is beyond the last dimension of `logits`." 9601,_ensure_xent_args,tensorflow/tensorflow/python/ops/nn_ops.py,3734,function, 9602,softmax_cross_entropy_with_logits_v2,tensorflow/tensorflow/python/ops/nn_ops.py,3745,function,"Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. Usage: >>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]] >>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]] >>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits) **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `axis` argument specifying the class dimension. `logits` and `labels` must have the same dtype (either `float16`, `float32`, or `float64`). Backpropagation will happen into both `logits` and `labels`. To disallow backpropagation into `labels`, pass label tensors through `tf.stop_gradient` before feeding them to this function. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Per-label activations, typically a linear output. These activation energies are interpreted as unnormalized log probabilities. axis: The class dimension. Defaults to -1, which is the last dimension. name: A name for the operation (optional). Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`." 9603,softmax_cross_entropy_with_logits_v2_helper,tensorflow/tensorflow/python/ops/nn_ops.py,3809,function,"Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). 
For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `axis` argument specifying the class dimension. `logits` and `labels` must have the same dtype (either `float16`, `float32`, or `float64`). Backpropagation will happen into both `logits` and `labels`. To disallow backpropagation into `labels`, pass label tensors through `tf.stop_gradient` before feeding them to this function. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: Each vector along the class dimension should hold a valid probability distribution e.g. for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Unscaled log probabilities. axis: The class dimension. Defaults to -1, which is the last dimension. name: A name for the operation (optional). dim: Deprecated alias for axis. Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`." 9604,softmax_cross_entropy_with_logits,tensorflow/tensorflow/python/ops/nn_ops.py,3937,function,"Computes softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** While the classes are mutually exclusive, their probabilities need not be. All that is required is that each row of `labels` is a valid probability distribution. If they are not, the computation of the gradient will be incorrect. If using exclusive `labels` (wherein one and only one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits and labels of shape `[batch_size, num_classes]`, but higher dimensions are supported, with the `dim` argument specifying the class dimension. Backpropagation will happen only into `logits`. To calculate a cross entropy loss that allows backpropagation into both `logits` and `labels`, see `tf.nn.softmax_cross_entropy_with_logits_v2`. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: Each vector along the class dimension should hold a valid probability distribution e.g. 
for the case in which labels are of shape `[batch_size, num_classes]`, each row of `labels[i]` must be a valid probability distribution. logits: Per-label activations, typically a linear output. These activation energies are interpreted as unnormalized log probabilities. dim: The class dimension. Defaults to -1, which is the last dimension. name: A name for the operation (optional). axis: Alias for dim. Returns: A `Tensor` that contains the softmax cross entropy loss. Its type is the same as `logits` and its shape is the same as `labels` except that it does not have the last dimension of `labels`." 9605,sparse_softmax_cross_entropy_with_logits,tensorflow/tensorflow/python/ops/nn_ops.py,4005,function,"Computes sparse softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** For this operation, the probability of a given label is considered exclusive. That is, soft classes are not allowed, and the `labels` vector must provide a single specific index for the true class for each row of `logits` (each minibatch entry). For soft softmax classification with a probability distribution for each entry, see `softmax_cross_entropy_with_logits_v2`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits of shape `[batch_size, num_classes]` and have labels of shape `[batch_size]`, but higher dimensions are supported, in which case the `dim`-th dimension is assumed to be of size `num_classes`. `logits` must have the dtype of `float16`, `float32`, or `float64`, and `labels` must have the dtype of `int32` or `int64`. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: _sentinel: Used to prevent positional parameters. Internal, do not use. labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Per-label activations (typically a linear output) of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or `float64`. These activation energies are interpreted as unnormalized log probabilities. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `labels` and of the same type as `logits` with the softmax cross entropy loss. Raises: ValueError: If logits are scalars (need to have rank >= 1) or if the rank of the labels is not equal to the rank of the logits minus one." 9606,sparse_softmax_cross_entropy_with_logits_v2,tensorflow/tensorflow/python/ops/nn_ops.py,4131,function,"Computes sparse softmax cross entropy between `logits` and `labels`. Measures the probability error in discrete classification tasks in which the classes are mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is labeled with one and only one label: an image can be a dog or a truck, but not both. **NOTE:** For this operation, the probability of a given label is considered exclusive. 
That is, soft classes are not allowed, and the `labels` vector must provide a single specific index for the true class for each row of `logits` (each minibatch entry). For soft softmax classification with a probability distribution for each entry, see `softmax_cross_entropy_with_logits_v2`. **WARNING:** This op expects unscaled logits, since it performs a `softmax` on `logits` internally for efficiency. Do not call this op with the output of `softmax`, as it will produce incorrect results. A common use case is to have logits of shape `[batch_size, num_classes]` and have labels of shape `[batch_size]`, but higher dimensions are supported, in which case the `dim`-th dimension is assumed to be of size `num_classes`. `logits` must have the dtype of `float16`, `float32`, or `float64`, and `labels` must have the dtype of `int32` or `int64`. **Note that to avoid confusion, it is required to pass only named arguments to this function.** Args: labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or `float64`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `labels` and of the same type as `logits` with the softmax cross entropy loss. Raises: ValueError: If logits are scalars (need to have rank >= 1) or if the rank of the labels is not equal to the rank of the logits minus one." 9607,avg_pool_v2,tensorflow/tensorflow/python/ops/nn_ops.py,4184,function,"Performs the avg pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if `data_format` does not start with ""NC"" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with ""NC"". Pooling happens over the spatial dimensions only. ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. Specifies the channel dimension. For N=1 it can be either ""NWC"" (default) or ""NCW"", for N=2 it can be either ""NHWC"" (default) or ""NCHW"" and for N=3 either ""NDHWC"" (default) or ""NCDHW"". name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The average pooled output tensor." 9608,avg_pool,tensorflow/tensorflow/python/ops/nn_ops.py,4247,function,"Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `value`. Args: value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. 
The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. input: Alias for value. Returns: A `Tensor` with the same type as `value`. The average pooled output tensor." 9609,avg_pool2d,tensorflow/tensorflow/python/ops/nn_ops.py,4292,function,"Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `input`. Args: input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC' and 'NCHW' are supported. name: Optional name for the operation. Returns: A `Tensor` with the same type as `input`. The average pooled output tensor." 9610,avg_pool1d,tensorflow/tensorflow/python/ops/nn_ops.py,4332,function,"Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `input`. Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1` or `3`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1` or `3`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: An optional string from: ""NWC"", ""NCW"". Defaults to ""NWC"". name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The average pooled output tensor." 9611,avg_pool3d,tensorflow/tensorflow/python/ops/nn_ops.py,4378,function,"Performs the average pooling on the input. Each entry in `output` is the mean of the corresponding size `ksize` window in `input`. Args: input: A 5-D `Tensor` of shape `[batch, depth, height, width, channels]` and type `float32`, `float64`, `qint8`, `quint8`, or `qint32`. ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. 'NDHWC' and 'NCDHW' are supported. name: Optional name for the operation. Returns: A `Tensor` with the same type as `input`. The average pooled output tensor." 9612,max_pool_v2,tensorflow/tensorflow/python/ops/nn_ops.py,4419,function,"Performs the max pooling on the input. 
Args: input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape + [num_channels]` if `data_format` does not start with ""NC"" (default), or `[batch_size, num_channels] + input_spatial_shape` if data_format starts with ""NC"". Pooling happens over the spatial dimensions only. ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. Specifies the channel dimension. For N=1 it can be either ""NWC"" (default) or ""NCW"", for N=2 it can be either ""NHWC"" (default) or ""NCHW"" and for N=3 either ""NDHWC"" (default) or ""NCDHW"". name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor." 9613,max_pool,tensorflow/tensorflow/python/ops/nn_ops.py,4480,function,"Performs the max pooling on the input. Args: value: A 4-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. name: Optional name for the operation. input: Alias for value. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor." 9614,max_pool1d,tensorflow/tensorflow/python/ops/nn_ops.py,4530,function,"Performs the max pooling on the input. Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1` or `3`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1` or `3`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: An optional string from: ""NWC"", ""NCW"". Defaults to ""NWC"". name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor." 9615,max_pool2d,tensorflow/tensorflow/python/ops/nn_ops.py,4575,function,"Performs the max pooling on the input. Args: input: A 4-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. name: Optional name for the operation. Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor." 
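Not part of the indexed source: a short usage sketch for `max_pool2d` as documented above, with illustrative values, showing how a 2x2/stride-2 window halves each spatial dimension under `'VALID'` padding.

```python
import tensorflow as tf

# NHWC input: one 4x4 single-channel image holding the values 0..15.
x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])

# 2x2 window, stride 2, no padding: each output entry is the max of one window.
y = tf.nn.max_pool2d(x, ksize=2, strides=2, padding="VALID")
print(y.shape)                # (1, 2, 2, 1)
print(tf.reshape(y, [2, 2]))  # [[ 5.  7.] [13. 15.]]
```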
9616,max_pool3d,tensorflow/tensorflow/python/ops/nn_ops.py,4614,function,"Performs the max pooling on the input. Args: input: A 5-D `Tensor` of the format specified by `data_format`. ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `3` or `5`. The stride of the sliding window for each dimension of the input tensor. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the ""returns"" section of `tf.nn.convolution` for details. data_format: An optional string from: ""NDHWC"", ""NCDHW"". Defaults to ""NDHWC"". The data format of the input and output data. With the default format ""NDHWC"", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be ""NCDHW"", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. name: A name for the operation (optional). Returns: A `Tensor` of format specified by `data_format`. The max pooled output tensor." 9617,max_pool_with_argmax_v2,tensorflow/tensorflow/python/ops/nn_ops.py,4657,function,"Performs max pooling on the input and outputs both max values and indices. The indices in `argmax` are flattened, so that a maximum value at position `[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if `include_batch_in_index` is False; `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. The indices returned are always in `[0, height) x [0, width)` before flattening, even if padding is involved and the mathematically correct answer is outside (either negative or too large). This is a bug, but fixing it is difficult to do in a safe backwards compatible way, especially due to flattening. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D with shape `[batch, height, width, channels]`. Input to pool over. ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of the window for each dimension of the input tensor. strides: An int or list of `ints` that has length `1`, `2` or `4`. The stride of the sliding window for each dimension of the input tensor. padding: A `string` from: `""SAME"", ""VALID""`. The type of padding algorithm to use. data_format: An optional `string`, must be set to `""NHWC""`. Defaults to `""NHWC""`. Specify the data format of the input and output data. output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. The dtype of the returned argmax tensor. include_batch_in_index: An optional `boolean`. Defaults to `False`. Whether to include batch dimension in flattened index of `argmax`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (output, argmax). output: A `Tensor`. Has the same type as `input`. argmax: A `Tensor` of type `output_dtype`." 9618,max_pool_with_argmax_v1,tensorflow/tensorflow/python/ops/nn_ops.py,4727,function, 9619,_calc_conv3d_flops,tensorflow/tensorflow/python/ops/nn_ops.py,4758,function,Calculates the compute resources needed for Conv3D. 9620,_calc_conv_flops,tensorflow/tensorflow/python/ops/nn_ops.py,4777,function,Calculates the compute resources needed for Conv2D. 9621,_calc_depthwise_conv_flops,tensorflow/tensorflow/python/ops/nn_ops.py,4796,function,Calculates the compute resources needed for DepthwiseConv2dNative. 
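Not part of the indexed source: the flattened-index convention documented for `max_pool_with_argmax_v2` above can be verified directly; a minimal sketch with illustrative values.

```python
import tensorflow as tf

x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
out, argmax = tf.nn.max_pool_with_argmax(x, ksize=2, strides=2, padding="VALID")

# With include_batch_in_index=False (the default), each index is
# (y * width + x) * channels + c; here width=4 and channels=1, so the
# flattened indices happen to equal the maxima themselves for this input.
print(out.numpy().ravel())     # [ 5.  7. 13. 15.]
print(argmax.numpy().ravel())  # [ 5  7 13 15]
```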
9622,_calc_bias_add_flops,tensorflow/tensorflow/python/ops/nn_ops.py,4812,function,Calculates the compute resources needed for BiasAdd. 9623,xw_plus_b,tensorflow/tensorflow/python/ops/nn_ops.py,4822,function,"Computes matmul(x, weights) + biases. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified ""xw_plus_b"" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units." 9624,xw_plus_b_v1,tensorflow/tensorflow/python/ops/nn_ops.py,4844,function,"Computes matmul(x, weights) + biases. This is a deprecated version of `xw_plus_b` that will soon be removed. Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified ""xw_plus_b_v1"" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units." 9625,_get_noise_shape,tensorflow/tensorflow/python/ops/nn_ops.py,4868,function, 9626,dropout,tensorflow/tensorflow/python/ops/nn_ops.py,4898,function,"Computes dropout. For each element of `x`, with probability `rate`, outputs `0`, and otherwise scales up the input by `1 / (1-rate)`. The scaling is such that the expected sum is unchanged. By default, each element is kept or dropped independently. If `noise_shape` is specified, it must be [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]` will make independent decisions. For example, if `shape(x) = [k, l, m, n]` and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be kept independently and each row and column will be kept or not kept together. Args: x: A floating point tensor. keep_prob: A deprecated alias for `(1-rate)`. noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for randomly generated keep/drop flags. seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. name: A name for this operation (optional). rate: A scalar `Tensor` with the same type as `x`. The probability that each element of `x` is discarded. Returns: A Tensor of the same shape of `x`. Raises: ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point tensor." 9627,dropout_v2,tensorflow/tensorflow/python/ops/nn_ops.py,4950,function,"Computes dropout: randomly sets elements to zero to prevent overfitting. Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x. When converting 1.x code, please use named arguments to ensure behavior stays consistent. See also: `tf.keras.layers.Dropout` for a dropout layer. [Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN models. Input elements are randomly set to zero (and the other elements are rescaled). This encourages each node to be independently useful, as it cannot rely on the output of other nodes. More precisely: With probability `rate` elements of `x` are set to `0`. The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the expected value is preserved. 
>>> tf.random.set_seed(0) >>> x = tf.ones([3,5]) >>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy() array([[2., 0., 0., 2., 2.], [2., 2., 2., 2., 2.], [2., 0., 2., 0., 2.]], dtype=float32) >>> tf.random.set_seed(0) >>> x = tf.ones([3,5]) >>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy() array([[0., 0., 0., 5., 5.], [0., 5., 0., 5., 0.], [5., 0., 5., 0., 5.]], dtype=float32) >>> tf.nn.dropout(x, rate = 0.0) == x By default, each element is kept or dropped independently. If `noise_shape` is specified, it must be [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]` will make independent decisions. This is useful for dropping whole channels from an image or sequence. For example: >>> tf.random.set_seed(0) >>> x = tf.ones([3,10]) >>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy() array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.], [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.], [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32) Args: x: A floating point tensor. rate: A scalar `Tensor` with the same type as x. The probability that each element is dropped. For example, setting rate=0.1 would drop 10% of input elements. noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for randomly generated keep/drop flags. seed: A Python integer. Used to create random seeds. See `tf.random.set_seed` for behavior. name: A name for this operation (optional). Returns: A Tensor of the same shape of `x`. Raises: ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point tensor. `rate=1` is disallowed, because the output would be all zeros, which is likely not what was intended." 9628,top_k,tensorflow/tensorflow/python/ops/nn_ops.py,5072,function,"Finds values and indices of the `k` largest entries for the last dimension. If the input is a vector (rank=1), finds the `k` largest entries in the vector and outputs their values and indices as vectors. Thus `values[j]` is the `j`-th largest entry in `input`, and its index is `indices[j]`. For matrices (resp. higher rank input), computes the top `k` entries in each row (resp. vector along the last dimension). Thus, values.shape = indices.shape = input.shape[:-1] + [k] If two elements are equal, the lower-index element appears first. Args: input: 1-D or higher `Tensor` with last dimension at least `k`. k: 0-D `int32` `Tensor`. Number of top elements to look for along the last dimension (along each row for matrices). sorted: If true the resulting `k` elements will be sorted by the values in descending order. name: Optional name for the operation. Returns: values: The `k` largest elements along each last dimensional slice. indices: The indices of `values` within the last dimension of `input`." 9629,nth_element,tensorflow/tensorflow/python/ops/nn_ops.py,5101,function,"Finds the values of the `n`-th smallest element for the last dimension. Note that n is zero-indexed. If the input is a vector (rank-1), finds the entry which is the nth-smallest value in the vector and outputs its value as a scalar tensor. For matrices (resp. higher rank input), computes the entry which is the nth-smallest value in each row (resp. vector along the last dimension). Thus, values.shape = input.shape[:-1] Args: input: 1-D or higher `Tensor` with last dimension at least `n+1`. n: A `Tensor` of type `int32`. 0-D. Position of sorted vector to select along the last dimension (along each row for matrices). 
Valid range of n is `[0, input.shape[:-1])` reverse: An optional `bool`. Defaults to `False`. When set to True, find the nth-largest value in the vector and vice versa. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. The `n`-th order statistic along each last dimensional slice." 9630,fractional_max_pool,tensorflow/tensorflow/python/ops/nn_ops.py,5135,function,"Performs fractional max pooling on the input. This is a deprecated version of `fractional_max_pool`. Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word ""fractional"", means that the overall reduction ratio N does not have to be an integer. The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries. First we define the following: 1. input_row_length : the number of rows from the input set 2. output_row_length : which will be smaller than the input 3. alpha = input_row_length / output_row_length : our reduction ratio 4. K = floor(alpha) 5. row_pooling_sequence : this is the result list of pool boundary rows Then, row_pooling_sequence should satisfy: 1. a[0] = 0 : the first value of the sequence is 0 2. a[end] = input_row_length : the last value of the sequence is the size 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size 4. length(row_pooling_sequence) = output_row_length+1 Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2` instead. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional max pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. 
References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf))" 9631,fractional_max_pool_v2,tensorflow/tensorflow/python/ops/nn_ops.py,5220,function,"Performs fractional max pooling on the input. Fractional max pooling is slightly different than regular max pooling. In regular max pooling, you downsize an input set by taking the maximum value of smaller N x N subsections of the set (often 2x2), and try to reduce the set by a factor of N, where N is an integer. Fractional max pooling, as you might expect from the word ""fractional"", means that the overall reduction ratio N does not have to be an integer. The sizes of the pooling regions are generated randomly but are fairly uniform. For example, let's look at the height dimension, and the constraints on the list of rows that will be pool boundaries. First we define the following: 1. input_row_length : the number of rows from the input set 2. output_row_length : which will be smaller than the input 3. alpha = input_row_length / output_row_length : our reduction ratio 4. K = floor(alpha) 5. row_pooling_sequence : this is the result list of pool boundary rows Then, row_pooling_sequence should satisfy: 1. a[0] = 0 : the first value of the sequence is 0 2. a[end] = input_row_length : the last value of the sequence is the size 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size 4. length(row_pooling_sequence) = output_row_length+1 Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: An int or list of `ints` that has length `1`, `2` or `4`. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional max pooling. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional max pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf))" 9632,fractional_avg_pool,tensorflow/tensorflow/python/ops/nn_ops.py,5308,function,"Performs fractional average pooling on the input. This is a deprecated version of `fractional_avg_pool`. Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. 
The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region. Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional avg pooling. deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2` instead. seed: An optional `int`. Defaults to `0`. If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional avg pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf))" 9633,fractional_avg_pool_v2,tensorflow/tensorflow/python/ops/nn_ops.py,5372,function,"Performs fractional average pooling on the input. Fractional average pooling is similar to Fractional max pooling in the pooling region generation step. The only difference is that after pooling regions are generated, a mean operation is performed instead of a max operation in each pooling region. Args: value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`. pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for each dimension of `value`, currently only supports row and col dimension and should be >= 1.0. For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions respectively. pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`, generates the pooling sequence in a pseudorandom fashion, otherwise, in a random fashion. Check paper (Graham, 2015) for difference between pseudorandom and random. overlapping: An optional `bool`. Defaults to `False`. When set to `True`, it means when pooling, the values at the boundary of adjacent pooling cells are used by both cells. For example: `index 0 1 2 3 4` `value 20 5 16 3 7` If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. The result would be [20, 16] for fractional avg pooling. seed: An optional `int`. Defaults to `0`. 
If set to be non-zero, the random number generator is seeded by the given seed. Otherwise it is seeded by a random seed. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (`output`, `row_pooling_sequence`, `col_pooling_sequence`). output: Output `Tensor` after fractional avg pooling. Has the same type as `value`. row_pooling_sequence: A `Tensor` of type `int64`. col_pooling_sequence: A `Tensor` of type `int64`. References: Fractional Max-Pooling: [Graham, 2015](https://arxiv.org/abs/1412.6071) ([pdf](https://arxiv.org/pdf/1412.6071.pdf))" 9634,_calc_dilation2d_flops,tensorflow/tensorflow/python/ops/nn_ops.py,5434,function,Calculates the compute resources needed for Dilation2D. 9635,erosion2d,tensorflow/tensorflow/python/ops/nn_ops.py,5451,function,"Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors. The `value` tensor has shape `[batch, in_height, in_width, depth]` and the `kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default ""NHWC"" `data_format`. In detail, the grayscale morphological 2-D erosion is given by: output[b, y, x, c] = min_{dy, dx} value[b, strides[1] * y - rates[1] * dy, strides[2] * x - rates[2] * dx, c] - kernel[dy, dx, c] Duality: The erosion of `value` by the `kernel` is equal to the negation of the dilation of `-value` by the reflected `kernel`. Args: value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`. kernel: A `Tensor`. Must have the same type as `value`. 3-D with shape `[kernel_height, kernel_width, depth]`. strides: A list of `ints` that has length `>= 4`. 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. rates: A list of `ints` that has length `>= 4`. 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. padding: A `string` from: `""SAME"", ""VALID""`. The type of padding algorithm to use. name: A name for the operation (optional). If not specified, ""erosion2d"" is used. Returns: A `Tensor`. Has the same type as `value`. 4-D with shape `[batch, out_height, out_width, depth]`. Raises: ValueError: If the `value` depth does not match the `kernel`'s shape, or if padding is other than `'VALID'` or `'SAME'`." 9636,erosion2d_v2,tensorflow/tensorflow/python/ops/nn_ops.py,5511,function,"Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors. The `value` tensor has shape `[batch, in_height, in_width, depth]` and the `filters` tensor has shape `[filters_height, filters_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default ""NHWC"" `data_format`. In detail, the grayscale morphological 2-D erosion is given by: output[b, y, x, c] = min_{dy, dx} value[b, strides[1] * y - dilations[1] * dy, strides[2] * x - dilations[2] * dx, c] - filters[dy, dx, c] Duality: The erosion of `value` by the `filters` is equal to the negation of the dilation of `-value` by the reflected `filters`. Args: value: A `Tensor`.
4-D with shape `[batch, in_height, in_width, depth]`. filters: A `Tensor`. Must have the same type as `value`. 3-D with shape `[filters_height, filters_width, depth]`. strides: A list of `ints` that has length `>= 4`. 1-D of length 4. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. padding: A `string` from: `""SAME"", ""VALID""`. The type of padding algorithm to use. data_format: A `string`, only `""NHWC""` is currently supported. dilations: A list of `ints` that has length `>= 4`. 1-D of length 4. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. name: A name for the operation (optional). If not specified, ""erosion2d"" is used. Returns: A `Tensor`. Has the same type as `value`. 4-D with shape `[batch, out_height, out_width, depth]`. Raises: ValueError: If the `value` depth does not match the shape of `filters`, or if padding is other than `'VALID'` or `'SAME'`." 9637,in_top_k,tensorflow/tensorflow/python/ops/nn_ops.py,5581,function,"Says whether the targets are in the top `K` predictions. This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the prediction for the target class is finite (not inf, -inf, or nan) and among the top `k` predictions among all predictions for example `i`. Note that the behavior of `InTopK` differs from the `TopK` op in its handling of ties; if multiple classes have the same prediction value and straddle the top-`k` boundary, all of those classes are considered to be in the top `k`. More formally, let \\(predictions_i\\) be the predictions for all classes for example `i`, \\(targets_i\\) be the target class for example `i`, and \\(out_i\\) be the output for example `i`; then $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ Args: predictions: A `Tensor` of type `float32`. A `batch_size` x `classes` tensor. targets: A `Tensor`. Must be one of the following types: `int32`, `int64`. A `batch_size` vector of class ids. k: An `int`. Number of top elements to look at for computing precision. name: A name for the operation (optional). Returns: A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`."
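A minimal usage sketch for `in_top_k` as described above. The tensors and values are illustrative only; keyword arguments are used so the call reads the same under both the v1 (`predictions, targets, k`) and v2 (`targets, predictions, k`) argument orders:

```python
import tensorflow as tf

# Two examples, three classes; targets hold the true class id per example.
predictions = tf.constant([[0.1, 0.8, 0.1],
                           [0.4, 0.3, 0.3]])
targets = tf.constant([1, 2])

# out[i] is True iff predictions[i, targets[i]] is among the top k values.
print(tf.nn.in_top_k(predictions=predictions, targets=targets, k=1))
# -> [True, False]

# With k=2, classes 1 and 2 of the second example tie at 0.3 and straddle
# the top-2 boundary, so per the tie handling described above both count
# as being in the top k:
print(tf.nn.in_top_k(predictions=predictions, targets=targets, k=2))
# -> [True, True]
```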
9638,in_top_k_v2,tensorflow/tensorflow/python/ops/nn_ops.py,5616,function, 9639,ZeroFractionTest,tensorflow/tensorflow/python/ops/nn_test.py,48,class, 9640,SoftmaxTest,tensorflow/tensorflow/python/ops/nn_test.py,98,class, 9641,LogPoissonLossTest,tensorflow/tensorflow/python/ops/nn_test.py,158,class, 9642,LogSoftmaxTest,tensorflow/tensorflow/python/ops/nn_test.py,201,class, 9643,L2LossTest,tensorflow/tensorflow/python/ops/nn_test.py,245,class, 9644,L2NormalizeTest,tensorflow/tensorflow/python/ops/nn_test.py,270,class, 9645,DropoutTest,tensorflow/tensorflow/python/ops/nn_test.py,333,class, 9646,ComputeSampledLogitsTest,tensorflow/tensorflow/python/ops/nn_test.py,543,class, 9647,CReluTest,tensorflow/tensorflow/python/ops/nn_test.py,986,class, 9648,ReluTest,tensorflow/tensorflow/python/ops/nn_test.py,997,class, 9649,LeakyReluTest,tensorflow/tensorflow/python/ops/nn_test.py,1019,class, 9650,SwishTest,tensorflow/tensorflow/python/ops/nn_test.py,1062,class, 9651,MomentsTest,tensorflow/tensorflow/python/ops/nn_test.py,1091,class, 9652,DataFormatDimMapTest,tensorflow/tensorflow/python/ops/nn_test.py,1160,class, 9653,DataFormatVectorPermuteTest,tensorflow/tensorflow/python/ops/nn_test.py,1220,class, 9654,AvgPoolTest,tensorflow/tensorflow/python/ops/nn_test.py,1322,class, 9655,MaxPoolTest,tensorflow/tensorflow/python/ops/nn_test.py,1407,class, 9656,ConvolutionTest,tensorflow/tensorflow/python/ops/nn_test.py,1504,class, 9657,ConvTransposeTest,tensorflow/tensorflow/python/ops/nn_test.py,1517,class, 9658,RaggedEmbeddingTest,tensorflow/tensorflow/python/ops/nn_test.py,1598,class, 9659,SigmoidCrossEntropyWithLogitsTest,tensorflow/tensorflow/python/ops/nn_xent_test.py,38,class, 9660,WeightedCrossEntropyTest,tensorflow/tensorflow/python/ops/nn_xent_test.py,114,class, 9661,verify_tensor_all_finite,tensorflow/tensorflow/python/ops/numerics.py,35,function,"Assert that the tensor does not contain any NaN's or Inf's. Args: t: Tensor to check. msg: Message to log on failure. name: A name for this operation (optional). x: Alias for t. message: Alias for msg. Returns: Same tensor as `t`." 9662,verify_tensor_all_finite_v2,tensorflow/tensorflow/python/ops/numerics.py,56,function,"Assert that the tensor does not contain any NaN's or Inf's. Args: x: Tensor to check. message: Message to log on failure. name: A name for this operation (optional). Returns: Same tensor as `x`." 9663,add_check_numerics_ops,tensorflow/tensorflow/python/ops/numerics.py,76,function,"Connect a `tf.debugging.check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the current default graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Note: This API is not compatible with the use of `tf.cond` or `tf.while_loop`, and will raise a `ValueError` if you attempt to call it in such a graph. Returns: A `group` op depending on all `check_numerics` ops added. Raises: ValueError: If the graph contains any numeric operations in a control flow structure. RuntimeError: If called with eager execution enabled. @compatibility(eager) Not compatible with eager execution. To check for `Inf`s and `NaN`s under eager execution, call `tf.debugging.enable_check_numerics()` once before executing the checked operations. 
@end_compatibility" 9664,is_differentiable,tensorflow/tensorflow/python/ops/op_selector.py,25,function, 9665,is_iterable,tensorflow/tensorflow/python/ops/op_selector.py,32,function,Return true if the object is iterable. 9666,concatenate_unique,tensorflow/tensorflow/python/ops/op_selector.py,43,function,"Add all the elements of `lb` to `la` if they are not there already. The elements added to `la` maintain ordering with respect to `lb`. Args: la: List of Python objects. lb: List of Python objects. Returns: `la`: The list `la` with missing elements from `lb`." 9667,get_tensors,tensorflow/tensorflow/python/ops/op_selector.py,62,function,"get all the tensors which are input or output of an op in the graph. Args: graph: a `tf.Graph`. Returns: A list of `tf.Tensor`. Raises: TypeError: if graph is not a `tf.Graph`." 9668,get_unique_graph,tensorflow/tensorflow/python/ops/op_selector.py,80,function,"Return the unique graph used by the all the elements in tops. Args: tops: list of elements to check (usually a list of tf.Operation and/or tf.Tensor). Or a tf.Graph. check_types: check that the element in tops are of given type(s). If None, the types (tf.Operation, tf.Tensor) are used. none_if_empty: don't raise an error if tops is an empty list, just return None. Returns: The unique graph used by all the tops. Raises: TypeError: if tops is not a iterable of tf.Operation. ValueError: if the graph is not unique." 9669,check_graphs,tensorflow/tensorflow/python/ops/op_selector.py,118,function,"Check that all the element in args belong to the same graph. Args: *args: a list of object with a obj.graph property. Raises: ValueError: if all the elements do not belong to the same graph." 9670,make_list_of_t,tensorflow/tensorflow/python/ops/op_selector.py,134,function,"Convert ts to a list of `tf.Tensor`. Args: ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor. check_graph: if `True` check if all the tensors belong to the same graph. allow_graph: if `False` a `tf.Graph` cannot be converted. ignore_ops: if `True`, silently ignore `tf.Operation`. Returns: A newly created list of `tf.Tensor`. Raises: TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or, if `check_graph` is `True`, if all the ops do not belong to the same graph." 9671,get_generating_ops,tensorflow/tensorflow/python/ops/op_selector.py,164,function,"Return all the generating ops of the tensors in `ts`. Args: ts: a list of `tf.Tensor` Returns: A list of all the generating `tf.Operation` of the tensors in `ts`. Raises: TypeError: if `ts` cannot be converted to a list of `tf.Tensor`." 9672,get_consuming_ops,tensorflow/tensorflow/python/ops/op_selector.py,178,function,"Return all the consuming ops of the tensors in ts. Args: ts: a list of `tf.Tensor` Returns: A list of all the consuming `tf.Operation` of the tensors in `ts`. Raises: TypeError: if ts cannot be converted to a list of `tf.Tensor`." 9673,make_list_of_op,tensorflow/tensorflow/python/ops/op_selector.py,197,function,"Convert ops to a list of `tf.Operation`. Args: tops: can be an iterable of `tf.Operation`, a `tf.Graph` or a single operation. check_graph: if `True` check if all the operations belong to the same graph. allow_graph: if `False` a `tf.Graph` cannot be converted. ignore_ts: if True, silently ignore `tf.Tensor`. Returns: A newly created list of `tf.Operation`. Raises: TypeError: if tops cannot be converted to a list of `tf.Operation` or, if `check_graph` is `True`, if all the ops do not belong to the same graph." 
9674,_get_inputs,tensorflow/tensorflow/python/ops/op_selector.py,229,function, 9675,get_backward_walk_ops,tensorflow/tensorflow/python/ops/op_selector.py,237,function,"Do a backward graph walk and return all the visited ops. Args: seed_ops: an iterable of operations from which the backward graph walk starts. If a list of tensors is given instead, the seed_ops are set to be the generators of those tensors. inclusive: if True, the given seed_ops are also part of the resulting set. within_ops: an iterable of `tf.Operation` within which the search is restricted. If `within_ops` is `None`, the search is performed within the whole graph. within_ops_fn: if provided, a function on ops that should return True iff the op is within the graph traversal. This can be used along with `within_ops`, in which case an op is within if it is also in `within_ops`. stop_at_ts: an iterable of tensors at which the graph walk stops. control_inputs: if True, control inputs will be used while moving backward. only_differentiable: if True, only traverse ops which are differentiable. This includes natively differentiable ops, or ops with custom gradients. Returns: A Python set of all the `tf.Operation` behind `seed_ops`. Raises: TypeError: if `seed_ops` or `within_ops` cannot be converted to a list of `tf.Operation`." 9676,UnliftableError,tensorflow/tensorflow/python/ops/op_selector.py,311,class,Raised if a Tensor cannot be lifted from the graph. 9677,_as_operation,tensorflow/tensorflow/python/ops/op_selector.py,318,function, 9678,graph_inputs,tensorflow/tensorflow/python/ops/op_selector.py,324,function, 9679,_path_from,tensorflow/tensorflow/python/ops/op_selector.py,328,function,"Find one path from `from_op` to `tensor`, ignoring `sources`. Args: from_op: A `tf.Operation`. tensor: A `tf.Operation` or `tf.Tensor`. sources: A list of `tf.Tensor`. Returns: A Python string containing the path, or ""??"" if none is found." 9680,map_subgraph,tensorflow/tensorflow/python/ops/op_selector.py,368,function,"Walk a Graph and capture the subgraph between init_tensor and sources. Note: This function mutates visited_ops and op_outputs. Arguments: init_tensor: A Tensor or Operation where the subgraph terminates. sources: A set of Tensors where subgraph extraction should stop. disallowed_placeholders: An optional set of ops which may not appear in the lifted graph. Defaults to all placeholders. visited_ops: A set of operations which were visited in a prior pass. op_outputs: A defaultdict containing the outputs of an op which are to be copied into the new subgraph. add_sources: A boolean indicating whether placeholders which are not in sources should be allowed. Returns: The set of placeholders upon which init_tensor depends and are not in sources. Raises: UnliftableError: if init_tensor depends on a placeholder which is not in sources and add_sources is False." 9681,SelectTest,tensorflow/tensorflow/python/ops/op_selector_test.py,28,class, 9682,_OptionalFromValueGrad,tensorflow/tensorflow/python/ops/optional_grad.py,26,function, 9683,_OptionalGetValueGrad,tensorflow/tensorflow/python/ops/optional_grad.py,32,function, 9684,VarLenFeature,tensorflow/tensorflow/python/ops/parsing_config.py,48,class,"Configuration for parsing a variable-length input feature. Fields: dtype: Data type of input." 9685,RaggedFeature,tensorflow/tensorflow/python/ops/parsing_config.py,58,class,"Configuration for passing a RaggedTensor input feature.
`value_key` specifies the feature key for a variable-length list of values; and `partitions` specifies zero or more feature keys for partitioning those values into higher dimensions. Each element of `partitions` must be one of the following: * `tf.io.RaggedFeature.RowSplits(key: string)` * `tf.io.RaggedFeature.RowLengths(key: string)` * `tf.io.RaggedFeature.RowStarts(key: string)` * `tf.io.RaggedFeature.RowLimits(key: string)` * `tf.io.RaggedFeature.ValueRowIds(key: string)` * `tf.io.RaggedFeature.UniformRowLength(length: int)`. Where `key` is a feature key whose values are used to partition the values. Partitions are listed from outermost to innermost. * If `len(partitions) == 0` (the default), then: * A feature from a single `tf.Example` is parsed into a 1D `tf.Tensor`. * A feature from a batch of `tf.Example`s is parsed into a 2D `tf.RaggedTensor`, where the outer dimension is the batch dimension, and the inner (ragged) dimension is the feature length in each example. * If `len(partitions) == 1`, then: * A feature from a single `tf.Example` is parsed into a 2D `tf.RaggedTensor`, where the values taken from the `value_key` are separated into rows using the partition key. * A feature from a batch of `tf.Example`s is parsed into a 3D `tf.RaggedTensor`, where the outer dimension is the batch dimension, the two inner dimensions are formed by separating the `value_key` values from each example into rows using that example's partition key. * If `len(partitions) > 1`, then: * A feature from a single `tf.Example` is parsed into a `tf.RaggedTensor` whose rank is `len(partitions)+1`, and whose ragged_rank is `len(partitions)`. * A feature from a batch of `tf.Example`s is parsed into a `tf.RaggedTensor` whose rank is `len(partitions)+2` and whose ragged_rank is `len(partitions)+1`, where the outer dimension is the batch dimension. There is one exception: if the final (i.e., innermost) element(s) of `partitions` are `UniformRowLength`s, then the values are simply reshaped (as a higher-dimensional `tf.Tensor`), rather than being wrapped in a `tf.RaggedTensor`. #### Examples >>> import google.protobuf.text_format as pbtext >>> example_batch = [ ... pbtext.Merge(r''' ... features { ... feature {key: ""v"" value {int64_list {value: [3, 1, 4, 1, 5, 9]}}} ... feature {key: ""s1"" value {int64_list {value: [0, 2, 3, 3, 6]}}} ... feature {key: ""s2"" value {int64_list {value: [0, 2, 3, 4]}}} ... }''', tf.train.Example()).SerializeToString(), ... pbtext.Merge(r''' ... features { ... feature {key: ""v"" value {int64_list {value: [2, 7, 1, 8, 2, 8, 1]}}} ... feature {key: ""s1"" value {int64_list {value: [0, 3, 4, 5, 7]}}} ... feature {key: ""s2"" value {int64_list {value: [0, 1, 1, 4]}}} ... }''', tf.train.Example()).SerializeToString()] >>> features = { ... # Zero partitions: returns 1D tf.Tensor for each Example. ... 'f1': tf.io.RaggedFeature(value_key=""v"", dtype=tf.int64), ... # One partition: returns 2D tf.RaggedTensor for each Example. ... 'f2': tf.io.RaggedFeature(value_key=""v"", dtype=tf.int64, partitions=[ ... tf.io.RaggedFeature.RowSplits(""s1"")]), ... # Two partitions: returns 3D tf.RaggedTensor for each Example. ... 'f3': tf.io.RaggedFeature(value_key=""v"", dtype=tf.int64, partitions=[ ... tf.io.RaggedFeature.RowSplits(""s2""), ... tf.io.RaggedFeature.RowSplits(""s1"")]) ... } >>> feature_dict = tf.io.parse_single_example(example_batch[0], features) >>> for (name, val) in sorted(feature_dict.items()): ... 
print('%s: %s' % (name, val)) f1: tf.Tensor([3 1 4 1 5 9], shape=(6,), dtype=int64) f2: <tf.RaggedTensor [[3, 1], [4], [], [1, 5, 9]]> f3: <tf.RaggedTensor [[[3, 1], [4]], [[]], [[1, 5, 9]]]> >>> feature_dict = tf.io.parse_example(example_batch, features) >>> for (name, val) in sorted(feature_dict.items()): ... print('%s: %s' % (name, val)) f1: <tf.RaggedTensor [[3, 1, 4, 1, 5, 9], [2, 7, 1, 8, 2, 8, 1]]> f2: <tf.RaggedTensor [[[3, 1], [4], [], [1, 5, 9]], [[2, 7, 1], [8], [2], [8, 1]]]> f3: <tf.RaggedTensor [[[[3, 1], [4]], [[]], [[1, 5, 9]]], [[[2, 7, 1]], [], [[8], [2], [8, 1]]]]> Fields: dtype: Data type of the `RaggedTensor`. Must be one of: `tf.dtypes.int64`, `tf.dtypes.float32`, `tf.dtypes.string`. value_key: (Optional.) Key for a `Feature` in the input `Example`, whose parsed `Tensor` will be the resulting `RaggedTensor.flat_values`. If not specified, then it defaults to the key for this `RaggedFeature`. partitions: (Optional.) A list of objects specifying the row-partitioning tensors (from outermost to innermost). Each entry in this list must be one of: * `tf.io.RaggedFeature.RowSplits(key: string)` * `tf.io.RaggedFeature.RowLengths(key: string)` * `tf.io.RaggedFeature.RowStarts(key: string)` * `tf.io.RaggedFeature.RowLimits(key: string)` * `tf.io.RaggedFeature.ValueRowIds(key: string)` * `tf.io.RaggedFeature.UniformRowLength(length: int)`. Where `key` is a key for a `Feature` in the input `Example`, whose parsed `Tensor` will be the resulting row-partitioning tensor. row_splits_dtype: (Optional.) Data type for the row-partitioning tensor(s). One of `int32` or `int64`. Defaults to `int32`. validate: (Optional.) Boolean indicating whether or not to validate that the input values form a valid RaggedTensor. Defaults to `False`." 9686,SparseFeature,tensorflow/tensorflow/python/ops/parsing_config.py,224,class,"Configuration for parsing a sparse input feature from an `Example`. Note: preferably use `VarLenFeature` (possibly in combination with a `SequenceExample`) in order to parse out `SparseTensor`s instead of `SparseFeature`, due to its simplicity. Closely mimicking the `SparseTensor` that will be obtained by parsing an `Example` with a `SparseFeature` config, a `SparseFeature` contains a * `value_key`: The name of the key for a `Feature` in the `Example` whose parsed `Tensor` will be the resulting `SparseTensor.values`. * `index_key`: A list of names - one for each dimension in the resulting `SparseTensor`, whose `indices[i][dim]`, indicating the position of the `i`-th value in the `dim` dimension, will be equal to the `i`-th value in the Feature with key named `index_key[dim]` in the `Example`. * `size`: A list of ints for the resulting `SparseTensor.dense_shape`. For example, we can represent the following 2D `SparseTensor` ```python SparseTensor(indices=[[3, 1], [20, 0]], values=[0.5, -1.0], dense_shape=[100, 3]) ``` with an `Example` input proto ```python features { feature { key: ""val"" value { float_list { value: [ 0.5, -1.0 ] } } } feature { key: ""ix0"" value { int64_list { value: [ 3, 20 ] } } } feature { key: ""ix1"" value { int64_list { value: [ 1, 0 ] } } } } ``` and a `SparseFeature` config with 2 `index_key`s ```python SparseFeature(index_key=[""ix0"", ""ix1""], value_key=""val"", dtype=tf.float32, size=[100, 3]) ``` Fields: index_key: A single string name or a list of string names of index features. For each key the underlying feature's type must be `int64` and its length must always match that of the `value_key` feature. To represent `SparseTensor`s with a `dense_shape` of `rank` higher than 1, a list of length `rank` should be used. value_key: Name of value feature. The underlying feature's type must be `dtype` and its length must always match that of all the `index_key`s' features. dtype: Data type of the `value_key` feature. size: A Python int or list thereof specifying the dense shape.
Should be a list if and only if `index_key` is a list. In that case, the list must have the same length as `index_key`. For each entry `i`, all values in the `index_key[i]` feature must be in `[0, size[i])`. already_sorted: A Python boolean to specify whether the values in `value_key` are already sorted by their index position. If so, sorting is skipped. False by default (optional)." 9687,FixedLenFeature,tensorflow/tensorflow/python/ops/parsing_config.py,299,class,"Configuration for parsing a fixed-length input feature. To treat sparse input as dense, provide a `default_value`; otherwise, the parse functions will fail on any examples missing this feature. Fields: shape: Shape of input data. dtype: Data type of input. default_value: Value to be used if an example is missing this feature. It must be compatible with `dtype` and of the specified `shape`." 9688,FixedLenSequenceFeature,tensorflow/tensorflow/python/ops/parsing_config.py,320,class,"Configuration for parsing a variable-length input feature into a `Tensor`. The resulting `Tensor` of parsing a single `SequenceExample` or `Example` has a static `shape` of `[None] + shape` and the specified `dtype`. The resulting `Tensor` of parsing a `batch_size` many `Example`s has a static `shape` of `[batch_size, None] + shape` and the specified `dtype`. The entries in the `batch` from different `Examples` will be padded with `default_value` to the maximum length present in the `batch`. To treat a sparse input as dense, provide `allow_missing=True`; otherwise, the parse functions will fail on any examples missing this feature. Fields: shape: Shape of input data for dimension 2 and higher. First dimension is of variable length `None`. dtype: Data type of input. allow_missing: Whether to allow this feature to be missing from a feature list item. Is available only for parsing `SequenceExample`, not for parsing `Example`s. default_value: Scalar value to be used to pad multiple `Example`s to their maximum length. Irrelevant for parsing a single `Example` or `SequenceExample`. Defaults to """" for dtype string and 0 otherwise (optional)." 9689,_ParseOpParams,tensorflow/tensorflow/python/ops/parsing_config.py,353,class,"Raw parameters used by `gen_parsing_ops`. Attributes: sparse_keys: A list of string keys in the examples' features. The results for these keys will be returned as `SparseTensor` objects. sparse_types: A list of `DTypes` of the same length as `sparse_keys`. Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported. dense_keys: A list of string keys in the examples' features. The results for these keys will be returned as `Tensor`s. dense_types: A list of DTypes of the same length as `dense_keys`. Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported. dense_defaults: A dict mapping string keys to `Tensor`s. The keys of the dict must match the dense_keys of the feature. dense_shapes: A list of tuples with the same length as `dense_keys`. The shape of the data for each dense feature referenced by `dense_keys`. Required for any input tensors identified by `dense_keys`. Must be either fully defined, or may contain an unknown first dimension. An unknown first dimension means the feature is treated as having a variable number of blocks, and the output shape along this dimension is considered unknown at graph build time. Padding is applied for minibatch elements smaller than the maximum number of blocks for the given feature along this dimension.
ragged_keys: A list of string keys in the examples' features. The results for these keys will be returned as `RaggedTensor` objects. ragged_value_types: A list of `DTypes` of the same length as `ragged_keys`, specifying the value type for each ragged feature. Must be one of: `tf.float32`, `tf.int64`, `tf.string`. ragged_split_types: A list of `DTypes` of the same length as `ragged_keys`, specifying the row_splits type for each ragged feature. Must be one of: `tf.int32`, `tf.int64`. dense_shapes_as_proto: dense_shapes converted to TensorShapeProto. dense_defaults_vec: A vector of `Tensor`s containing the default values, corresponding 1:1 with `dense_keys`. num_features: The total number of feature keys." 9690,_construct_tensors_for_composite_features,tensorflow/tensorflow/python/ops/parsing_config.py,661,function,"Creates tensors for SparseFeatures and RaggedFeatures. Constructs new dict based on `tensor_dict`. For each key in `features` whose value is a `SparseFeature`: * Looks up that SparseFeature's value_key and index_keys in tensor_dict. * Uses those tensors to construct a single SparseTensor. * Stores that SparseTensor in the output dict under the same key. For each key in `features` whose value is a `RaggedFeature`: * Looks up that RaggedFeature's value_key and partition keys in tensor_dict. * Uses those tensors to construct a single RaggedTensor. * Stores that RaggedTensor in the output dict under the same key. For any other key in `features`: * Copies that key and its value from tensor_dict to the output dictionary. Args: features: A `dict` mapping feature keys to `SparseFeature` or `RaggedFeature` values. Values of other types will be ignored. tensor_dict: A `dict` mapping feature keys to `Tensor`, `SparseTensor`, and `RaggedTensor` values. Expected to contain keys of the `SparseFeature`s' `index_key`s and `value_key`s and mapping them to `SparseTensor`s. Returns: A `dict` mapping feature keys to `Tensor`, `SparseTensor`, and `RaggedTensor` values. Similar to `tensor_dict` except each `SparseFeature` in `features` results in a single `SparseTensor`; and each `RaggedFeature` in `features` results in a single `RaggedTensor`." 9691,_add_ragged_partition,tensorflow/tensorflow/python/ops/parsing_config.py,752,function,"Creates a RaggedTensor from a values tensor and a partition tensor. Args: values: The values tensor for the new RaggedTensor. partition: The partition configuration object. Specifies the key that should be used to look up the partition tensor (unless partition is a RaggedFeature.UniformRowLength, in which case there is no partition tensor). tensor_dict: The dictionary mapping keys to tensors. row_splits_dtype: The dtype for the partition tensor. validate: Whether to validate that the values form a valid RaggedTensor. Returns: A new RaggedTensor formed from the values and partition tensors." 9692,_add_batched_ragged_partition,tensorflow/tensorflow/python/ops/parsing_config.py,797,function,"Adds a batched ragged partition tensor to a batched ragged tensor. Args: rt: A RaggedTensor with shape [batch_size, ...]. partition: The partition configuration object. Specifies the key that should be used to look up the partition tensor (unless partition is a RaggedFeature.UniformRowLength, in which case there is no partition tensor). The specified tensor must have shape [batch_size, ...]. tensor_dict: The dictionary mapping keys to tensors. feature_key: The name of the feature being parsed (for error messages). validate: Whether to validate that the values form a valid RaggedTensor. 
outer_splits: If not None, then we have two batch dimensions, and this is the row-splits for the collapsed batch dimension. Every partition tensor must have an outer row_splits that matches this value. Returns: A new RaggedTensor where each batch item `rt[i]` has been partitioned using `partition_t[i]`." 9693,_build_ragged_tensors,tensorflow/tensorflow/python/ops/parsing_config.py,879,function,Builds RaggedTensors from the outputs of a parse op. 9694,_prepend_none_dimension,tensorflow/tensorflow/python/ops/parsing_ops.py,61,function,Returns a copy of features with adjusted FixedLenSequenceFeature shapes. 9695,parse_example_v2,tensorflow/tensorflow/python/ops/parsing_ops.py,82,function,"Parses `Example` protos into a `dict` of tensors. Parses a number of serialized [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) protos given in `serialized`. We refer to `serialized` as a batch with `batch_size` many entries of individual `Example` protos. `example_names` may contain descriptive names for the corresponding serialized protos. These may be useful for debugging purposes, but they have no effect on the output. If not `None`, `example_names` must be the same length as `serialized`. This op parses serialized examples into a dictionary mapping keys to `Tensor`, `SparseTensor`, and `RaggedTensor` objects. `features` is a dict from keys to `VarLenFeature`, `SparseFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` and `SparseFeature` is mapped to a `SparseTensor`; each `FixedLenFeature` is mapped to a `Tensor`; and each `RaggedFeature` is mapped to a `RaggedTensor`. Each `VarLenFeature` maps to a `SparseTensor` of the specified type representing a ragged matrix. Its indices are `[batch, index]` where `batch` identifies the example in `serialized`, and `index` is the value's index in the list of values associated with that feature and example. Each `SparseFeature` maps to a `SparseTensor` of the specified type representing a Tensor of `dense_shape` `[batch_size] + SparseFeature.size`. Its `values` come from the feature in the examples with key `value_key`. A `values[i]` comes from a position `k` in the feature of an example at batch entry `batch`. This positional information is recorded in `indices[i]` as `[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of the feature in the example with key `SparseFeature.index_key[j]`. In other words, we split the indices (except the first index indicating the batch entry) of a `SparseTensor` by dimension into different features of the `Example`. Due to its complexity, a `VarLenFeature` should be preferred over a `SparseFeature` whenever possible. Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or `tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`. `FixedLenFeature` entries with a `default_value` are optional. With no default value, we will fail if that `Feature` is missing from any example in `serialized`. Each `FixedLenSequenceFeature` `df` maps to a `Tensor` of the specified type (or `tf.float32` if not specified) and shape `(serialized.size(), None) + df.shape`. All examples in `serialized` will be padded with `default_value` along the second dimension. Each `RaggedFeature` maps to a `RaggedTensor` of the specified type.
It is formed by stacking the `RaggedTensor` for each example, where the `RaggedTensor` for each individual example is constructed using the tensors specified by `RaggedFeature.value_key` and `RaggedFeature.partitions`. See the `tf.io.RaggedFeature` documentation for details and examples. Examples: For example, if one expects a `tf.float32` `VarLenFeature` `ft` and three serialized `Example`s are provided: ``` serialized = [ features { feature { key: ""ft"" value { float_list { value: [1.0, 2.0] } } } }, features { }, features { feature { key: ""ft"" value { float_list { value: [3.0] } } } } ] ``` then the output will look like: ```python {""ft"": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]], values=[1.0, 2.0, 3.0], dense_shape=(3, 2)) } ``` If instead a `FixedLenSequenceFeature` with `default_value = -1.0` and `shape=[]` is used then the output will look like: ```python {""ft"": [[1.0, 2.0], [-1.0, -1.0], [3.0, -1.0]]} ``` Given two `Example` input protos in `serialized`: ``` [ features { feature { key: ""kw"" value { bytes_list { value: [ ""knit"", ""big"" ] } } } feature { key: ""gps"" value { float_list { value: [] } } } }, features { feature { key: ""kw"" value { bytes_list { value: [ ""emmy"" ] } } } feature { key: ""dank"" value { int64_list { value: [ 42 ] } } } feature { key: ""gps"" value { } } } ] ``` And arguments ``` example_names: [""input0"", ""input1""], features: { ""kw"": VarLenFeature(tf.string), ""dank"": VarLenFeature(tf.int64), ""gps"": VarLenFeature(tf.float32), } ``` Then the output is a dictionary: ```python { ""kw"": SparseTensor( indices=[[0, 0], [0, 1], [1, 0]], values=[""knit"", ""big"", ""emmy""], dense_shape=[2, 2]), ""dank"": SparseTensor( indices=[[1, 0]], values=[42], dense_shape=[2, 1]), ""gps"": SparseTensor( indices=[], values=[], dense_shape=[2, 0]), } ``` For dense results in two serialized `Example`s: ``` [ features { feature { key: ""age"" value { int64_list { value: [ 0 ] } } } feature { key: ""gender"" value { bytes_list { value: [ ""f"" ] } } } }, features { feature { key: ""age"" value { int64_list { value: [] } } } feature { key: ""gender"" value { bytes_list { value: [ ""f"" ] } } } } ] ``` We can use arguments: ``` example_names: [""input0"", ""input1""], features: { ""age"": FixedLenFeature([], dtype=tf.int64, default_value=-1), ""gender"": FixedLenFeature([], dtype=tf.string), } ``` And the expected output is: ```python { ""age"": [[0], [-1]], ""gender"": [[""f""], [""f""]], } ``` An alternative to `VarLenFeature` to obtain a `SparseTensor` is `SparseFeature`. For example, given two `Example` input protos in `serialized`: ``` [ features { feature { key: ""val"" value { float_list { value: [ 0.5, -1.0 ] } } } feature { key: ""ix"" value { int64_list { value: [ 3, 20 ] } } } }, features { feature { key: ""val"" value { float_list { value: [ 0.0 ] } } } feature { key: ""ix"" value { int64_list { value: [ 42 ] } } } } ] ``` And arguments ``` example_names: [""input0"", ""input1""], features: { ""sparse"": SparseFeature( index_key=""ix"", value_key=""val"", dtype=tf.float32, size=100), } ``` Then the output is a dictionary: ```python { ""sparse"": SparseTensor( indices=[[0, 3], [0, 20], [1, 42]], values=[0.5, -1.0, 0.0], dense_shape=[2, 100]), } ``` See the `tf.io.RaggedFeature` documentation for examples showing how `RaggedFeature` can be used to obtain `RaggedTensor`s. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos.
features: A `dict` mapping feature keys to `FixedLenFeature`, `VarLenFeature`, `SparseFeature`, and `RaggedFeature` values. example_names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos in the batch. name: A name for this operation (optional). Returns: A `dict` mapping feature keys to `Tensor`, `SparseTensor`, and `RaggedTensor` values. Raises: ValueError: if any feature is invalid." 9696,parse_example,tensorflow/tensorflow/python/ops/parsing_ops.py,320,function, 9697,_parse_example_raw,tensorflow/tensorflow/python/ops/parsing_ops.py,327,function,"Parses `Example` protos. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos. names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. params: A `ParseOpParams` containing the parameters for the parse op. name: A name for this operation (optional). Returns: A `dict` mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s." 9698,parse_single_example,tensorflow/tensorflow/python/ops/parsing_ops.py,380,function,"Parses a single `Example` proto. Similar to `parse_example`, except: For dense tensors, the returned `Tensor` is identical to the output of `parse_example`, except there is no batch dimension, the output shape is the same as the shape given in `dense_shape`. For `SparseTensor`s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first (`batch_size`) entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching `Example` protos with `parse_example` instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. name: A name for this operation (optional). example_names: (Optional) A scalar string Tensor, the associated name. Returns: A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Raises: ValueError: if any feature is invalid." 9699,parse_single_example_v2,tensorflow/tensorflow/python/ops/parsing_ops.py,415,function,"Parses a single `Example` proto. Similar to `parse_example`, except: For dense tensors, the returned `Tensor` is identical to the output of `parse_example`, except there is no batch dimension, the output shape is the same as the shape given in `dense_shape`. For `SparseTensor`s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first (`batch_size`) entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching `Example` protos with `parse_example` instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. example_names: (Optional) A scalar string Tensor, the associated name. name: A name for this operation (optional). Returns: A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Raises: ValueError: if any feature is invalid." 9700,parse_sequence_example,tensorflow/tensorflow/python/ops/parsing_ops.py,457,function,"Parses a batch of `SequenceExample` protos. Parses a vector of serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) protos given in `serialized`. 
This op parses serialized sequence examples into a tuple of dictionaries, each mapping keys to `Tensor` and `SparseTensor` objects. The first dictionary contains mappings for keys appearing in `context_features`, and the second dictionary contains mappings for keys appearing in `sequence_features`. At least one of `context_features` and `sequence_features` must be provided and non-empty. The `context_features` keys are associated with a `SequenceExample` as a whole, independent of time / frame. In contrast, the `sequence_features` keys provide a way to access variable-length data within the `FeatureList` section of the `SequenceExample` proto. While the shapes of `context_features` values are fixed with respect to frame, the frame dimension (the first dimension) of `sequence_features` values may vary between `SequenceExample` protos, and even between `feature_list` keys within the same `SequenceExample`. `context_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape, and default value. `sequence_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenSequenceFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type. The shape will be `(B,T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where `B` is the batch size, and `T` is the length of the associated `FeatureList` in the `SequenceExample`. For instance, `FixedLenSequenceFeature([])` yields a scalar 2-D `Tensor` of static shape `[None, None]` and dynamic shape `[B, T]`, while `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 3-D `Tensor` of static shape `[None, None, k]` and dynamic shape `[B, T, k]`. Like the input, the resulting output tensors have a batch dimension. This means that the original per-example shapes of `VarLenFeature`s and `FixedLenSequenceFeature`s can be lost. To handle that situation, this op also provides dicts of shape tensors as part of the output. There is one dict for the context features, and one for the feature_list features. Context features of type `FixedLenFeature` will not be present, since their shapes are already known by the caller. In situations where the input `FixedLenSequenceFeature`s are of different lengths across examples, the shorter examples will be padded with default datatype values: 0 for numeric types, and the empty string for string types. Each `SparseTensor` corresponding to `sequence_features` represents a ragged vector. Its indices are `[time, index]`, where `time` is the `FeatureList` entry and `index` is the value's index in the list of values associated with that time. `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature` entries with `allow_missing=True` are optional; otherwise, we will fail if that `Feature` or `FeatureList` is missing from any example in `serialized`. `example_names` may contain descriptive names for the corresponding serialized protos. These may be useful for debugging purposes, but they have no effect on the output. If not `None`, `example_names` must be a vector of the same length as `serialized`. Args: serialized: A vector (1-D Tensor) of type string containing binary serialized `SequenceExample` protos.
context_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with a `SequenceExample` as a whole. sequence_features: A `dict` mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with data within the `FeatureList` section of the `SequenceExample` proto. example_names: A vector (1-D Tensor) of strings (optional), the name of the serialized protos. name: A name for this operation (optional). Returns: A tuple of three `dict`s, each mapping keys to `Tensor`s, `SparseTensor`s, and `RaggedTensor`. The first dict contains the context key/values, the second dict contains the feature_list key/values, and the final dict contains the lengths of any dense feature_list features. Raises: ValueError: if any feature is invalid." 9701,_parse_sequence_example_raw,tensorflow/tensorflow/python/ops/parsing_ops.py,576,function,"Parses a vector of `SequenceExample` protos. Args: serialized: A vector (1-D Tensor) of type string, containing binary serialized `SequenceExample` protos. debug_name: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. context: A `ParseOpParams` containing the parameters for the parse op for the context features. feature_list: A `ParseOpParams` containing the parameters for the parse op for the feature_list features. name: A name for this operation (optional). Returns: A tuple of three `dict`s, each mapping keys to `Tensor`s, `SparseTensor`s, and `RaggedTensor`s. The first dict contains the context key/values, the second dict contains the feature_list key/values, and the final dict contains the lengths of any dense feature_list features. Raises: TypeError: if feature_list.dense_defaults is not either None or a dict." 9702,parse_single_sequence_example,tensorflow/tensorflow/python/ops/parsing_ops.py,702,function,"Parses a single `SequenceExample` proto. Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) proto given in `serialized`. This op parses a serialized sequence example into a tuple of dictionaries, each mapping keys to `Tensor` and `SparseTensor` objects. The first dictionary contains mappings for keys appearing in `context_features`, and the second dictionary contains mappings for keys appearing in `sequence_features`. At least one of `context_features` and `sequence_features` must be provided and non-empty. The `context_features` keys are associated with a `SequenceExample` as a whole, independent of time / frame. In contrast, the `sequence_features` keys provide a way to access variable-length data within the `FeatureList` section of the `SequenceExample` proto. While the shapes of `context_features` values are fixed with respect to frame, the frame dimension (the first dimension) of `sequence_features` values may vary between `SequenceExample` protos, and even between `feature_list` keys within the same `SequenceExample`. `context_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape, and default value. `sequence_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenSequenceFeature` objects. 
Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type. The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where `T` is the length of the associated `FeatureList` in the `SequenceExample`. For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of static shape `[None]` and dynamic shape `[T]`, while `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor` of static shape `[None, k]` and dynamic shape `[T, k]`. Each `SparseTensor` corresponding to `sequence_features` represents a ragged vector. Its indices are `[time, index]`, where `time` is the `FeatureList` entry and `index` is the value's index in the list of values associated with that time. `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature` entries with `allow_missing=True` are optional; otherwise, we will fail if that `Feature` or `FeatureList` is missing from any example in `serialized`. `example_name` may contain a descriptive name for the corresponding serialized proto. This may be useful for debugging purposes, but it has no effect on the output. If not `None`, `example_name` must be a scalar. Note that the batch version of this function, `tf.parse_sequence_example`, is written for better memory efficiency and will be faster on large `SequenceExample`s. Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized `SequenceExample` proto. context_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with a `SequenceExample` as a whole. sequence_features: A `dict` mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with data within the `FeatureList` section of the `SequenceExample` proto. example_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s. * The first dict contains the context key/values. * The second dict contains the feature_list key/values. Raises: ValueError: if any feature is invalid." 9703,_parse_single_sequence_example_raw,tensorflow/tensorflow/python/ops/parsing_ops.py,811,function,"Parses a single `SequenceExample` proto. Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized `SequenceExample` proto. context: A `ParseOpParams` containing the parameters for the parse op for the context features. feature_list: A `ParseOpParams` containing the parameters for the parse op for the feature_list features. debug_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s. The first dict contains the context key/values. The second dict contains the feature_list key/values. Raises: TypeError: if feature_list.dense_defaults is not either None or a dict." 9704,decode_raw,tensorflow/tensorflow/python/ops/parsing_ops.py,846,function,"Convert raw byte strings into tensors. Args: input_bytes: Each element of the input Tensor is converted to an array of bytes. out_type: `DType` of the output. 
Acceptable types are `half`, `float`, `double`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`. little_endian: Whether the `input_bytes` data is in little-endian format. Data will be converted into host byte order if necessary. fixed_length: If set, the first `fixed_length` bytes of each element will be converted. Data will be zero-padded or truncated to the specified length. `fixed_length` must be a multiple of the size of `out_type`. `fixed_length` must be specified if the elements of `input_bytes` are of variable length. name: A name for the operation (optional). Returns: A `Tensor` object storing the decoded bytes." 9705,decode_raw_v1,tensorflow/tensorflow/python/ops/parsing_ops.py,892,function,"Convert raw byte strings into tensors. Args: input_bytes: Each element of the input Tensor is converted to an array of bytes. out_type: `DType` of the output. Acceptable types are `half`, `float`, `double`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`. little_endian: Whether the `input_bytes` data is in little-endian format. Data will be converted into host byte order if necessary. name: A name for the operation (optional). bytes: Deprecated parameter. Use `input_bytes` instead. Returns: A `Tensor` object storing the decoded bytes." 9706,decode_csv,tensorflow/tensorflow/python/ops/parsing_ops.py,935,function,"Convert CSV records to tensors. Each column maps to one tensor. RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float fields. Args: records: A `Tensor` of type `string`. Each string is a record/row in the CSV and all records should have the same format. record_defaults: A list of `Tensor` objects with specific types. Acceptable types are `float32`, `float64`, `int32`, `int64`, `string`. One tensor per column of the input record, with either a scalar default value for that column or an empty vector if the column is required. field_delim: An optional `string`. Defaults to `"",""`. Char delimiter to separate fields in a record. use_quote_delim: An optional `bool`. Defaults to `True`. If false, treats double quotation marks as regular characters inside the string fields (ignoring RFC 4180, Section 2, Bullet 5). name: A name for the operation (optional). na_value: Additional string to recognize as NA/NaN. select_cols: Optional sorted list of column indices to select. If specified, only this subset of columns will be parsed and returned. Returns: A list of `Tensor` objects. Has the same type as `record_defaults`. Each tensor will have the same shape as `records`. Raises: ValueError: If any of the arguments is malformed." 9707,decode_csv_v2,tensorflow/tensorflow/python/ops/parsing_ops.py,984,function,"Convert CSV records to tensors. Each column maps to one tensor. RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float fields. Args: records: A `Tensor` of type `string`. Each string is a record/row in the CSV and all records should have the same format. record_defaults: A list of `Tensor` objects with specific types. Acceptable types are `float32`, `float64`, `int32`, `int64`, `string`. One tensor per column of the input record, with either a scalar default value for that column or an empty vector if the column is required. field_delim: An optional `string`. Defaults to `"",""`. Char delimiter to separate fields in a record. use_quote_delim: An optional `bool`. Defaults to `True`.
If false, treats double quotation marks as regular characters inside the string fields (ignoring RFC 4180, Section 2, Bullet 5). na_value: Additional string to recognize as NA/NaN. select_cols: Optional sorted list of column indices to select. If specified, only this subset of columns will be parsed and returned. name: A name for the operation (optional). Returns: A list of `Tensor` objects. Has the same type as `record_defaults`. Each tensor will have the same shape as `records`. Raises: ValueError: If any of the arguments is malformed." 9708,_assert_scalar,tensorflow/tensorflow/python/ops/parsing_ops.py,1042,function,"Asserts that `value` is scalar, and returns `value`." 9709,variable_axis_size_partitioner,tensorflow/tensorflow/python/ops/partitioned_variables.py,72,function,"Get a partitioner for VariableScope to keep shards below `max_shard_bytes`. This partitioner will shard a Variable along one axis, attempting to keep the maximum shard size below `max_shard_bytes`. In practice, this is not always possible when sharding along only one axis. When this happens, this axis is sharded as much as possible (i.e., every dimension becomes a separate shard). If the partitioner hits the `max_shards` limit, then each shard may end up larger than `max_shard_bytes`. By default `max_shards` equals `None` and no limit on the number of shards is enforced. One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost `64MB`, to keep below the protobuf byte limit. Args: max_shard_bytes: The maximum size any given shard is allowed to be. axis: The axis to partition along. Default: outermost axis. bytes_per_string_element: If the `Variable` is of type string, this provides an estimate of how large each scalar in the `Variable` is. max_shards: An `int`, the maximum number of shards to create, taking precedence over `max_shard_bytes`. Returns: A partition function usable as the `partitioner` argument to `variable_scope` and `get_variable`. Raises: ValueError: If any of the byte counts are non-positive." 9710,min_max_variable_partitioner,tensorflow/tensorflow/python/ops/partitioned_variables.py,158,function,"Partitioner to allocate minimum size per slice. Returns a partitioner that partitions the variable of given shape and dtype such that each partition holds at least a `min_slice_size` slice of the variable. The maximum number of such partitions (upper bound) is given by `max_partitions`. Args: max_partitions: Upper bound on the number of partitions. Defaults to 1. axis: Axis along which to partition the variable. Defaults to 0. min_slice_size: Minimum size of the variable slice per partition. Defaults to 256K. bytes_per_string_element: If the `Variable` is of type string, this provides an estimate of how large each scalar in the `Variable` is. Returns: A partition function usable as the `partitioner` argument to `variable_scope` and `get_variable`." 9711,fixed_size_partitioner,tensorflow/tensorflow/python/ops/partitioned_variables.py,222,function,"Partitioner to specify a fixed number of shards along given axis. Args: num_shards: `int`, number of shards to partition variable. axis: `int`, axis to partition on. Returns: A partition function usable as the `partitioner` argument to `variable_scope` and `get_variable`." 9712,create_partitioned_variables,tensorflow/tensorflow/python/ops/partitioned_variables.py,244,function,"Create a list of partitioned variables according to the given `slicing`.
Currently only one dimension of the full variable can be sliced, and the full variable can be reconstructed by the concatenation of the returned list along that dimension. Args: shape: List of integers. The shape of the full variable. slicing: List of integers. How to partition the variable. Must be of the same length as `shape`. Each value indicates how many slices to create in the corresponding dimension. Presently only one of the values can be more than 1; that is, the variable can only be sliced along one dimension. For convenience, the requested number of partitions does not have to divide the corresponding dimension evenly. If it does not, the shapes of the partitions are incremented by 1 starting from partition 0 until all slack is absorbed. The adjustment rules may change in the future, but as you can save/restore these variables with different slicing specifications this should not be a problem. initializer: A `Tensor` of shape `shape` or a variable initializer function. If a function, it will be called once for each slice, passing the shape and data type of the slice as parameters. The function must return a tensor with the same shape as the slice. dtype: Type of the variables. Ignored if `initializer` is a `Tensor`. trainable: If True also add all the variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. collections: List of graph collections keys to add the variables to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. name: Optional name for the full variable. Defaults to `""PartitionedVariable""` and gets uniquified automatically. reuse: Boolean or `None`; if `True` and `name` is set, reuses previously created variables; if `False`, creates new variables; if `None`, inherits the parent scope's reuse setting. Returns: A list of Variables corresponding to the slicing. Raises: ValueError: If any of the arguments is malformed." 9713,Conv2DTest,tensorflow/tensorflow/python/ops/quantized_conv_ops_test.py,29,class, 9714,QuantizedOpsTest,tensorflow/tensorflow/python/ops/quantized_ops_test.py,29,class, 9715,add_leading_unit_dimensions,tensorflow/tensorflow/python/ops/random_grad.py,33,function, 9716,_RandomGammaGrad,tensorflow/tensorflow/python/ops/random_grad.py,41,function,"Returns the gradient of a Gamma sample w.r.t. alpha. The gradient is computed using implicit differentiation (Figurnov et al., 2018). Args: op: A `RandomGamma` operation. We assume that the inputs to the operation are `shape` and `alpha` tensors, and the output is the `sample` tensor. grad: The incoming gradient `dloss / dsample` of the same shape as `op.outputs[0]`. Returns: A `Tensor` with derivatives `dloss / dalpha`. References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 9717,_StatelessRandomGammaV2Grad,tensorflow/tensorflow/python/ops/random_grad.py,81,function,"Returns the gradient of a Gamma sample w.r.t. alpha. The gradient is computed using implicit differentiation (Figurnov et al., 2018). Args: op: A `StatelessRandomGamma` operation. We assume that the inputs to the operation are `shape`, `seed` and `alpha` tensors, and the output is the `sample` tensor. grad: The incoming gradient `dloss / dsample` of the same shape as `op.outputs[0]`. Returns: A `Tensor` with derivatives `dloss / dalpha`.
References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 9718,_Ndtr,tensorflow/tensorflow/python/ops/random_grad.py,124,function,Normal distribution function. 9719,_StatelessParameterizedTruncatedNormalGrad,tensorflow/tensorflow/python/ops/random_grad.py,139,function,"Returns the gradient of a TruncatedNormal sample w.r.t. parameters. The gradient is computed using implicit differentiation (Figurnov et al., 2018). Args: op: A `StatelessParameterizedTruncatedNormal` operation. We assume that the inputs to the operation are `shape`, `seed`, `mean`, `stddev`, `minval`, and `maxval` tensors, and the output is the `sample` tensor. grad: The incoming gradient `dloss / dsample` of the same shape as `op.outputs[0]`. Returns: A list of `Tensor` with derivatives with respect to each parameter. References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 9720,random_normal,tensorflow/tensorflow/python/ops/random_ops.py,46,function,"Outputs random values from a normal distribution. Example that generates a new set of random values every time: >>> tf.random.set_seed(5); >>> tf.random.normal([4], 0, 1, tf.float32) Example that outputs a reproducible result: >>> tf.random.set_seed(5); >>> tf.random.normal([2,2], 0, 1, tf.float32, seed=1) In this case, we are setting both the global and operation-level seed to ensure this result is reproducible. See `tf.random.set_seed` for more information. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A Tensor or Python value of type `dtype`, broadcastable with `stddev`. The mean of the normal distribution. stddev: A Tensor or Python value of type `dtype`, broadcastable with `mean`. The standard deviation of the normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random normal values." 9721,parameterized_truncated_normal,tensorflow/tensorflow/python/ops/random_ops.py,104,function,"Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. means: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddevs: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the truncated normal distribution. minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of the truncated normal distribution. maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of the truncated normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values."
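The `decode_csv`/`decode_csv_v2` entries above describe per-column parsing driven by `record_defaults`. Below is a minimal sketch of that behavior through the public `tf.io.decode_csv` endpoint; the records, column types, and defaults are illustrative assumptions, not taken from the source.

```python
import tensorflow as tf

# Three columns: int32, float32, string. A scalar default marks a column as
# optional; a missing (empty) field is filled from that column's default.
records = tf.constant(["1,2.5,foo", "4,,bar"])  # second row omits column 2
record_defaults = [[0], [0.0], [""]]
ints, floats, strings = tf.io.decode_csv(records, record_defaults)
print(ints.numpy())    # [1 4]
print(floats.numpy())  # [2.5 0. ]  -- empty field filled from its default
```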
9722,truncated_normal,tensorflow/tensorflow/python/ops/random_ops.py,162,function,"Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution, before truncation. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values." 9723,random_uniform,tensorflow/tensorflow/python/ops/random_ops.py,210,function,"Outputs random values from a uniform distribution. The generated values follow a uniform distribution in the range `[minval, maxval)`. The lower bound `minval` is included in the range, while the upper bound `maxval` is excluded. For floats, the default range is `[0, 1)`. For ints, at least `maxval` must be specified explicitly. In the integer case, the random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2**32` or `2**64`). Examples: >>> tf.random.uniform(shape=[2]) >>> tf.random.uniform(shape=[], minval=-1., maxval=0.) >>> tf.random.uniform(shape=[], minval=5, maxval=10, dtype=tf.int64) The `seed` argument produces a deterministic sequence of tensors across multiple calls. To repeat that sequence, use `tf.random.set_seed`: >>> tf.random.set_seed(5) >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) >>> tf.random.set_seed(5) >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10) If a `seed` argument is specified without `tf.random.set_seed`, small changes to function graphs or previously executed operations will change the returned value. See `tf.random.set_seed` for details. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. minval: A Tensor or Python value of type `dtype`, broadcastable with `shape` (for integer types, broadcasting is not supported, so it needs to be a scalar). The lower bound on the range of random values to generate (inclusive). Defaults to 0. maxval: A Tensor or Python value of type `dtype`, broadcastable with `shape` (for integer types, broadcasting is not supported, so it needs to be a scalar). The upper bound on the range of random values to generate (exclusive). Defaults to 1 if `dtype` is floating point. dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or `int64`. seed: A Python integer. Used in combination with `tf.random.set_seed` to create a reproducible sequence of tensors across multiple calls. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random uniform values. Raises: ValueError: If `dtype` is integral and `maxval` is not specified."
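As the `random_uniform` entry above notes, integer dtypes require an explicit `maxval`, while floats default to the range `[0, 1)`. A small sketch of both branches, with placeholder shapes and seeds:

```python
import tensorflow as tf

tf.random.set_seed(5)
floats = tf.random.uniform(shape=[3])  # float default range [0, 1)
ints = tf.random.uniform(shape=[3], minval=0, maxval=10,
                         dtype=tf.int64, seed=1)  # maxval mandatory for ints
# tf.random.uniform(shape=[3], dtype=tf.int32)  # would raise ValueError
```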
9724,random_shuffle,tensorflow/tensorflow/python/ops/random_ops.py,322,function,"Randomly shuffles a tensor along its first dimension. The tensor is shuffled along dimension 0, such that each `value[j]` is mapped to one and only one `output[i]`. For example, a mapping that might occur for a 3x2 tensor is: ```python [[1, 2], [[5, 6], [3, 4], ==> [1, 2], [5, 6]] [3, 4]] ``` Args: value: A Tensor to be shuffled. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of same shape and type as `value`, shuffled along its first dimension." 9725,random_crop,tensorflow/tensorflow/python/ops/random_ops.py,355,function,"Randomly crops a tensor to a given size. Slices a shape `size` portion out of `value` at a uniformly chosen offset. Requires `value.shape >= size`. If a dimension should not be cropped, pass the full size of that dimension. For example, RGB images can be cropped with `size = [crop_height, crop_width, 3]`. Args: value: Input tensor to crop. size: 1-D tensor with size the rank of `value`. seed: Python integer. Used to create a random seed. See `tf.random.set_seed` for behavior. name: A name for this operation (optional). Returns: A cropped tensor of the same rank as `value` and shape `size`." 9726,multinomial,tensorflow/tensorflow/python/ops/random_ops.py,401,function,"Draws samples from a multinomial distribution. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 5) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: Optional name for the operation. output_dtype: integer type to use for the output. Defaults to int64. Returns: The drawn samples of shape `[batch_size, num_samples]`." 9727,categorical,tensorflow/tensorflow/python/ops/random_ops.py,429,function,"Draws samples from a categorical distribution. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 5) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. dtype: integer type to use for the output. Defaults to int64. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`." 9728,multinomial_categorical_impl,tensorflow/tensorflow/python/ops/random_ops.py,456,function,Implementation for random.categorical (v1) and random.categorical (v2). 9729,_maybe_set_static_shape_helper,tensorflow/tensorflow/python/ops/random_ops.py,467,function, 9730,random_gamma,tensorflow/tensorflow/python/ops/random_ops.py,480,function,"Draws `shape` samples from each of the given Gamma distribution(s). `alpha` is the shape parameter describing the distribution(s), and `beta` is the inverse scale parameter(s). 
Note: Because internal calculations are done using `float64` and casting has `floor` semantics, we must manually map zero outcomes to the smallest possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise should. This bias can only happen for small values of `alpha`, i.e., `alpha << 1` or large values of `beta`, i.e., `beta >> 1`. The samples are differentiable w.r.t. alpha and beta. The derivatives are computed using the approach described in (Figurnov et al., 2018). Example: ```python samples = tf.random.gamma([10], [0.5, 1.5]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.gamma([7, 5], [0.5, 1.5]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions alpha = tf.constant([[1.],[3.],[5.]]) beta = tf.constant([[3., 4.]]) samples = tf.random.gamma([30], alpha=alpha, beta=beta) # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions. loss = tf.reduce_mean(tf.square(samples)) dloss_dalpha, dloss_dbeta = tf.gradients(loss, [alpha, beta]) # unbiased stochastic derivatives of the loss function alpha.shape == dloss_dalpha.shape # True beta.shape == dloss_dbeta.shape # True ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per alpha/beta-parameterized distribution. alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha` provides the shape parameter(s) describing the gamma distribution(s) to sample. Must be broadcastable with `beta`. beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1. `beta` provides the inverse scale parameter(s) of the gamma distribution(s) to sample. Must be broadcastable with `alpha`. dtype: The type of alpha, beta, and the output: `float16`, `float32`, or `float64`. seed: A Python integer. Used to create a random seed for the distributions. See `tf.random.set_seed` for behavior. name: Optional name for the operation. Returns: samples: a `Tensor` of shape `tf.concat([shape, tf.shape(alpha + beta)], axis=0)` with values of type `dtype`. References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 9731,random_poisson,tensorflow/tensorflow/python/ops/random_ops.py,574,function,"Draws `shape` samples from each of the given Poisson distribution(s). `lam` is the rate parameter describing the distribution(s). Example: ```python samples = tf.random.poisson([0.5, 1.5], [10]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.poisson([12.2, 3.3], [7, 5]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions ``` Args: lam: A Tensor or Python value or N-D array of type `dtype`. `lam` provides the rate parameter(s) describing the poisson distribution(s) to sample. shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per ""rate""-parameterized distribution. dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or `int64`. seed: A Python integer. Used to create a random seed for the distributions. 
See `tf.random.set_seed` for behavior. name: Optional name for the operation. Returns: samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)` with values of type `dtype`." 9732,random_poisson_v2,tensorflow/tensorflow/python/ops/random_ops.py,614,function,"Draws `shape` samples from each of the given Poisson distribution(s). `lam` is the rate parameter describing the distribution(s). Example: ```python samples = tf.random.poisson([10], [0.5, 1.5]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.poisson([7, 5], [12.2, 3.3]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per ""rate""-parameterized distribution. lam: A Tensor or Python value or N-D array of type `dtype`. `lam` provides the rate parameter(s) describing the poisson distribution(s) to sample. dtype: The type of the output: `float16`, `float32`, `float64`, `int32` or `int64`. seed: A Python integer. Used to create a random seed for the distributions. See `tf.random.set_seed` for behavior. name: Optional name for the operation. Returns: samples: a `Tensor` of shape `tf.concat([shape, tf.shape(lam)], axis=0)` with values of type `dtype`." 9733,RawOpsTest,tensorflow/tensorflow/python/ops/raw_ops_test.py,30,class, 9734,get_resource_handle_data,tensorflow/tensorflow/python/ops/resource_variable_ops.py,66,function, 9735,get_eager_safe_handle_data,tensorflow/tensorflow/python/ops/resource_variable_ops.py,76,function,Get the data handle from the Tensor `handle`. 9736,_set_handle_shapes_and_types,tensorflow/tensorflow/python/ops/resource_variable_ops.py,86,function,"Sets the shape inference result HandleData on tensor. Args: tensor: A `Tensor` or `EagerTensor`. handle_data: A `CppShapeInferenceResult.HandleData`. graph_mode: A python bool." 9737,_combine_handle_data,tensorflow/tensorflow/python/ops/resource_variable_ops.py,112,function,"Concats HandleData from tensors `handle` and `initial_value`. Args: handle: A `Tensor` of dtype `resource`. initial_value: A `Tensor`. Returns: A `CppShapeInferenceResult.HandleData`. If `initial_value` has dtype `variant`, the `HandleData` contains the concatenation of the shape_and_type from both `handle` and `initial_value`. Raises: RuntimeError: If handle, which was returned by VarHandleOp, either has no handle data, or its len(handle_data.shape_and_type) != 1." 9738,_variable_handle_from_shape_and_dtype,tensorflow/tensorflow/python/ops/resource_variable_ops.py,148,function,"Create a variable handle, copying in handle data from `initial_value`." 9739,eager_safe_variable_handle,tensorflow/tensorflow/python/ops/resource_variable_ops.py,200,function,"Creates a variable handle with information to do shape inference. The dtype is read from `initial_value` and stored in the returned resource tensor's handle data. If `initial_value.dtype == tf.variant`, we additionally extract the handle data (if any) from `initial_value` and append it to the `handle_data`. In this case, the returned tensor's handle data is in the form ``` is_set: true shape_and_type { shape { // initial_value.shape } dtype: DT_VARIANT } shape_and_type { // handle_data(initial_value).shape_and_type[0] } shape_and_type { // handle_data(initial_value).shape_and_type[1] } ... 
``` Ops that read from this tensor, such as `ReadVariableOp` and `AssignVariableOp`, know that `handle_data(handle).shape_and_type[1:]` correspond to the handle data of the variant(s) stored in the Variable. Args: initial_value: A `Tensor`. shape: The shape of the handle data. Can be `TensorShape(None)` (i.e. unknown shape). shared_name: A string. name: A string. graph_mode: A python bool. Returns: The handle, a `Tensor` of type `resource`." 9740,_handle_graph,tensorflow/tensorflow/python/ops/resource_variable_ops.py,249,function, 9741,EagerResourceDeleter,tensorflow/tensorflow/python/ops/resource_variable_ops.py,260,class,"An object which cleans up a resource handle. An alternative to defining a __del__ method on an object. The intended use is that ResourceVariables or other objects with resource handles will maintain a single reference to this object. When the parent object is collected, this object will be too. Even if the parent object is part of a reference cycle, the cycle will be collectable." 9742,shape_safe_assign_variable_handle,tensorflow/tensorflow/python/ops/resource_variable_ops.py,311,function,Helper that checks shape compatibility and assigns variable. 9743,_maybe_set_handle_data,tensorflow/tensorflow/python/ops/resource_variable_ops.py,321,function, 9744,variable_accessed,tensorflow/tensorflow/python/ops/resource_variable_ops.py,333,function,Records that `variable` was accessed for the tape and FuncGraph. 9745,BaseResourceVariable,tensorflow/tensorflow/python/ops/resource_variable_ops.py,341,class,A python variable from an existing handle. 9746,ResourceVariable,tensorflow/tensorflow/python/ops/resource_variable_ops.py,1392,class,"Variable based on resource handles. See the [Variables How To](https://tensorflow.org/guide/variables) for a high level overview. A `ResourceVariable` allows you to maintain state across subsequent calls to session.run. The `ResourceVariable` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. The initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods. Just like any `Tensor`, variables created with `tf.Variable(use_resource=True)` can be used as inputs for other Ops in the graph. Additionally, all the operators overloaded for the `Tensor` class are carried over to variables, so you can also add nodes to the graph by just doing arithmetic on variables. Unlike ref-based variables, a ResourceVariable has well-defined semantics. Each usage of a ResourceVariable in a TensorFlow graph adds a read_value operation to the graph. The Tensors returned by a read_value operation are guaranteed to see all modifications to the value of the variable which happen in any operation on which the read_value depends (either directly, indirectly, or via a control dependency) and guaranteed to not see any modification to the value of the variable from operations that depend on the read_value operation. Updates from operations that have no dependency relationship to the read_value operation might or might not be visible to read_value. For example, if there is more than one assignment to a ResourceVariable in a single session.run call there is a well-defined value for each operation which uses the variable's value if the assignments and the read are connected by edges in the graph.
Consider the following example, in which two writes can cause tf.Variable and tf.ResourceVariable to behave differently: ```python a = tf.Variable(1.0, use_resource=True) a.initializer.run() assign = a.assign(2.0) with tf.control_dependencies([assign]): b = a.read_value() with tf.control_dependencies([b]): other_assign = a.assign(3.0) with tf.control_dependencies([other_assign]): # Will print 2.0 because the value was read before other_assign ran. If # `a` was a tf.Variable instead, 2.0 or 3.0 could be printed. tf.compat.v1.Print(b, [b]).eval() ```" 9747,UninitializedVariable,tensorflow/tensorflow/python/ops/resource_variable_ops.py,1844,class,A variable with no initializer. 9748,_dense_var_to_tensor,tensorflow/tensorflow/python/ops/resource_variable_ops.py,1934,function, 9749,_UnreadVariable,tensorflow/tensorflow/python/ops/resource_variable_ops.py,1944,class,"Represents a future for a read of a variable. Pretends to be the tensor if anyone looks." 9750,_ReadGrad,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2075,function,Gradient for read op. 9751,variable_shape,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2080,function, 9752,_GatherGrad,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2091,function,Gradient for gather op. 9753,_to_proto_fn,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2104,function,Converts Variable and ResourceVariable to VariableDef for collections. 9754,_from_proto_fn,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2109,function,Creates Variable or ResourceVariable from VariableDef as needed. 9755,is_resource_variable,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2153,function,"Returns True if `var` is to be considered a ResourceVariable." 9756,copy_to_graph_uninitialized,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2159,function,"Copies an existing variable to a new graph, with no initializer." 9757,VariableSpec,tensorflow/tensorflow/python/ops/resource_variable_ops.py,2182,class,Describes a tf.Variable. 9758,register_resource,tensorflow/tensorflow/python/ops/resources.py,38,function,"Registers a resource into the appropriate collections. This makes the resource findable in either the shared or local resources collection. Args: handle: op which returns a handle for the resource. create_op: op which initializes the resource. is_initialized_op: op which returns a scalar boolean tensor of whether the resource has been initialized. is_shared: if True, the resource gets added to the shared resource collection; otherwise it gets added to the local resource collection." 9759,shared_resources,tensorflow/tensorflow/python/ops/resources.py,60,function,Returns resources visible to all tasks in the cluster. 9760,local_resources,tensorflow/tensorflow/python/ops/resources.py,65,function,Returns resources intended to be local to this session. 9761,report_uninitialized_resources,tensorflow/tensorflow/python/ops/resources.py,70,function,"Returns the names of all uninitialized resources in resource_list. If the returned tensor is empty then all resources have been initialized. Args: resource_list: resources to check. If None, will use shared_resources() + local_resources(). name: name for the resource-checking op. Returns: Tensor containing names of the handles of all resources which have not yet been initialized." 9762,initialize_resources,tensorflow/tensorflow/python/ops/resources.py,108,function,"Initializes the resources in the given list. Args: resource_list: list of resources to initialize.
name: name of the initialization op. Returns: op responsible for initializing all resources." 9763,_transpose_batch_time,tensorflow/tensorflow/python/ops/rnn.py,44,function,"Transposes the batch and time dimensions of a Tensor. If the input tensor has rank < 2 it returns the original tensor. Retains as much of the static shape information as possible. Args: x: A Tensor. Returns: x transposed along the first two dimensions." 9764,_best_effort_input_batch_size,tensorflow/tensorflow/python/ops/rnn.py,70,function,"Get static input batch size if available, with fallback to the dynamic one. Args: flat_input: An iterable of time major input Tensors of shape `[max_time, batch_size, ...]`. All inputs should have compatible batch sizes. Returns: The batch size in Python integer if available, or a scalar Tensor otherwise. Raises: ValueError: if there is any input with an invalid shape." 9765,_infer_state_dtype,tensorflow/tensorflow/python/ops/rnn.py,97,function,"Infer the dtype of an RNN state. Args: explicit_dtype: explicitly declared dtype or None. state: RNN's hidden state. Must be a Tensor or a nested iterable containing Tensors. Returns: dtype: inferred dtype of hidden state. Raises: ValueError: if `state` has heterogeneous dtypes or is empty." 9766,_maybe_tensor_shape_from_tensor,tensorflow/tensorflow/python/ops/rnn.py,127,function, 9767,_should_cache,tensorflow/tensorflow/python/ops/rnn.py,134,function,"Returns True if a default caching device should be set, otherwise False." 9768,_rnn_step,tensorflow/tensorflow/python/ops/rnn.py,151,function,"Calculate one step of a dynamic RNN minibatch. Returns an (output, state) pair conditioned on `sequence_length`. When skip_conditionals=False, the pseudocode is something like: if t >= max_sequence_length: return (zero_output, state) if t < min_sequence_length: return call_cell() # Selectively output zeros or output, old state or new state depending # on whether we've finished calculating each row. new_output, new_state = call_cell() final_output = np.vstack([ zero_output if time >= sequence_length[r] else new_output_r for r, new_output_r in enumerate(new_output) ]) final_state = np.vstack([ state[r] if time >= sequence_length[r] else new_state_r for r, new_state_r in enumerate(new_state) ]) return (final_output, final_state) Args: time: int32 `Tensor` scalar. sequence_length: int32 `Tensor` vector of size [batch_size]. min_sequence_length: int32 `Tensor` scalar, min of sequence_length. max_sequence_length: int32 `Tensor` scalar, max of sequence_length. zero_output: `Tensor` vector of shape [output_size]. state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`, or a list/tuple of such tensors. call_cell: lambda returning tuple of (new_output, new_state) where new_output is a `Tensor` matrix of shape `[batch_size, output_size]`. new_state is a `Tensor` matrix of shape `[batch_size, state_size]`. state_size: The `cell.state_size` associated with the state. skip_conditionals: Python bool, whether to skip using the conditional calculations. This is useful for `dynamic_rnn`, where the input tensor matches `max_sequence_length`, and using conditionals just slows everything down. Returns: A tuple of (`final_output`, `final_state`) as given by the pseudocode above: final_output is a `Tensor` matrix of shape [batch_size, output_size] final_state is either a single `Tensor` matrix, or a tuple of such matrices (matching length and shapes of input `state`). 
Raises: ValueError: If the cell returns a state tuple whose length does not match that returned by `state_size`." 9769,_reverse_seq,tensorflow/tensorflow/python/ops/rnn.py,300,function,"Reverse a list of Tensors up to specified lengths. Args: input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features) or nested tuples of tensors. lengths: A `Tensor` of dimension batch_size, containing lengths for each sequence in the batch. If ""None"" is specified, simply reverses the list. Returns: time-reversed sequence" 9770,bidirectional_dynamic_rnn,tensorflow/tensorflow/python/ops/rnn.py,347,function,"Creates a dynamic version of bidirectional recurrent neural network. Takes input and builds independent forward and backward RNNs. The input_size of forward and backward cell must match. The initial state for both directions is zero by default (but can be set optionally) and no intermediate states are ever returned -- the network is fully unrolled for the given (passed in) length(s) of the sequence(s) or completely unrolled if length(s) is not given. Args: cell_fw: An instance of RNNCell, to be used for forward direction. cell_bw: An instance of RNNCell, to be used for backward direction. inputs: The RNN inputs. If time_major == False (default), this must be a tensor of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If time_major == True, this must be a tensor of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences in the batch. If not provided, all batch entries are assumed to be full sequences; and time reversal is applied from time `0` to `max_time` for each sequence. initial_state_fw: (optional) An initial state for the forward RNN. This must be a tensor of appropriate type and shape `[batch_size, cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell_fw.state_size`. initial_state_bw: (optional) Same as for `initial_state_fw`, but using the corresponding properties of `cell_bw`. dtype: (optional) The data type for the initial states and expected output. Required if initial_states are not provided or RNN states have a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. 
scope: VariableScope for the created subgraph; defaults to ""bidirectional_rnn"" Returns: A tuple (outputs, output_states) where: outputs: A tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`. If time_major == False (default), output_fw will be a `Tensor` shaped: `[batch_size, max_time, cell_fw.output_size]` and output_bw will be a `Tensor` shaped: `[batch_size, max_time, cell_bw.output_size]`. If time_major == True, output_fw will be a `Tensor` shaped: `[max_time, batch_size, cell_fw.output_size]` and output_bw will be a `Tensor` shaped: `[max_time, batch_size, cell_bw.output_size]`. It returns a tuple instead of a single concatenated `Tensor`, unlike in the `bidirectional_rnn`. If the concatenated one is preferred, the forward and backward outputs can be concatenated as `tf.concat(outputs, 2)`. output_states: A tuple (output_state_fw, output_state_bw) containing the forward and the backward final states of bidirectional rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`." 9771,dynamic_rnn,tensorflow/tensorflow/python/ops/rnn.py,505,function,"Creates a recurrent neural network specified by RNNCell `cell`. Performs fully dynamic unrolling of `inputs`. Example: ```python # create a BasicRNNCell rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size) # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size] # defining initial state initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32) # 'state' is a tensor of shape [batch_size, cell_state_size] outputs, state = tf.compat.v1.nn.dynamic_rnn(rnn_cell, input_data, initial_state=initial_state, dtype=tf.float32) ``` ```python # create 2 LSTMCells rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]] # create a RNN cell composed sequentially of a number of RNNCells multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers) # 'outputs' is a tensor of shape [batch_size, max_time, 256] # 'state' is a N-tuple where N is the number of LSTMCells containing a # tf.nn.rnn_cell.LSTMStateTuple for each cell outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell, inputs=data, dtype=tf.float32) ``` Args: cell: An instance of RNNCell. inputs: The RNN inputs. If `time_major == False` (default), this must be a `Tensor` of shape: `[batch_size, max_time, ...]`, or a nested tuple of such elements. If `time_major == True`, this must be a `Tensor` of shape: `[max_time, batch_size, ...]`, or a nested tuple of such elements. This may also be a (possibly nested) tuple of Tensors satisfying this property. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. In this case, input to `cell` at each time-step will replicate the structure of these tuples, except for the time dimension (from which the time is taken). The input to `cell` at each time step will be a `Tensor` or (possibly nested) tuple of Tensors each with dimensions `[batch_size, ...]`. sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used to copy-through state and zero-out outputs when past a batch element's sequence length. This parameter enables users to extract the last valid state and properly padded outputs, so it is provided for correctness. initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. 
If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. time_major: The shape format of the `inputs` and `outputs` Tensors. If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. scope: VariableScope for the created subgraph; defaults to ""rnn"". Returns: A pair (outputs, state) where: outputs: The RNN output `Tensor`. If time_major == False (default), this will be a `Tensor` shaped: `[batch_size, max_time, cell.output_size]`. If time_major == True, this will be a `Tensor` shaped: `[max_time, batch_size, cell.output_size]`. Note, if `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `outputs` will be a tuple having the same structure as `cell.output_size`, containing Tensors having shapes corresponding to the shape data in `cell.output_size`. state: The final state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. If cells are `LSTMCells` `state` will be a tuple containing a `LSTMStateTuple` for each cell. Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If inputs is None or an empty list." 9772,_dynamic_rnn_loop,tensorflow/tensorflow/python/ops/rnn.py,703,function,"Internal implementation of Dynamic RNN. Args: cell: An instance of RNNCell. inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested tuple of such elements. initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if `cell.state_size` is a tuple, then this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. parallel_iterations: Positive Python int. swap_memory: A Python boolean sequence_length: (optional) An `int32` `Tensor` of shape [batch_size]. dtype: (optional) Expected dtype of output. If not specified, inferred from initial_state. Returns: Tuple `(final_outputs, final_state)`. final_outputs: A `Tensor` of shape `[time, batch_size, cell.output_size]`. If `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape` objects, then this returns a (possibly nested) tuple of Tensors matching the corresponding shapes. final_state: A `Tensor`, or possibly nested tuple of Tensors, matching in length and shapes to `initial_state`. 
Raises: ValueError: If the input depth cannot be inferred via shape inference from the inputs. ValueError: If time_step is not the same for all the elements in the inputs. ValueError: If batch_size is not the same for all the elements in the inputs." 9773,raw_rnn,tensorflow/tensorflow/python/ops/rnn.py,919,function,"Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`. **NOTE: This method is still in testing, and the API may change.** This function is a more primitive version of `dynamic_rnn` that provides more direct access to the inputs each iteration. It also provides more control over when to start and finish reading the sequence, and what to emit for the output. For example, it can be used to implement the dynamic decoder of a seq2seq model. Instead of working with `Tensor` objects, most operations work with `TensorArray` objects directly. The operation of `raw_rnn`, in pseudo-code, is basically the following: ```python time = tf.constant(0, dtype=tf.int32) (finished, next_input, initial_state, emit_structure, loop_state) = loop_fn( time=time, cell_output=None, cell_state=None, loop_state=None) emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype) state = initial_state while not all(finished): (output, cell_state) = cell(next_input, state) (next_finished, next_input, next_state, emit, loop_state) = loop_fn( time=time + 1, cell_output=output, cell_state=cell_state, loop_state=loop_state) # Emit zeros and copy forward state for minibatch entries that are finished. state = tf.where(finished, state, next_state) emit = tf.where(finished, tf.zeros_like(emit_structure), emit) emit_ta = emit_ta.write(time, emit) # If any new minibatch entries are marked as finished, mark these. finished = tf.logical_or(finished, next_finished) time += 1 return (emit_ta, state, loop_state) ``` with the additional properties that output and state may be (possibly nested) tuples, as determined by `cell.output_size` and `cell.state_size`, and as a result the final `state` and `emit_ta` may themselves be tuples. A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this: ```python inputs = tf.compat.v1.placeholder(shape=(max_time, batch_size, input_depth), dtype=tf.float32) sequence_length = tf.compat.v1.placeholder(shape=(batch_size,), dtype=tf.int32) inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time) inputs_ta = inputs_ta.unstack(inputs) cell = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units) def loop_fn(time, cell_output, cell_state, loop_state): emit_output = cell_output # == None for time == 0 if cell_output is None: # time == 0 next_cell_state = cell.zero_state(batch_size, tf.float32) else: next_cell_state = cell_state elements_finished = (time >= sequence_length) finished = tf.reduce_all(elements_finished) next_input = tf.cond( finished, lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32), lambda: inputs_ta.read(time)) next_loop_state = None return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state) outputs_ta, final_state, _ = raw_rnn(cell, loop_fn) outputs = outputs_ta.stack() ``` Args: cell: An instance of RNNCell. loop_fn: A callable that takes inputs `(time, cell_output, cell_state, loop_state)` and returns the tuple `(finished, next_input, next_cell_state, emit_output, next_loop_state)`. 
Here `time` is an int32 scalar `Tensor`, `cell_output` is a `Tensor` or (possibly nested) tuple of tensors as determined by `cell.output_size`, and `cell_state` is a `Tensor` or (possibly nested) tuple of tensors, as determined by the `loop_fn` on its first call (and should match `cell.state_size`). The outputs are: `finished`, a boolean `Tensor` of shape `[batch_size]`, `next_input`: the next input to feed to `cell`, `next_cell_state`: the next state to feed to `cell`, and `emit_output`: the output to store for this iteration. Note that `emit_output` should be a `Tensor` or (possibly nested) tuple of tensors which is aggregated in the `emit_ta` inside the `while_loop`. For the first call to `loop_fn`, the `emit_output` corresponds to the `emit_structure` which is then used to determine the size of the `zero_tensor` for the `emit_ta` (defaults to `cell.output_size`). For the subsequent calls to the `loop_fn`, the `emit_output` corresponds to the actual output tensor that is to be aggregated in the `emit_ta`. The parameter `cell_state` and output `next_cell_state` may be either a single or (possibly nested) tuple of tensors. The parameter `loop_state` and output `next_loop_state` may be either a single or (possibly nested) tuple of `Tensor` and `TensorArray` objects. This last parameter may be ignored by `loop_fn` and the return value may be `None`. If it is not `None`, then the `loop_state` will be propagated through the RNN loop, for use purely by `loop_fn` to keep track of its own state. The `next_loop_state` parameter returned may be `None`. The first call to `loop_fn` will be `time = 0`, `cell_output = None`, `cell_state = None`, and `loop_state = None`. For this call: The `next_cell_state` value should be the value with which to initialize the cell's state. It may be a final state from a previous RNN or it may be the output of `cell.zero_state()`. It should be a (possibly nested) tuple structure of tensors. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of appropriate type and shape `[batch_size] + cell.state_size`. If `cell.state_size` is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. The `emit_output` value may be either `None` or a (possibly nested) tuple structure of tensors, e.g., `(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`. If this first `emit_output` return value is `None`, then the `emit_ta` result of `raw_rnn` will have the same structure and dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same structure, shapes (prepended with a `batch_size` dimension), and dtypes as `emit_output`. The actual values returned for `emit_output` at this initializing call are ignored. Note, this emit structure must be consistent across all time steps. parallel_iterations: (Default: 32). The number of iterations to run in parallel. Those operations which do not have any temporal dependency and can be run in parallel, will be. This parameter trades off time for space. Values >> 1 use more memory but take less time, while smaller values use less memory but computations take longer. swap_memory: Transparently swap the tensors produced in forward inference but needed for back prop from GPU to CPU. This allows training RNNs which would typically not fit on a single GPU, with very minimal (or no) performance penalty. 
scope: VariableScope for the created subgraph; defaults to ""rnn"". Returns: A tuple `(emit_ta, final_state, final_loop_state)` where: `emit_ta`: The RNN output `TensorArray`. If `loop_fn` returns a (possibly nested) set of Tensors for `emit_output` during initialization, (inputs `time = 0`, `cell_output = None`, and `loop_state = None`), then `emit_ta` will have the same structure, dtypes, and shapes as `emit_output` instead. If `loop_fn` returns `emit_output = None` during this call, the structure of `cell.output_size` is used: If `cell.output_size` is a (possibly nested) tuple of integers or `TensorShape` objects, then `emit_ta` will be a tuple having the same structure as `cell.output_size`, containing TensorArrays whose elements' shapes correspond to the shape data in `cell.output_size`. `final_state`: The final cell state. If `cell.state_size` is an int, this will be shaped `[batch_size, cell.state_size]`. If it is a `TensorShape`, this will be shaped `[batch_size] + cell.state_size`. If it is a (possibly nested) tuple of ints or `TensorShape`, this will be a tuple having the corresponding shapes. `final_loop_state`: The final loop state as returned by `loop_fn`. Raises: TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not a `callable`." 9774,static_rnn,tensorflow/tensorflow/python/ops/rnn.py,1246,function,"Creates a recurrent neural network specified by RNNCell `cell`. The simplest form of RNN network generated is: ```python state = cell.zero_state(...) outputs = [] for input_ in inputs: output, state = cell(input_, state) outputs.append(output) return (outputs, state) ``` However, a few other options are available: An initial state can be provided. If the sequence_length vector is provided, dynamic calculation is performed. This method of calculation does not compute the RNN steps past the maximum sequence length of the minibatch (thus saving computational time), and properly propagates the state at an example's sequence length to the final state output. The dynamic calculation performed is, at time `t` for batch row `b`, ```python (output, state)(b, t) = (t >= sequence_length(b)) ? (zeros(cell.output_size), states(b, sequence_length(b) - 1)) : cell(input(b, t), state(b, t - 1)) ``` Args: cell: An instance of RNNCell. inputs: A length T list of inputs, each a `Tensor` of shape `[batch_size, input_size]`, or a nested tuple of such elements. initial_state: (optional) An initial state for the RNN. If `cell.state_size` is an integer, this must be a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell.state_size`. dtype: (optional) The data type for the initial state and expected output. Required if initial_state is not provided or RNN state has a heterogeneous dtype. sequence_length: Specifies the length of each sequence in inputs. An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`. scope: VariableScope for the created subgraph; defaults to ""rnn"". Returns: A pair (outputs, state) where: - outputs is a length T list of outputs (one for each input), or a nested tuple of such elements. - state is the final state Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If `inputs` is `None` or an empty list, or if the input depth (column size) cannot be inferred from inputs via shape inference." 
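The `static_rnn` entry above unrolls the cell over a Python list of per-timestep inputs. Below is a minimal graph-mode sketch, assuming illustrative sizes (5 steps, batch 4, 8 features, 16 units) that are not taken from the source:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

T, batch_size, input_size, num_units = 5, 4, 8, 16
# static_rnn consumes a length-T list of [batch_size, input_size] tensors.
inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
          for _ in range(T)]
cell = tf.nn.rnn_cell.BasicRNNCell(num_units)
# outputs: length-T list of [batch_size, num_units]; state: the final state.
outputs, state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)
```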
9775,static_state_saving_rnn,tensorflow/tensorflow/python/ops/rnn.py,1425,function,"RNN that accepts a state saver for time-truncated RNN calculation. Args: cell: An instance of `RNNCell`. inputs: A length T list of inputs, each a `Tensor` of shape `[batch_size, input_size]`. state_saver: A state saver object with methods `state` and `save_state`. state_name: Python string or tuple of strings. The name to use with the state_saver. If the cell returns tuples of states (i.e., `cell.state_size` is a tuple) then `state_name` should be a tuple of strings having the same length as `cell.state_size`. Otherwise it should be a single string. sequence_length: (optional) An int32/int64 vector size [batch_size]. See the documentation for rnn() for more details about sequence_length. scope: VariableScope for the created subgraph; defaults to ""rnn"". Returns: A pair (outputs, state) where: outputs is a length T list of outputs (one for each input) states is the final state Raises: TypeError: If `cell` is not an instance of RNNCell. ValueError: If `inputs` is `None` or an empty list, or if the arity and type of `state_name` does not match that of `cell.state_size`." 9776,static_bidirectional_rnn,tensorflow/tensorflow/python/ops/rnn.py,1520,function,"Creates a bidirectional recurrent neural network. Similar to the unidirectional case above (rnn) but takes input and builds independent forward and backward RNNs with the final forward and backward outputs depth-concatenated, such that the output will have the format [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of forward and backward cell must match. The initial state for both directions is zero by default (but can be set optionally) and no intermediate states are ever returned -- the network is fully unrolled for the given (passed in) length(s) of the sequence(s) or completely unrolled if length(s) is not given. Args: cell_fw: An instance of RNNCell, to be used for forward direction. cell_bw: An instance of RNNCell, to be used for backward direction. inputs: A length T list of inputs, each a tensor of shape [batch_size, input_size], or a nested tuple of such elements. initial_state_fw: (optional) An initial state for the forward RNN. This must be a tensor of appropriate type and shape `[batch_size, cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a tuple of tensors having shapes `[batch_size, s] for s in cell_fw.state_size`. initial_state_bw: (optional) Same as for `initial_state_fw`, but using the corresponding properties of `cell_bw`. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int32/int64 vector, size `[batch_size]`, containing the actual lengths for each of the sequences. scope: VariableScope for the created subgraph; defaults to ""bidirectional_rnn"" Returns: A tuple (outputs, output_state_fw, output_state_bw) where: outputs is a length `T` list of outputs (one for each input), which are depth-concatenated forward and backward outputs. output_state_fw is the final state of the forward rnn. output_state_bw is the final state of the backward rnn. Raises: TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. ValueError: If inputs is None or an empty list." 9777,_block_lstm_grad,tensorflow/tensorflow/python/ops/rnn_grad.py,24,function,Gradient for the BlockLSTM op. 
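Per the `static_bidirectional_rnn` entry above, the per-timestep outputs come back with the forward and backward outputs depth-concatenated. A sketch under the same assumed sizes as the previous example:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

T, batch_size, input_size, num_units = 5, 4, 8, 16
inputs = [tf.placeholder(tf.float32, [batch_size, input_size])
          for _ in range(T)]
cell_fw = tf.nn.rnn_cell.BasicRNNCell(num_units)
cell_bw = tf.nn.rnn_cell.BasicRNNCell(num_units)
# Each element of `outputs` is [batch_size, 2 * num_units]: the forward and
# backward outputs depth-concatenated, as the docstring describes.
outputs, state_fw, state_bw = tf.nn.static_bidirectional_rnn(
    cell_fw, cell_bw, inputs, dtype=tf.float32)
```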
9778,RNNGradTest,tensorflow/tensorflow/python/ops/rnn_grad_test.py,36,class, 9779,deterministic_random_uniform,tensorflow/tensorflow/python/ops/rnn_grad_test.py,139,function, 9780,icfo_to_ifco,tensorflow/tensorflow/python/ops/rnn_grad_test.py,143,function,Convert gates' weights and biases from ICFO to IFCO layout. 9781,_maybe_copy_to_context_device,tensorflow/tensorflow/python/ops/script_ops.py,58,function,Copy an EagerTensor to the current device if it's not on `device_name`. 9782,EagerFunc,tensorflow/tensorflow/python/ops/script_ops.py,71,class,A wrapper for a function owned by an EagerPyFunc. 9783,FuncRegistry,tensorflow/tensorflow/python/ops/script_ops.py,154,class,"A helper class to keep track of registered py functions. FuncRegistry keeps a map from unique tokens (string) to python functions, which takes numpy arrays and outputs numpy arrays." 9784,_internal_py_func,tensorflow/tensorflow/python/ops/script_ops.py,274,function,See documentation for py_func and eager_py_func. 9785,_EagerPyFuncGrad,tensorflow/tensorflow/python/ops/script_ops.py,355,function,Computes the gradient of an EagerPyFunc. 9786,eager_py_func,tensorflow/tensorflow/python/ops/script_ops.py,375,function,"Wraps a python function into a TensorFlow op that executes it eagerly. This function allows expressing computations in a TensorFlow graph as Python functions. In particular, it wraps a Python function `func` in a once-differentiable TensorFlow operation that executes it with eager execution enabled. As a consequence, `tf.py_function` makes it possible to express control flow using Python constructs (`if`, `while`, `for`, etc.), instead of TensorFlow control flow constructs (`tf.cond`, `tf.while_loop`). For example, you might use `tf.py_function` to implement the log huber function: ```python def log_huber(x, m): if tf.abs(x) <= m: return x**2 else: return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2)) x = tf.compat.v1.placeholder(tf.float32) m = tf.compat.v1.placeholder(tf.float32) y = tf.py_function(func=log_huber, inp=[x, m], Tout=tf.float32) dy_dx = tf.gradients(y, x)[0] with tf.compat.v1.Session() as sess: # The session executes `log_huber` eagerly. Given the feed values below, # it will take the first branch, so `y` evaluates to 1.0 and # `dy_dx` evaluates to 2.0. y, dy_dx = sess.run([y, dy_dx], feed_dict={x: 1.0, m: 2.0}) ``` You can also use `tf.py_function` to debug your models at runtime using Python tools, i.e., you can isolate portions of your code that you want to debug, wrap them in Python functions and insert `pdb` tracepoints or print statements as desired, and wrap those functions in `tf.py_function`. For more information on eager execution, see the [Eager guide](https://tensorflow.org/guide/eager). `tf.py_function` is similar in spirit to `tf.compat.v1.py_func`, but unlike the latter, the former lets you use TensorFlow operations in the wrapped Python function. In particular, while `tf.compat.v1.py_func` only runs on CPUs and wraps functions that take NumPy arrays as inputs and return NumPy arrays as outputs, `tf.py_function` can be placed on GPUs and wraps functions that take Tensors as inputs, execute TensorFlow operations in their bodies, and return Tensors as outputs. Like `tf.compat.v1.py_func`, `tf.py_function` has the following limitations with respect to serialization and distribution: * The body of the function (i.e. `func`) will not be serialized in a `GraphDef`. Therefore, you should not use this function if you need to serialize your model and restore it in a different environment. 
* The operation must run in the same address space as the Python program that calls `tf.py_function()`. If you are using distributed TensorFlow, you must run a `tf.distribute.Server` in the same process as the program that calls `tf.py_function()` and you must pin the created operation to a device in that server (e.g. using `with tf.device():`). Args: func: A Python function which accepts a list of `Tensor` objects having element types that match the corresponding `tf.Tensor` objects in `inp` and returns a list of `Tensor` objects (or a single `Tensor`, or `None`) having element types that match the corresponding values in `Tout`. inp: A list of `Tensor` objects. Tout: A list or tuple of tensorflow data types or a single tensorflow data type if there is only one, indicating what `func` returns; an empty list if no value is returned (i.e., if the return value is `None`). name: A name for the operation (optional). Returns: A list of `Tensor` or a single `Tensor` which `func` computes; an empty list if `func` returns None." 9787,py_func_common,tensorflow/tensorflow/python/ops/script_ops.py,462,function,"Wraps a python function and uses it as a TensorFlow op. Given a python function `func`, which takes numpy arrays as its arguments and returns numpy arrays as its outputs, wrap this function as an operation in a TensorFlow graph. The following snippet constructs a simple TensorFlow graph that invokes the `np.sinh()` NumPy function as an operation in the graph: ```python def my_func(x): # x will be a numpy array with the contents of the placeholder below return np.sinh(x) input = tf.compat.v1.placeholder(tf.float32) y = tf.compat.v1.py_func(my_func, [input], tf.float32) ``` **N.B.** The `tf.compat.v1.py_func()` operation has the following known limitations: * The body of the function (i.e. `func`) will not be serialized in a `GraphDef`. Therefore, you should not use this function if you need to serialize your model and restore it in a different environment. * The operation must run in the same address space as the Python program that calls `tf.compat.v1.py_func()`. If you are using distributed TensorFlow, you must run a `tf.distribute.Server` in the same process as the program that calls `tf.compat.v1.py_func()` and you must pin the created operation to a device in that server (e.g. using `with tf.device():`). Args: func: A Python function, which accepts `ndarray` objects as arguments and returns a list of `ndarray` objects (or a single `ndarray`). This function must accept as many arguments as there are tensors in `inp`, and these argument types will match the corresponding `tf.Tensor` objects in `inp`. The returned `ndarray`s must match the number and types defined in `Tout`. Important Note: Input and output numpy `ndarray`s of `func` are not guaranteed to be copies. In some cases their underlying memory will be shared with the corresponding TensorFlow tensors. In-place modification or storing `func` input or return values in Python data structures without an explicit (np.)copy can have non-deterministic consequences. inp: A list of `Tensor` objects. Tout: A list or tuple of tensorflow data types or a single tensorflow data type if there is only one, indicating what `func` returns. stateful: (Boolean.) If True, the function should be considered stateful. If a function is stateless, when given the same input it will return the same output and have no observable side effects. Optimizations such as common subexpression elimination are only performed on stateless operations. 
name: A name for the operation (optional). Returns: A list of `Tensor` or a single `Tensor` which `func` computes." 9788,py_func,tensorflow/tensorflow/python/ops/script_ops.py,557,function, 9789,numpy_function,tensorflow/tensorflow/python/ops/script_ops.py,566,function,"Wraps a python function and uses it as a TensorFlow op. Given a python function `func`, wrap this function as an operation in a TensorFlow function. `func` must take numpy arrays as its arguments and return numpy arrays as its outputs. The following example creates a TensorFlow graph with `np.sinh()` as an operation in the graph: >>> def my_numpy_func(x): ... # x will be a numpy array with the contents of the input to the ... # tf.function ... return np.sinh(x) >>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)]) ... def tf_function(input): ... y = tf.numpy_function(my_numpy_func, [input], tf.float32) ... return y * y >>> tf_function(tf.constant(1.)) Comparison to `tf.py_function`: `tf.py_function` and `tf.numpy_function` are very similar, except that `tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. If you want the function to contain `tf.Tensors`, and have any TensorFlow operations executed in the function be differentiable, please use `tf.py_function`. Note: The `tf.numpy_function` operation has the following known limitations: * The body of the function (i.e. `func`) will not be serialized in a `tf.SavedModel`. Therefore, you should not use this function if you need to serialize your model and restore it in a different environment. * The operation must run in the same address space as the Python program that calls `tf.numpy_function()`. If you are using distributed TensorFlow, you must run a `tf.distribute.Server` in the same process as the program that calls `tf.numpy_function` and you must pin the created operation to a device in that server (e.g. using `with tf.device():`). * Since the function takes numpy arrays, you cannot take gradients through a numpy_function. If you require something that is differentiable, please consider using tf.py_function. * The resulting function is assumed stateful and will never be optimized. Args: func: A Python function, which accepts `numpy.ndarray` objects as arguments and returns a list of `numpy.ndarray` objects (or a single `numpy.ndarray`). This function must accept as many arguments as there are tensors in `inp`, and these argument types will match the corresponding `tf.Tensor` objects in `inp`. The returned `numpy.ndarray`s must match the number and types defined in `Tout`. Important Note: Input and output `numpy.ndarray`s of `func` are not guaranteed to be copies. In some cases their underlying memory will be shared with the corresponding TensorFlow tensors. In-place modification or storing `func` input or return values in Python data structures without an explicit (np.)copy can have non-deterministic consequences. inp: A list of `tf.Tensor` objects. Tout: A list or tuple of tensorflow data types or a single tensorflow data type if there is only one, indicating what `func` returns. name: (Optional) A name for the operation. Returns: Single or list of `tf.Tensor` which `func` computes." 9790,NumpyFunctionTest,tensorflow/tensorflow/python/ops/script_ops_test.py,28,class, 9791,encode_resource_handle,tensorflow/tensorflow/python/ops/session_ops.py,36,function,Encode a ResourceHandle proto as a custom numpy struct type. 9792,TensorHandle,tensorflow/tensorflow/python/ops/session_ops.py,42,class,Represents a handle for a live tensor in a session. 
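The `numpy_function` doctest above is flattened by this index; a minimal runnable sketch assembled from it (assumes a TensorFlow 2.x and NumPy install):

```python
import numpy as np
import tensorflow as tf

def my_numpy_func(x):
    # x arrives as a numpy array; the return value must match Tout below.
    return np.sinh(x)

@tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
def tf_function(inp):
    # The wrapped call runs the Python function with numpy inputs/outputs.
    y = tf.numpy_function(my_numpy_func, [inp], tf.float32)
    return y * y

print(tf_function(tf.constant(1.0)))  # ~1.381, i.e. sinh(1.0) ** 2
```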
9793,get_session_handle,tensorflow/tensorflow/python/ops/session_ops.py,139,function,"Return the handle of `data`. This is EXPERIMENTAL and subject to change. Keep `data` ""in-place"" in the runtime and create a handle that can be used to retrieve `data` in a subsequent run(). Combined with `get_session_tensor`, we can keep a tensor produced in one run call in place, and use it as the input in a future run call. Args: data: A tensor to be stored in the session. name: Optional name prefix for the return tensor. Returns: A scalar string tensor representing a unique handle for `data`. Raises: TypeError: if `data` is not a Tensor. Example: ```python c = tf.multiply(a, b) h = tf.compat.v1.get_session_handle(c) h = sess.run(h) p, a = tf.compat.v1.get_session_tensor(h.handle, tf.float32) b = tf.multiply(a, 10) c = sess.run(b, feed_dict={p: h.handle}) ```" 9794,get_session_tensor,tensorflow/tensorflow/python/ops/session_ops.py,182,function,"Get the tensor of type `dtype` by feeding a tensor handle. This is EXPERIMENTAL and subject to change. Get the value of the tensor from a tensor handle. The tensor is produced in a previous run() and stored in the state of the session. Args: handle: The string representation of a persistent tensor handle. dtype: The type of the output tensor. name: Optional name prefix for the return tensor. Returns: A pair of tensors. The first is a placeholder for feeding a tensor handle and the second is the tensor in the session state keyed by the tensor handle. Example: ```python c = tf.multiply(a, b) h = tf.compat.v1.get_session_handle(c) h = sess.run(h) p, a = tf.compat.v1.get_session_tensor(h.handle, tf.float32) b = tf.multiply(a, 10) c = sess.run(b, feed_dict={p: h.handle}) ```" 9795,delete_session_tensor,tensorflow/tensorflow/python/ops/session_ops.py,223,function,"Delete the tensor for the given tensor handle. This is EXPERIMENTAL and subject to change. Delete the tensor of a given tensor handle. The tensor is produced in a previous run() and stored in the state of the session. Args: handle: The string representation of a persistent tensor handle. name: Optional name prefix for the return tensor. Returns: A pair of graph elements. The first is a placeholder for feeding a tensor handle and the second is a deletion operation." 9796,_register_handle_feeder,tensorflow/tensorflow/python/ops/session_ops.py,246,function, 9797,_get_handle_feeder,tensorflow/tensorflow/python/ops/session_ops.py,250,function, 9798,_get_handle_reader,tensorflow/tensorflow/python/ops/session_ops.py,254,function,Return a read subgraph for this handle. 9799,_get_handle_mover,tensorflow/tensorflow/python/ops/session_ops.py,270,function,Return a move subgraph for this pair of feeder and handle. 9800,_get_handle_deleter,tensorflow/tensorflow/python/ops/session_ops.py,291,function,Return a deletion subgraph for this handle. 9801,set_size,tensorflow/tensorflow/python/ops/sets_impl.py,37,function,"Compute number of unique elements along last dimension of `a`. Args: a: `SparseTensor`, with indices sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a`. Returns: `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the number of unique elements in the corresponding `[0...n-1]` dimension of `a`. Raises: TypeError: If `a` is of an invalid type." 
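Since the `set_size` docstring above has no example, a small sketch via the public `tf.sets.size` endpoint (the exported name is an assumption of a TF 2.x install):

```python
import tensorflow as tf

# a = [[{1, 2}, {3}]] encoded as a SparseTensor with row-major-sorted indices.
a = tf.sparse.SparseTensor(
    indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0]],
    values=[1, 2, 3],
    dense_shape=[1, 2, 2])

# Set sizes along the last dimension: a rank n-1 result.
print(tf.sets.size(a))  # tf.Tensor([[2 1]], shape=(1, 2), dtype=int32)
```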
9802,_convert_to_tensors_or_sparse_tensors,tensorflow/tensorflow/python/ops/sets_impl.py,70,function,"Convert to tensor types, and flip order if necessary. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. b: `Tensor` or `SparseTensor` of the same type as `a`. Returns: Tuple of `(a, b, flipped)`, where `a` and `b` have been converted to `Tensor` or `SparseTensor`, and `flipped` indicates whether the order has been flipped to make it dense,sparse instead of sparse,dense (since the set ops do not support the latter)." 9803,_set_operation,tensorflow/tensorflow/python/ops/sets_impl.py,95,function,"Compute set operation of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. Must be `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be sorted in row-major order. set_operation: String indicating set operation. See SetOperationOp::SetOperationFromContext for valid values. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` with the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the results of the set operation. Raises: TypeError: If inputs are invalid types. ValueError: If `a` is sparse and `b` is dense." 9804,set_intersection,tensorflow/tensorflow/python/ops/sets_impl.py,141,function,"Compute set intersection of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Example: ```python import tensorflow as tf import collections # Represent the following array of sets as a sparse tensor: # a = np.array([[{1, 2}, {3}], [{4}, {5, 6}]]) a = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 2), ((0, 1, 0), 3), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ]) a = tf.sparse.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2,2,2]) # b = np.array([[{1}, {}], [{4}, {5, 6, 7, 8}]]) b = collections.OrderedDict([ ((0, 0, 0), 1), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ((1, 1, 2), 7), ((1, 1, 3), 8), ]) b = tf.sparse.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4]) # `tf.sets.intersection` is applied to each aligned pair of sets. tf.sets.intersection(a, b) # The result will be equivalent to either of: # # np.array([[{1}, {}], [{4}, {5, 6}]]) # # collections.OrderedDict([ # ((0, 0, 0), 1), # ((1, 0, 0), 4), # ((1, 1, 0), 5), # ((1, 1, 1), 6), # ]) ``` Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices must be sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the intersections." 9805,set_difference,tensorflow/tensorflow/python/ops/sets_impl.py,212,function,"Compute set difference of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. 
Example: ```python import tensorflow as tf import collections # Represent the following array of sets as a sparse tensor: # a = np.array([[{1, 2}, {3}], [{4}, {5, 6}]]) a = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 2), ((0, 1, 0), 3), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ]) a = tf.sparse.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2, 2, 2]) # b = np.array([[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]]) b = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 3), ((0, 1, 0), 2), ((1, 0, 0), 4), ((1, 0, 1), 5), ((1, 1, 0), 5), ((1, 1, 1), 6), ((1, 1, 2), 7), ((1, 1, 3), 8), ]) b = tf.sparse.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4]) # `set_difference` is applied to each aligned pair of sets. tf.sets.difference(a, b) # The result will be equivalent to either of: # # np.array([[{2}, {3}], [{}, {}]]) # # collections.OrderedDict([ # ((0, 0, 0), 2), # ((0, 1, 0), 3), # ]) ``` Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices must be sorted in row-major order. aminusb: Whether to subtract `b` from `a`, vs vice versa. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the differences. Raises: TypeError: If inputs are invalid types, or if `a` and `b` have different types. ValueError: If `a` is sparse and `b` is dense. errors_impl.InvalidArgumentError: If the shapes of `a` and `b` do not match in any dimension other than the last dimension." 9806,set_union,tensorflow/tensorflow/python/ops/sets_impl.py,294,function,"Compute set union of elements in last dimension of `a` and `b`. All but the last dimension of `a` and `b` must match. Example: ```python import tensorflow as tf import collections # [[{1, 2}, {3}], [{4}, {5, 6}]] a = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 2), ((0, 1, 0), 3), ((1, 0, 0), 4), ((1, 1, 0), 5), ((1, 1, 1), 6), ]) a = tf.sparse.SparseTensor(list(a.keys()), list(a.values()), dense_shape=[2, 2, 2]) # [[{1, 3}, {2}], [{4, 5}, {5, 6, 7, 8}]] b = collections.OrderedDict([ ((0, 0, 0), 1), ((0, 0, 1), 3), ((0, 1, 0), 2), ((1, 0, 0), 4), ((1, 0, 1), 5), ((1, 1, 0), 5), ((1, 1, 1), 6), ((1, 1, 2), 7), ((1, 1, 3), 8), ]) b = tf.sparse.SparseTensor(list(b.keys()), list(b.values()), dense_shape=[2, 2, 4]) # `set_union` is applied to each aligned pair of sets. tf.sets.union(a, b) # The result will be equivalent to either of: # # np.array([[{1, 2, 3}, {2, 3}], [{4, 5}, {5, 6, 7, 8}]]) # # collections.OrderedDict([ # ((0, 0, 0), 1), # ((0, 0, 1), 2), # ((0, 0, 2), 3), # ((0, 1, 0), 2), # ((0, 1, 1), 3), # ((1, 0, 0), 4), # ((1, 0, 1), 5), # ((1, 1, 0), 5), # ((1, 1, 1), 6), # ((1, 1, 2), 7), # ((1, 1, 3), 8), # ]) ``` Args: a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices must be sorted in row-major order. b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices must be sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in `a` and `b`. Returns: A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but the last dimension the same. Elements along the last dimension contain the unions." 
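As a compact complement to the SparseTensor examples above, the set ops also accept pairs of dense `Tensor`s directly; a minimal sketch (assumes TensorFlow 2.x):

```python
import tensorflow as tf

# Each row is a set: a = [{1, 2}, {3, 4}], b = [{2, 5}, {4, 6}].
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[2, 5], [4, 6]])

u = tf.sets.union(a, b)       # returns a SparseTensor
print(tf.sparse.to_dense(u))  # [[1 2 5]
                              #  [3 4 6]]
```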
9807,SobolSampleOpTest,tensorflow/tensorflow/python/ops/sobol_ops_test.py,30,class, 9808,sort,tensorflow/tensorflow/python/ops/sort_ops.py,39,function,"Sorts a tensor. Usage: ```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.sort(a, axis=-1, direction='ASCENDING', name=None) c = tf.keras.backend.eval(b) # Here, c = [ 1. 2.8 10. 26.9 62.3 166.32] ``` Args: values: 1-D or higher numeric `Tensor`. axis: The axis along which to sort. The default is -1, which sorts the last axis. direction: The direction in which to sort the values (`'ASCENDING'` or `'DESCENDING'`). name: Optional name for the operation. Returns: A `Tensor` with the same dtype and shape as `values`, with the elements sorted along the given `axis`. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid." 9809,argsort,tensorflow/tensorflow/python/ops/sort_ops.py,73,function,"Returns the indices of a tensor that give its sorted order along an axis. For a 1D tensor, `tf.gather(values, tf.argsort(values))` is equivalent to `tf.sort(values)`. For higher dimensions, the output has the same shape as `values`, but along the given axis, values represent the index of the sorted element in that slice of the tensor at the given position. Usage: ```python import tensorflow as tf a = [1, 10, 26.9, 2.8, 166.32, 62.3] b = tf.argsort(a, axis=-1, direction='ASCENDING', stable=False, name=None) c = tf.keras.backend.eval(b) # Here, c = [0 3 1 2 5 4] ``` Args: values: 1-D or higher numeric `Tensor`. axis: The axis along which to sort. The default is -1, which sorts the last axis. direction: The direction in which to sort the values (`'ASCENDING'` or `'DESCENDING'`). stable: If True, equal elements in the original tensor will not be re-ordered in the returned order. Unstable sort is not yet implemented, but will eventually be the default for performance reasons. If you require a stable order, pass `stable=True` for forwards compatibility. name: Optional name for the operation. Returns: An int32 `Tensor` with the same shape as `values`. The indices that would sort each slice of the given `values` along the given `axis`. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid." 9810,_sort_or_argsort,tensorflow/tensorflow/python/ops/sort_ops.py,115,function,"Internal sort/argsort implementation. Args: values: The input values. axis: The axis along which to sort. direction: 'ASCENDING' or 'DESCENDING'. return_argsort: Whether to return the argsort result. Returns: Either the sorted values, or the indices of the sorted values in the original tensor. See the `sort` and `argsort` docstrings. Raises: ValueError: If axis is not a constant scalar, or the direction is invalid." 9811,_descending_sort,tensorflow/tensorflow/python/ops/sort_ops.py,146,function,"Sorts values in reverse using `top_k`. Args: values: Tensor of numeric values. axis: Index of the axis which values should be sorted along. return_argsort: If False, return the sorted values. If True, return the indices that would sort the values. Returns: The sorted values." 9812,_ascending_sort,tensorflow/tensorflow/python/ops/sort_ops.py,200,function, 9813,SortTest,tensorflow/tensorflow/python/ops/sort_ops_test.py,35,class, 9814,_SparseReorderGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,37,function,"Gradients for the SparseReorder op. 
Args: op: the SparseReorder op unused_output_indices_grad: the incoming gradients of the output indices output_values_grad: the incoming gradients of the output values Returns: Gradient for each of the 3 input tensors: (input_indices, input_values, input_shape) The gradients for input_indices and input_shape are None." 9815,_SparseAddGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,66,function,"The backward operator for the SparseAdd op. The SparseAdd op calculates A + B, where A, B, and the sum are all represented as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. non-empty values of the sum, and outputs the gradients w.r.t. the non-empty values of A and B. Args: op: the SparseAdd op *grads: the incoming gradients, one element per output of `op` Returns: Gradient for each of the 6 input tensors of SparseAdd: (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh) The gradients for the indices, shapes, and the threshold are None." 9816,_SparseTensorDenseAddGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,100,function, 9817,_SparseReduceSumGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,107,function,Similar to gradient for the Sum Op (i.e. tf.reduce_sum()). 9818,_SparseSliceGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,120,function,"The backward operator for the SparseSlice op. This op takes in the upstream gradient w.r.t. non-empty values of the sliced `SparseTensor`, and outputs the gradients w.r.t. the non-empty values of input `SparseTensor`. Args: op: the SparseSlice op *grads: the incoming gradients, one element per output of `op` Returns: Gradient for each of the 5 input tensors of SparseSlice: (indices, values, shape, start, size) The gradients for the indices, shape, start and the size are None." 9819,_SparseTensorDenseMatMulGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,149,function,"Gradients for the dense tensor in the SparseTensorDenseMatMul op. If either input is complex, no gradient is provided. Args: op: the SparseTensorDenseMatMul op grad: the incoming gradient Returns: Gradient for each of the 4 input tensors: (sparse_indices, sparse_values, sparse_shape, dense_tensor) The gradients for indices and shape are None. Raises: TypeError: When the two operands don't have the same type." 9820,_SparseDenseCwiseAddGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,205,function, 9821,_SparseDenseCwiseMulOrDivGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,210,function,"Common code for SparseDenseCwise{Mul,Div} gradients." 9822,_SparseDenseCwiseMulGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,245,function,Gradients for SparseDenseCwiseMul. 9823,_SparseDenseCwiseDivGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,251,function,Gradients for SparseDenseCwiseDiv. 9824,_SparseSoftmaxGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,257,function,"Gradients for SparseSoftmax. The calculation is the same as SoftmaxGrad: grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax where we now only operate on the non-zero values present in the SparseTensors. Args: op: the SparseSoftmax op. grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values. Returns: Gradients w.r.t. the input (sp_indices, sp_values, sp_shape)." 
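The gradient identity quoted in `_SparseSoftmaxGrad` above can be sanity-checked densely; a small NumPy illustration (not the registered gradient itself, just the formula it names):

```python
import numpy as np

x = np.array([0.5, 1.0, -0.3])
softmax = np.exp(x) / np.exp(x).sum()
grad_softmax = np.array([0.1, -0.2, 0.3])  # an arbitrary upstream gradient

# grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
grad_x = (grad_softmax - np.sum(grad_softmax * softmax)) * softmax

# Finite-difference check of the same vector-Jacobian product.
eps = 1e-6
fd = np.zeros_like(x)
for i in range(len(x)):
    xp = x.copy()
    xp[i] += eps
    sp = np.exp(xp) / np.exp(xp).sum()
    fd[i] = np.sum(grad_softmax * (sp - softmax)) / eps
print(np.allclose(grad_x, fd, atol=1e-5))  # True
```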
9825,_SparseSparseMaximumGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,290,function, 9826,_SparseSparseMinimumGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,296,function, 9827,_SparseFillEmptyRowsGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,302,function,Gradients for SparseFillEmptyRows. 9828,_SparseToDenseGrad,tensorflow/tensorflow/python/ops/sparse_grad.py,316,function, 9829,_convert_to_sparse_tensor,tensorflow/tensorflow/python/ops/sparse_ops.py,57,function,"Convert `sp_input` to `SparseTensor` and return it. Args: sp_input: `SparseTensor` or `SparseTensorValue`. Returns: `sp_input` converted to `SparseTensor`. Raises: ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`." 9830,_convert_to_sparse_tensors,tensorflow/tensorflow/python/ops/sparse_ops.py,76,function,"Convert `sp_inputs` to `SparseTensor` objects and return them. Args: sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue` objects. Returns: `sp_inputs` converted to `SparseTensor` objects. Raises: ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor `SparseTensorValue`." 9831,_make_int64_tensor,tensorflow/tensorflow/python/ops/sparse_ops.py,97,function, 9832,from_dense,tensorflow/tensorflow/python/ops/sparse_ops.py,108,function,"Converts a dense tensor into a sparse tensor. Only elements not equal to zero will be present in the result. The resulting `SparseTensor` has the same dtype and shape as the input. Args: tensor: A dense `Tensor` to be converted to a `SparseTensor`. name: Optional name for the op. Returns: The `SparseTensor`." 9833,sparse_expand_dims,tensorflow/tensorflow/python/ops/sparse_ops.py,131,function,"Returns a tensor with a length-1 axis inserted at index `axis`. Given a tensor `input`, this operation inserts a dimension of length 1 at the dimension index `axis` of `input`'s shape. The dimension index follows python indexing rules: it is zero-based, and a negative index is counted backward from the end. This operation is useful to: * Add an outer ""batch"" dimension to a single element. * Align axes for broadcasting. * Add an inner vector-length axis to a tensor of scalars. For example: If you have a sparse tensor with shape `[height, width, depth]`: >>> sp = tf.sparse.SparseTensor(indices=[[3,4,1]], values=[7,], ... dense_shape=[10,10,3]) You can add an outer `batch` axis by passing `axis=0`: >>> tf.sparse.expand_dims(sp, axis=0).shape.as_list() [1, 10, 10, 3] The new axis location matches Python `list.insert(axis, 1)`: >>> tf.sparse.expand_dims(sp, axis=1).shape.as_list() [10, 1, 10, 3] Following standard python indexing rules, a negative `axis` counts from the end so `axis=-1` adds an innermost dimension: >>> tf.sparse.expand_dims(sp, axis=-1).shape.as_list() [10, 10, 3, 1] Note: Unlike `tf.expand_dims` this function includes a default value for the `axis`: `-1`. So if `axis` is not specified, an inner dimension is added. >>> sp.shape.as_list() [10, 10, 3] >>> tf.sparse.expand_dims(sp).shape.as_list() [10, 10, 3, 1] This operation requires that `axis` is a valid index for `input.shape`, following python indexing rules: ``` -1-tf.rank(input) <= axis <= tf.rank(input) ``` This operation is related to: * `tf.expand_dims`, which provides this functionality for dense tensors. * `tf.squeeze`, which removes dimensions of size 1 from dense tensors. * `tf.sparse.reshape`, which provides more flexible reshaping capability. Args: sp_input: A `SparseTensor`. axis: 0-D (scalar). 
Specifies the dimension index at which to expand the shape of `input`. Must be in the range `[-rank(sp_input) - 1, rank(sp_input)]`. Defaults to `-1`. name: The name of the output `SparseTensor`. Returns: A `SparseTensor` with the same data as `sp_input`, but its shape has an additional dimension of size 1 added." 9834,sparse_eye,tensorflow/tensorflow/python/ops/sparse_ops.py,237,function,"Creates a two-dimensional sparse tensor with ones along the diagonal. Args: num_rows: Non-negative integer or `int32` scalar `tensor` giving the number of rows in the resulting matrix. num_columns: Optional non-negative integer or `int32` scalar `tensor` giving the number of columns in the resulting matrix. Defaults to `num_rows`. dtype: The type of element in the resulting `Tensor`. name: A name for this `Op`. Defaults to ""eye"". Returns: A `SparseTensor` of shape [num_rows, num_columns] with ones along the diagonal." 9835,sparse_concat,tensorflow/tensorflow/python/ops/sparse_ops.py,275,function,"Concatenates a list of `SparseTensor` along the specified dimension. Concatenation is with respect to the dense versions of each sparse input. It is assumed that each input is a `SparseTensor` whose elements are ordered along increasing dimension number. If expand_nonconcat_dim is False, all inputs' shapes must match, except for the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are allowed to vary among all inputs. The `indices`, `values`, and `shapes` lists must have the same length. If expand_nonconcat_dim is False, then the output shape is identical to the inputs', except along the concat dimension, where it is the sum of the inputs' sizes along that dimension. If expand_nonconcat_dim is True, then the output shape along the non-concat dimensions will be expanded to the largest among all inputs, and it is the sum of the inputs' sizes along the concat dimension. The output elements will be resorted to preserve the sort order along increasing dimension number. This op runs in `O(M log M)` time, where `M` is the total number of non-empty values across all inputs. This is due to the need for an internal sort in order to concatenate efficiently across an arbitrary dimension. For example, if `axis = 1` and the inputs are sp_inputs[0]: shape = [2, 3] [0, 2]: ""a"" [1, 0]: ""b"" [1, 1]: ""c"" sp_inputs[1]: shape = [2, 4] [0, 1]: ""d"" [0, 2]: ""e"" then the output will be shape = [2, 7] [0, 2]: ""a"" [0, 4]: ""d"" [0, 5]: ""e"" [1, 0]: ""b"" [1, 1]: ""c"" Graphically this is equivalent to doing [ a] concat [ d e ] = [ a d e ] [b c ] [ ] [b c ] Another example, if 'axis = 1' and the inputs are sp_inputs[0]: shape = [3, 3] [0, 2]: ""a"" [1, 0]: ""b"" [2, 1]: ""c"" sp_inputs[1]: shape = [2, 4] [0, 1]: ""d"" [0, 2]: ""e"" if expand_nonconcat_dim = False, this will result in an error. But if expand_nonconcat_dim = True, this will result in: shape = [3, 7] [0, 2]: ""a"" [0, 4]: ""d"" [0, 5]: ""e"" [1, 0]: ""b"" [2, 1]: ""c"" Graphically this is equivalent to doing [ a] concat [ d e ] = [ a d e ] [b ] [ ] [b ] [ c ] [ c ] Args: axis: Dimension to concatenate along. Must be in range [-rank, rank), where rank is the number of dimensions in each input `SparseTensor`. sp_inputs: List of `SparseTensor` to concatenate. name: A name prefix for the returned tensors (optional). expand_nonconcat_dim: Whether to allow the expansion in the non-concat dimensions. Defaults to False. concat_dim: The old (deprecated) name for axis. 
expand_nonconcat_dims: alias for expand_nonconcat_dim Returns: A `SparseTensor` with the concatenated output. Raises: TypeError: If `sp_inputs` is not a list of `SparseTensor`." 9836,sparse_concat_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,388,function, 9837,sparse_add,tensorflow/tensorflow/python/ops/sparse_ops.py,430,function,"Adds two tensors, at least one of which is a `SparseTensor`. If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order of arguments does not matter. Use vanilla `tf.add()` for adding two dense `Tensor`s. The shapes of the two operands must match: broadcasting is not supported. The indices of any input `SparseTensor` are assumed ordered in standard lexicographic order. If this is not the case, before this step run `SparseReorder` to restore index ordering. If both arguments are sparse, we perform ""clipping"" as follows. By default, if two values sum to zero at some index, the output `SparseTensor` would still include that particular location in its index, storing a zero in the corresponding value slot. To override this, callers can specify `thresh`, indicating that if the sum has a magnitude strictly smaller than `thresh`, its corresponding value and index would then not be included. In particular, `thresh == 0.0` (default) means everything is kept and actual thresholding happens only for a positive value. For example, suppose the logical sum of two sparse operands is (densified): [ 2] [.1 0] [ 6 -.2] Then, * `thresh == 0` (the default): all 5 index/value pairs will be returned. * `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three index/value pairs will be returned. * `thresh == 0.21`: .1, 0, and -.2 will vanish. Args: a: The first operand; `SparseTensor` or `Tensor`. b: The second operand; `SparseTensor` or `Tensor`. At least one operand must be sparse. threshold: An optional 0-D `Tensor` (defaults to `0`). The magnitude threshold that determines if an output value/index pair takes space. Its dtype should match that of the values if they are real; if the latter are complex64/complex128, then the dtype should be float32/float64, correspondingly. thresh: Deprecated alias for `threshold`. Returns: A `SparseTensor` or a `Tensor`, representing the sum. Raises: TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead." 9838,sparse_add_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,491,function,"Adds two tensors, at least one of which is a `SparseTensor`. If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order of arguments does not matter. Use vanilla `tf.add()` for adding two dense `Tensor`s. The shapes of the two operands must match: broadcasting is not supported. The indices of any input `SparseTensor` are assumed ordered in standard lexicographic order. If this is not the case, before this step run `SparseReorder` to restore index ordering. If both arguments are sparse, we perform ""clipping"" as follows. By default, if two values sum to zero at some index, the output `SparseTensor` would still include that particular location in its index, storing a zero in the corresponding value slot. To override this, callers can specify `threshold`, indicating that if the sum has a magnitude strictly smaller than `threshold`, its corresponding value and index would then not be included. 
In particular, `threshold == 0.0` (default) means everything is kept and actual thresholding happens only for a positive value. For example, suppose the logical sum of two sparse operands is (densified): [ 2] [.1 0] [ 6 -.2] Then, * `threshold == 0` (the default): all 5 index/value pairs will be returned. * `threshold == 0.11`: only .1 and 0 will vanish, and the remaining three index/value pairs will be returned. * `threshold == 0.21`: .1, 0, and -.2 will vanish. Args: a: The first operand; `SparseTensor` or `Tensor`. b: The second operand; `SparseTensor` or `Tensor`. At least one operand must be sparse. threshold: A 0-D `Tensor`. The magnitude threshold that determines if an output value/index pair takes space. Its dtype should match that of the values if they are real; if the latter are complex64/complex128, then the dtype should be float32/float64, correspondingly. Returns: A `SparseTensor` or a `Tensor`, representing the sum. Raises: TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead." 9839,sparse_cross,tensorflow/tensorflow/python/ops/sparse_ops.py,575,function,"Generates sparse cross from a list of sparse and dense tensors. For example, if the inputs are * inputs[0]: SparseTensor with shape = [2, 2] [0, 0]: ""a"" [1, 0]: ""b"" [1, 1]: ""c"" * inputs[1]: SparseTensor with shape = [2, 1] [0, 0]: ""d"" [1, 0]: ""e"" * inputs[2]: Tensor [[""f""], [""g""]] then the output will be: shape = [2, 2] [0, 0]: ""a_X_d_X_f"" [1, 0]: ""b_X_e_X_g"" [1, 1]: ""c_X_e_X_g"" Customized separator ""_Y_"": >>> inp_0 = tf.constant([['a'], ['b']]) >>> inp_1 = tf.constant([['c'], ['d']]) >>> output = tf.sparse.cross([inp_0, inp_1], separator='_Y_') >>> output.values Args: inputs: An iterable of `Tensor` or `SparseTensor`. name: Optional name for the op. separator: A string added between each string being joined. Defaults to '_X_'. Returns: A `SparseTensor` of type `string`." 9840,sparse_cross_hashed,tensorflow/tensorflow/python/ops/sparse_ops.py,635,function,"Generates hashed sparse cross from a list of sparse and dense tensors. For example, if the inputs are * inputs[0]: SparseTensor with shape = [2, 2] [0, 0]: ""a"" [1, 0]: ""b"" [1, 1]: ""c"" * inputs[1]: SparseTensor with shape = [2, 1] [0, 0]: ""d"" [1, 0]: ""e"" * inputs[2]: Tensor [[""f""], [""g""]] then the output will be: shape = [2, 2] [0, 0]: FingerprintCat64( Fingerprint64(""f""), FingerprintCat64( Fingerprint64(""d""), Fingerprint64(""a""))) [1, 0]: FingerprintCat64( Fingerprint64(""g""), FingerprintCat64( Fingerprint64(""e""), Fingerprint64(""b""))) [1, 1]: FingerprintCat64( Fingerprint64(""g""), FingerprintCat64( Fingerprint64(""e""), Fingerprint64(""c""))) Args: inputs: An iterable of `Tensor` or `SparseTensor`. num_buckets: An `int` that is `>= 0`. output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. hash_key: Integer hash_key that will be used by the `FingerprintCat64` function. If not given, will use a default key. name: Optional name for the op. Returns: A `SparseTensor` of type `int64`." 9841,_sparse_cross_internal_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,686,function,See gen_sparse_ops.sparse_cross_v2. 9842,_sparse_cross_internal,tensorflow/tensorflow/python/ops/sparse_ops.py,712,function,See gen_sparse_ops.sparse_cross. 
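The `sparse_cross` doctest above is flattened and its expected output elided; a minimal runnable sketch of the documented separator behavior (assumes TensorFlow 2.x):

```python
import tensorflow as tf

inp_0 = tf.constant([['a'], ['b']])
inp_1 = tf.constant([['c'], ['d']])

# Each aligned pair of rows is crossed, joined with the custom separator.
output = tf.sparse.cross([inp_0, inp_1], separator='_Y_')
print(output.values)  # [b'a_Y_c' b'b_Y_d']
```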
9843,sparse_dense_cwise_add,tensorflow/tensorflow/python/ops/sparse_ops.py,762,function,"Adds up a SparseTensor and a dense Tensor, using these special rules: (1) Broadcasts the dense side to have the same shape as the sparse side, if eligible; (2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition. By the rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values. Args: sp_t: the SparseTensor operand. dense_t: the dense Tensor operand; must have the same dtype and a broadcast-compatible shape as `sp_t`. Returns: output: the SparseTensor output." 9844,sparse_reorder,tensorflow/tensorflow/python/ops/sparse_ops.py,789,function,"Reorders a `SparseTensor` into the canonical, row-major ordering. Note that by convention, all sparse ops preserve the canonical ordering along increasing dimension number. The only time ordering can be violated is during manual manipulation of the indices and values to add entries. Reordering does not affect the shape of the `SparseTensor`. For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`: [0, 3]: b [0, 1]: a [3, 1]: d [2, 0]: c then the output will be a `SparseTensor` of shape `[4, 5]` and `indices` / `values`: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d Args: sp_input: The input `SparseTensor`. name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` with the same shape and non-empty values, but in canonical ordering. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9845,sparse_reshape,tensorflow/tensorflow/python/ops/sparse_ops.py,840,function,"Reshapes a `SparseTensor` to represent values in a new dense shape. This operation has the same semantics as `reshape` on the represented dense tensor. The indices of non-empty values in `sp_input` are recomputed based on the new dense shape, and a new `SparseTensor` is returned containing the new indices and new shape. The order of non-empty values in `sp_input` is unchanged. If one component of `shape` is the special value -1, the size of that dimension is computed so that the total dense size remains constant. At most one component of `shape` can be -1. The number of dense elements implied by `shape` must be the same as the number of dense elements originally represented by `sp_input`. For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`: [0, 0, 0]: a [0, 0, 1]: b [0, 1, 0]: c [1, 0, 0]: d [1, 2, 3]: e and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of shape `[9, 4]` and `indices` / `values`: [0, 0]: a [0, 1]: b [1, 2]: c [4, 2]: d [8, 1]: e Args: sp_input: The input `SparseTensor`. shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the represented `SparseTensor`. name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` with the same non-empty values but with indices calculated by the new dense shape. Raises: TypeError: If `sp_input` is not a `SparseTensor`. ValueError: If argument `shape` requests a `SparseTensor` with a different number of elements than `sp_input`. ValueError: If `shape` has more than one inferred (== -1) dimension." 9846,KeywordRequired,tensorflow/tensorflow/python/ops/sparse_ops.py,945,class, 9847,sparse_split,tensorflow/tensorflow/python/ops/sparse_ops.py,956,function,"Split a `SparseTensor` into `num_split` tensors along `axis`. 
If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`, each slice starting from 0:`shape[axis] % num_split` gets one extra dimension. For example, if `axis = 1` and `num_split = 2` and the input is: input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: output_tensor[0] = [ a ] [b c ] output_tensor[1] = [ d e ] [ ] Args: keyword_required: Python 2 stand-in for * (temporary for argument reorder) sp_input: The `SparseTensor` to split. num_split: A Python integer. The number of ways to split. axis: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in range [-rank, rank), where rank is the number of dimensions in the input `SparseTensor`. name: A name for the operation (optional). split_dim: Deprecated old name for axis. Returns: `num_split` `SparseTensor` objects resulting from splitting `value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. ValueError: If the deprecated `split_dim` and `axis` are both non-None." 9848,sparse_split_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,1029,function,"Split a `SparseTensor` into `num_split` tensors along `axis`. If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`, each slice starting from 0:`shape[axis] % num_split` gets one extra dimension. For example: >>> indices = [[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]] >>> values = [1, 2, 3, 4, 5] >>> t = tf.SparseTensor(indices=indices, values=values, dense_shape=[2, 7]) >>> tf.sparse.to_dense(t) >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=1) >>> tf.sparse.to_dense(output[0]) >>> tf.sparse.to_dense(output[1]) >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=0) >>> tf.sparse.to_dense(output[0]) >>> tf.sparse.to_dense(output[1]) >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=-1) >>> tf.sparse.to_dense(output[0]) >>> tf.sparse.to_dense(output[1]) Args: sp_input: The `SparseTensor` to split. num_split: A Python integer. The number of ways to split. axis: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in range [-rank, rank), where rank is the number of dimensions in the input `SparseTensor`. name: A name for the operation (optional). Returns: `num_split` `SparseTensor` objects resulting from splitting `value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9849,sparse_slice,tensorflow/tensorflow/python/ops/sparse_ops.py,1098,function,"Slice a `SparseTensor` based on the `start` and `size`. For example, if the input is input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: sparse.slice([0, 0], [2, 4]) = shape = [2, 4] [ a ] [b c ] sparse.slice([0, 4], [2, 3]) = shape = [2, 3] [ d e ] [ ] Args: sp_input: The `SparseTensor` to split. start: 1-D tensor representing the start of the slice. size: 1-D tensor representing the size of the slice. name: A name for the operation (optional). Returns: A `SparseTensor` object resulting from slicing. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9850,sparse_to_dense,tensorflow/tensorflow/python/ops/sparse_ops.py,1151,function,"Converts a sparse representation into a dense tensor. Builds an array `dense` with shape `output_shape` such that ```python # If sparse_indices is scalar dense[i] = (i == sparse_indices ? 
sparse_values : default_value) # If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i] # If sparse_indices is an n by d matrix, then for each i in [0, n) dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] ``` All other values in `dense` are set to `default_value`. If `sparse_values` is a scalar, all sparse indices are set to this single value. Indices should be sorted in lexicographic order, and indices must not contain any repeats. If `validate_indices` is True, these properties are checked during execution. Args: sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape of the dense output tensor. sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of `sparse_indices`, or a scalar value to be used for all sparse indices. default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value to set for indices not specified in `sparse_indices`. Defaults to zero. validate_indices: A boolean value. If True, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name for the operation (optional). Returns: Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`." 9851,sparse_reduce_max_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,1207,function,"Computes the max of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` if `output_is_sparse` is `False`, or a `SparseTensor` if `output_is_sparse` is `True`. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. The values not defined in `sp_input` don't participate in the reduce max, as opposed to being implicitly assumed to be 0 -- hence it can return negative values for sparse `axis`. But, in case there are no values in `axis`, it will reduce to 0. See second example below. For example: ```python # 'x' represents [[1, ?, 2] # [?, 3, ?]] # where ? is implicitly-zero. tf.sparse.reduce_max(x) ==> 3 tf.sparse.reduce_max(x, 0) ==> [1, 3, 2] tf.sparse.reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis. tf.sparse.reduce_max(x, 1, keepdims=True) ==> [[2], [3]] tf.sparse.reduce_max(x, [0, 1]) ==> 3 # 'y' represents [[-7, ?] # [ 4, 3] # [ ?, ?]] tf.sparse.reduce_max(y, 1) ==> [-7, 4, 0] ``` Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. output_is_sparse: If true, returns a `SparseTensor` instead of a dense `Tensor` (the default). name: A name for the operation (optional). Returns: The reduced Tensor or the reduced SparseTensor if `output_is_sparse` is True." 
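The flattened `sparse_reduce_max_v2` example above, reconstructed as a runnable sketch via the public `tf.sparse.reduce_max` endpoint (assumes TensorFlow 2.x):

```python
import tensorflow as tf

# x represents [[1, ?, 2], [?, 3, ?]] where ? is implicitly zero.
x = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 2], [1, 1]],
    values=[1, 2, 3],
    dense_shape=[2, 3])

print(tf.sparse.reduce_max(x))     # 3
print(tf.sparse.reduce_max(x, 0))  # [1 3 2]
print(tf.sparse.reduce_max(x, 1))  # [2 3]
```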
9852,sparse_reduce_max,tensorflow/tensorflow/python/ops/sparse_ops.py,1295,function,"Computes the max of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. The values not defined in `sp_input` don't participate in the reduce max, as opposed to being implicitly assumed to be 0 -- hence it can return negative values for sparse `reduction_axes`. But, in case there are no values in `reduction_axes`, it will reduce to 0. See second example below. For example: ```python # 'x' represents [[1, ?, 2] # [?, 3, ?]] # where ? is implicitly-zero. tf.sparse.reduce_max(x) ==> 3 tf.sparse.reduce_max(x, 0) ==> [1, 3, 2] tf.sparse.reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis. tf.sparse.reduce_max(x, 1, keepdims=True) ==> [[2], [3]] tf.sparse.reduce_max(x, [0, 1]) ==> 3 # 'y' represents [[-7, ?] # [ 4, 3] # [ ?, ?]] tf.sparse.reduce_max(y, 1) ==> [-7, 4, 0] ``` Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of `axis`. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced Tensor." 9853,sparse_reduce_max_sparse,tensorflow/tensorflow/python/ops/sparse_ops.py,1365,function,"Computes the max of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced SparseTensor." 9854,sparse_reduce_sum_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,1415,function,"Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` if `output_is_sparse` is `False`, or a `SparseTensor` if `output_is_sparse` is `True`. 
Note: if `output_is_sparse` is True, a gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. For example: ```python # 'x' represents [[1, ?, 1] # [?, 1, ?]] # where ? is implicitly-zero. tf.sparse.reduce_sum(x) ==> 3 tf.sparse.reduce_sum(x, 0) ==> [1, 1, 1] tf.sparse.reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis. tf.sparse.reduce_sum(x, 1, keepdims=True) ==> [[2], [1]] tf.sparse.reduce_sum(x, [0, 1]) ==> 3 ``` Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. output_is_sparse: If true, returns a `SparseTensor` instead of a dense `Tensor` (the default). name: A name for the operation (optional). Returns: The reduced Tensor or the reduced SparseTensor if `output_is_sparse` is True." 9855,sparse_reduce_sum,tensorflow/tensorflow/python/ops/sparse_ops.py,1491,function,"Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. For example: ```python # 'x' represents [[1, ?, 1] # [?, 1, ?]] # where ? is implicitly-zero. tf.sparse.reduce_sum(x) ==> 3 tf.sparse.reduce_sum(x, 0) ==> [1, 1, 1] tf.sparse.reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis. tf.sparse.reduce_sum(x, 1, keepdims=True) ==> [[2], [1]] tf.sparse.reduce_sum(x, [0, 1]) ==> 3 ``` Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of `axis`. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced Tensor." 9856,sparse_reduce_sum_sparse,tensorflow/tensorflow/python/ops/sparse_ops.py,1548,function,"Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. 
Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced SparseTensor." 9857,sparse_tensor_to_dense,tensorflow/tensorflow/python/ops/sparse_ops.py,1599,function,"Converts a `SparseTensor` into a dense tensor. This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s. For example, if `sp_input` has shape `[3, 5]` and non-empty string values: [0, 1]: a [0, 3]: b [2, 0]: c and `default_value` is `x`, then the output will be a dense `[3, 5]` string tensor with values: [[x a x b x] [x x x x x] [c x x x x]] Indices must be without repeats. This is only tested if `validate_indices` is `True`. Args: sp_input: The input `SparseTensor`. default_value: Scalar value to set for indices not specified in `sp_input`. Defaults to zero. validate_indices: A boolean value. If `True`, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name prefix for the returned tensors (optional). Returns: A dense tensor with shape `sp_input.dense_shape` and values specified by the non-empty values in `sp_input`. Indices not in `sp_input` are assigned `default_value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9858,sparse_to_indicator,tensorflow/tensorflow/python/ops/sparse_ops.py,1655,function,"Converts a `SparseTensor` of ids into a dense bool indicator tensor. The last dimension of `sp_input.indices` is discarded and replaced with the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True and False elsewhere in `output`. For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values: [0, 0, 0]: 0 [0, 1, 0]: 10 [1, 0, 3]: 103 [1, 1, 1]: 150 [1, 1, 2]: 149 [1, 1, 3]: 150 [1, 2, 1]: 121 and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool tensor with False everywhere except at positions (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150), (1, 2, 121). Note that repeats are allowed in the input SparseTensor. This op is useful for converting `SparseTensor`s into dense formats for compatibility with ops that expect dense tensors. The input `SparseTensor` must be in row-major order. Args: sp_input: A `SparseTensor` with `values` property of type `int32` or `int64`. vocab_size: A scalar int64 Tensor (or Python int) containing the new size of the last dimension, `all(0 <= sp_input.values < vocab_size)`. name: A name prefix for the returned tensors (optional) Returns: A dense bool indicator tensor representing the indices with specified value. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9859,sparse_merge,tensorflow/tensorflow/python/ops/sparse_ops.py,1719,function,"Combines a batch of feature ids and values into a single `SparseTensor`. The most common use case for this function occurs when feature ids and their corresponding values are stored in `Example` protos on disk. 
`parse_example` will return a batch of ids and a batch of values, and this function joins them into a single logical `SparseTensor` for use in functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc. The `SparseTensor` returned by this function has the following properties: - `indices` is equivalent to `sp_ids.indices` with the last dimension discarded and replaced with `sp_ids.values`. - `values` is simply `sp_values.values`. - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn, vocab_size]`. For example, consider the following feature vectors: ```python vector1 = [-3, 0, 0, 0, 0, 0] vector2 = [ 0, 1, 0, 4, 1, 0] vector3 = [ 5, 0, 0, 9, 0, 0] ``` These might be stored sparsely in the following Example protos by storing only the feature ids (column number if the vectors are treated as a matrix) of the non-zero elements and the corresponding values: ```python examples = [Example(features={ ""ids"": Feature(int64_list=Int64List(value=[0])), ""values"": Feature(float_list=FloatList(value=[-3]))}), Example(features={ ""ids"": Feature(int64_list=Int64List(value=[1, 4, 3])), ""values"": Feature(float_list=FloatList(value=[1, 1, 4]))}), Example(features={ ""ids"": Feature(int64_list=Int64List(value=[0, 3])), ""values"": Feature(float_list=FloatList(value=[5, 9]))})] ``` The result of calling parse_example on these examples will produce a dictionary with entries for ""ids"" and ""values"". Passing those two objects to this function along with vocab_size=6 will produce a `SparseTensor` that sparsely represents all three instances. Namely, the `indices` property will contain the coordinates of the non-zero entries in the feature matrix (the first dimension is the row number in the matrix, i.e., the index within the batch, and the second dimension is the column number, i.e., the feature id); `values` will contain the actual values. `shape` will be the shape of the original matrix, i.e., (3, 6). For our example above, the output will be equal to: ```python SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]], values=[-3, 1, 4, 1, 5, 9], dense_shape=[3, 6]) ``` This method generalizes to higher dimensions by providing a list for both sp_ids and vocab_size. In this case the resulting `SparseTensor` has the following properties: - `indices` is equivalent to `sp_ids[0].indices` with the last dimension discarded and concatenated with `sp_ids[0].values, sp_ids[1].values, ...`. - `values` is simply `sp_values.values`. - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn] + vocab_size`. Args: sp_ids: A single `SparseTensor` with `values` property of type `int32` or `int64`, or a Python list of such `SparseTensor`s. sp_values: A `SparseTensor` of any type. vocab_size: A scalar `int64` Tensor (or Python int) containing the new size of the last dimension, `all(0 <= sp_ids.values < vocab_size)`. Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for all `i`. name: A name prefix for the returned tensors (optional). already_sorted: A boolean to specify whether the per-batch values in `sp_values` are already sorted. If so, skip sorting. False by default (optional). Returns: A `SparseTensor` compactly representing a batch of feature ids and values, useful for passing to functions that expect such a `SparseTensor`. Raises: TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither a `SparseTensor` nor a list thereof. 
Or if `vocab_size` is not a `Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if `vocab_size` is not a `Tensor` (or Python int) or a list thereof and `sp_ids` is a list. ValueError: If `sp_ids` and `vocab_size` are lists of different lengths." 9860,sparse_merge_impl,tensorflow/tensorflow/python/ops/sparse_ops.py,1815,function,Internal implementation for sparse_merge to avoid deprecation warnings. 9861,sparse_retain,tensorflow/tensorflow/python/ops/sparse_ops.py,1873,function,"Retains specified non-empty values within a `SparseTensor`. For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d and `to_retain = [True, False, False, True]`, then the output will be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values: [0, 1]: a [3, 1]: d Args: sp_input: The input `SparseTensor` with `N` non-empty elements. to_retain: A bool vector of length `N` with `M` true values. Returns: A `SparseTensor` with the same shape as the input and `M` non-empty elements corresponding to the true positions in `to_retain`. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9862,sparse_reset_shape,tensorflow/tensorflow/python/ops/sparse_ops.py,1921,function,"Resets the shape of a `SparseTensor` with indices and values unchanged. If `new_shape` is None, returns a copy of `sp_input` with its shape reset to the tight bounding box of `sp_input`. This will be a shape consisting of all zeros if sp_input has no values. If `new_shape` is provided, then it must be larger or equal in all dimensions compared to the shape of `sp_input`. When this condition is met, the returned SparseTensor will have its shape reset to `new_shape` and its indices and values unchanged from that of `sp_input`. For example: Consider a `sp_input` with shape [2, 3, 5]: [0, 0, 1]: a [0, 1, 0]: b [0, 2, 2]: c [1, 0, 3]: d - It is an error to set `new_shape` as [3, 7] since this represents a rank-2 tensor while `sp_input` is rank-3. This is either a ValueError during graph construction (if both shapes are known) or an OpError during run time. - Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or equal in every dimension compared to the original shape [2, 3, 5]. - On the other hand, setting new_shape as [2, 3, 4] is also an error: The third dimension is smaller than the original shape [2, 3, 5] (and an `InvalidArgumentError` will be raised). - If `new_shape` is None, the returned SparseTensor will have a shape [2, 3, 4], which is the tight bounding box of `sp_input`. Args: sp_input: The input `SparseTensor`. new_shape: None or a vector representing the new shape for the returned `SparseTensor`. Returns: A `SparseTensor` with indices and values unchanged from `sp_input`. Its shape is `new_shape` if that is set. Otherwise it is the tight bounding box of `sp_input`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. ValueError: If `new_shape` represents a tensor with a different rank from that of `sp_input` (if shapes are known when graph is constructed). ValueError: If `new_shape` is determined during graph build to have dimension sizes that are too small. OpError: - If `new_shape` has dimension sizes that are too small. - If shapes are not known during graph construction time, and during run time it is found out that the ranks do not match." 9863,sparse_fill_empty_rows,tensorflow/tensorflow/python/ops/sparse_ops.py,2027,function,"Fills empty rows in the input 2-D `SparseTensor` with a default value. 
This op adds entries with the specified `default_value` at index `[row, 0]` for any row in the input that does not already have a value. For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: [0, 1]: a [0, 3]: b [1, 0]: default_value [2, 0]: c [3, 1]: d [4, 0]: default_value Note that the input may have empty columns at the end, with no effect on this op. The output `SparseTensor` will be in row-major order and will have the same shape as the input. This op also returns an indicator vector such that empty_row_indicator[i] = True iff row i was an empty row. Args: sp_input: A `SparseTensor` with shape `[N, M]`. default_value: The value to fill for empty rows, with the same type as `sp_input`. name: A name prefix for the returned tensors (optional). Returns: sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty rows filled in with `default_value`. empty_row_indicator: A bool vector of length `N` indicating whether each input row was empty. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9864,serialize_sparse,tensorflow/tensorflow/python/ops/sparse_ops.py,2093,function,"Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object. Args: sp_input: The input `SparseTensor`. name: A name prefix for the returned tensors (optional). out_type: The `dtype` to use for serialization. Returns: A 3-vector (1-D `Tensor`), with each column representing the serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9865,serialize_sparse_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,2113,function,"Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object. Args: sp_input: The input `SparseTensor`. out_type: The `dtype` to use for serialization. name: A name prefix for the returned tensors (optional). Returns: A 3-vector (1-D `Tensor`), with each column representing the serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9866,serialize_many_sparse,tensorflow/tensorflow/python/ops/sparse_ops.py,2141,function,"Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. name: A name prefix for the returned tensors (optional). out_type: The `dtype` to use for serialization. Returns: A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column represents serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9867,serialize_many_sparse_v2,tensorflow/tensorflow/python/ops/sparse_ops.py,2170,function,"Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. 
The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. out_type: The `dtype` to use for serialization. name: A name prefix for the returned tensors (optional). Returns: A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column represents serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9868,deserialize_sparse,tensorflow/tensorflow/python/ops/sparse_ops.py,2204,function,"Deserialize `SparseTensor` objects. The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where the last dimension stores serialized `SparseTensor` objects and the other N dimensions (N >= 0) correspond to a batch. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, its rank is the rank of the incoming `SparseTensor` objects plus N; the sparse tensors have been concatenated along new dimensions, one for each batch. The output `SparseTensor` object's shape values for the original dimensions are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. The new dimensions match the size of the batch. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `SparseReorder` to restore index ordering. For example, if the serialized input is a `[2 x 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: serialized_sparse: The serialized `SparseTensor` objects. The last dimension must have 3 columns. dtype: The `dtype` of the serialized `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. name: A name prefix for the returned tensors (optional). Returns: A `SparseTensor` representing the deserialized `SparseTensor` objects." 9869,deserialize_many_sparse,tensorflow/tensorflow/python/ops/sparse_ops.py,2275,function,"Deserialize and concatenate `SparseTensors` from a serialized minibatch. The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where `N` is the minibatch size and the rows correspond to packed outputs of `serialize_sparse`. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension). The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `sparse.reorder` to restore index ordering. For example, if the serialized input is a `[2, 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`. 
The serialized and packed `SparseTensor` objects. dtype: The `dtype` of the serialized `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. name: A name prefix for the returned tensors (optional). Returns: A `SparseTensor` representing the deserialized `SparseTensor`s, concatenated along the `SparseTensor`s' first dimension. All of the serialized `SparseTensor`s must have had the same rank and type." 9870,sparse_tensor_dense_matmul,tensorflow/tensorflow/python/ops/sparse_ops.py,2348,function,"Multiply SparseTensor (or dense Matrix) (of rank 2) ""A"" by dense matrix (or SparseTensor) ""B"". Please note that one and only one of the inputs MUST be a SparseTensor and the other MUST be a dense matrix. No validity checking is performed on the indices of `A`. However, the following input format is recommended for optimal behavior: * If `adjoint_a == false`: `A` should be sorted in lexicographically increasing order. Use `sparse.reorder` if you're not sure. * If `adjoint_a == true`: `A` should be sorted in order of increasing dimension 1 (i.e., ""column major"" order instead of ""row major"" order). Using `tf.nn.embedding_lookup_sparse` for sparse multiplication: It's not obvious, but you can consider `embedding_lookup_sparse` as another sparse-dense multiplication. In some situations, you may prefer to use `embedding_lookup_sparse` even though you're not dealing with embeddings. There are two questions to ask in the decision process: Do you need gradients computed as sparse too? Is your sparse data represented as two `SparseTensor`s: ids and values? There is more explanation about the data format below. If you answer yes to any of these questions, consider using `tf.nn.embedding_lookup_sparse`. The following explains the differences between the expected `SparseTensor` formats: For example, if the dense form of your sparse data has shape `[3, 5]` and values: [[ a ] [b c] [ d ]] `SparseTensor` format expected by `sparse_tensor_dense_matmul`: `sp_a` (indices, values): [0, 1]: a [1, 0]: b [1, 4]: c [2, 2]: d `SparseTensor` format expected by `embedding_lookup_sparse`: `sp_ids` `sp_weights` [0, 0]: 1 [0, 0]: a [1, 0]: 0 [1, 0]: b [1, 1]: 4 [1, 1]: c [2, 0]: 2 [2, 0]: d Deciding when to use `sparse_tensor_dense_matmul` vs. `matmul`(a_is_sparse=True): There are a number of questions to ask in the decision process, including: * Will the SparseTensor `A` fit in memory if densified? * Is the column count of the product large (>> 1)? * Is the density of `A` larger than approximately 15%? If the answer to several of these questions is yes, consider converting the `SparseTensor` to a dense one and using `tf.matmul` with `a_is_sparse=True`. This operation tends to perform well when `A` is more sparse, when the column size of the product is small (e.g. matrix-vector multiplication), and when `sp_a.dense_shape` takes on large values. Below is a rough speed comparison between `sparse_tensor_dense_matmul`, labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'. For purposes of the comparison, the time spent converting from a `SparseTensor` to a dense `Tensor` is not included, so it is overly conservative with respect to the time ratio. 
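As a usage sketch (assuming the v2 endpoint `tf.sparse.sparse_dense_matmul`, which exposes the same op; shapes here are arbitrary):

```python
import tensorflow as tf

# A: 2x3 sparse matrix [[1, 0, 2],
#                       [0, 3, 0]]
sp_a = tf.sparse.SparseTensor(
    indices=[[0, 0], [0, 2], [1, 1]],
    values=[1.0, 2.0, 3.0],
    dense_shape=[2, 3])
b = tf.ones([3, 4])  # dense 3x4 matrix

c = tf.sparse.sparse_dense_matmul(sp_a, b)  # dense 2x4 result of A @ B
```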
Benchmark system: CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB GPU: NVidia Tesla k40c Compiled with: `-c opt --config=cuda --copt=-mavx` ``` tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks A sparse [m, k] with % nonzero values between 1% and 80% B dense [k, n] % nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense) 0.01 1 True 100 100 0.000221166 0.00010154 0.459112 0.01 1 True 100 1000 0.00033858 0.000109275 0.322745 0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385 0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669 0.01 1 False 100 100 0.000208085 0.000107603 0.51711 0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762 0.01 1 False 1000 100 0.000308222 0.00010345 0.335635 0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124 0.01 10 True 100 100 0.000218522 0.000105537 0.482958 0.01 10 True 100 1000 0.000340882 0.000111641 0.327506 0.01 10 True 1000 100 0.000315472 0.000117376 0.372064 0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128 0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354 0.01 10 False 100 1000 0.000330552 0.000112615 0.340687 0.01 10 False 1000 100 0.000341277 0.000114097 0.334324 0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549 0.01 25 True 100 100 0.000207806 0.000105977 0.509981 0.01 25 True 100 1000 0.000322879 0.00012921 0.400181 0.01 25 True 1000 100 0.00038262 0.00014158 0.370035 0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504 0.01 25 False 100 100 0.000209401 0.000104696 0.499979 0.01 25 False 100 1000 0.000321161 0.000130737 0.407076 0.01 25 False 1000 100 0.000377012 0.000136801 0.362856 0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413 0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833 0.2 1 True 100 1000 0.000348674 0.000147475 0.422959 0.2 1 True 1000 100 0.000336908 0.00010122 0.300439 0.2 1 True 1000 1000 0.001022 0.000203274 0.198898 0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746 0.2 1 False 100 1000 0.000356127 0.000146824 0.41228 0.2 1 False 1000 100 0.000322664 0.000100918 0.312764 0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648 0.2 10 True 100 100 0.000211692 0.000109903 0.519165 0.2 10 True 100 1000 0.000372819 0.000164321 0.440753 0.2 10 True 1000 100 0.000338651 0.000144806 0.427596 0.2 10 True 1000 1000 0.00108312 0.000758876 0.70064 0.2 10 False 100 100 0.000215727 0.000110502 0.512231 0.2 10 False 100 1000 0.000375419 0.0001613 0.429653 0.2 10 False 1000 100 0.000336999 0.000145628 0.432132 0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618 0.2 25 True 100 100 0.000218705 0.000129913 0.594009 0.2 25 True 100 1000 0.000394794 0.00029428 0.745402 0.2 25 True 1000 100 0.000404483 0.0002693 0.665788 0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052 0.2 25 False 100 100 0.000221494 0.0001306 0.589632 0.2 25 False 100 1000 0.000396436 0.000297204 0.74969 0.2 25 False 1000 100 0.000409346 0.000270068 0.659754 0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046 0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836 0.5 1 True 100 1000 0.000415328 0.000223073 0.537101 0.5 1 True 1000 100 0.000358324 0.00011269 0.314492 0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851 0.5 1 False 100 100 0.000224196 0.000101423 0.452386 0.5 1 False 100 1000 0.000400987 0.000223286 0.556841 0.5 1 False 1000 100 0.000368825 0.00011224 0.304318 0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563 0.5 10 True 100 100 0.000222125 0.000112308 0.505608 0.5 10 True 100 1000 0.000461088 0.00032357 0.701753 0.5 10 True 1000 100 0.000394624 0.000225497 0.571422 0.5 10 True 
1000 1000 0.00158027 0.00190898 1.20801 0.5 10 False 100 100 0.000232083 0.000114978 0.495418 0.5 10 False 100 1000 0.000454574 0.000324632 0.714146 0.5 10 False 1000 100 0.000379097 0.000227768 0.600817 0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638 0.5 25 True 100 100 0.00023429 0.000151703 0.647501 0.5 25 True 100 1000 0.000497462 0.000598873 1.20386 0.5 25 True 1000 100 0.000460778 0.000557038 1.20891 0.5 25 True 1000 1000 0.00170036 0.00467336 2.74845 0.5 25 False 100 100 0.000228981 0.000155334 0.678371 0.5 25 False 100 1000 0.000496139 0.000620789 1.25124 0.5 25 False 1000 100 0.00045473 0.000551528 1.21287 0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927 0.8 1 True 100 100 0.000222037 0.000105301 0.47425 0.8 1 True 100 1000 0.000410804 0.000329327 0.801664 0.8 1 True 1000 100 0.000349735 0.000131225 0.375212 0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633 0.8 1 False 100 100 0.000214079 0.000107486 0.502085 0.8 1 False 100 1000 0.000413746 0.000323244 0.781261 0.8 1 False 1000 100 0.000348983 0.000131983 0.378193 0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282 0.8 10 True 100 100 0.000229159 0.00011825 0.516017 0.8 10 True 100 1000 0.000498845 0.000532618 1.0677 0.8 10 True 1000 100 0.000383126 0.00029935 0.781336 0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689 0.8 10 False 100 100 0.000230783 0.000124958 0.541452 0.8 10 False 100 1000 0.000493393 0.000550654 1.11606 0.8 10 False 1000 100 0.000377167 0.000298581 0.791642 0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024 0.8 25 True 100 100 0.000233496 0.000175241 0.75051 0.8 25 True 100 1000 0.00055654 0.00102658 1.84458 0.8 25 True 1000 100 0.000463814 0.000783267 1.68875 0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132 0.8 25 False 100 100 0.000240243 0.000175047 0.728625 0.8 25 False 100 1000 0.000578102 0.00104499 1.80763 0.8 25 False 1000 100 0.000485113 0.000776849 1.60138 0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992 ``` Args: sp_a: SparseTensor (or dense Matrix) A, of rank 2. b: dense Matrix (or SparseTensor) B, with the same dtype as sp_a. adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex, this is transpose(conj(A)). Otherwise it's transpose(A). adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex, this is transpose(conj(B)). Otherwise it's transpose(B). name: A name prefix for the returned tensors (optional) Returns: A dense matrix (pseudo-code in dense np.matrix notation): `A = A.H if adjoint_a else A` `B = B.H if adjoint_b else B` `return A*B`" 9871,sparse_softmax,tensorflow/tensorflow/python/ops/sparse_ops.py,2584,function,"Applies softmax to a batched N-D `SparseTensor`. The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` (where `N >= 2`), and with indices sorted in the canonical lexicographic order. This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost logical submatrix with shape `[B, C]`, but with the catch that *the implicitly zero elements do not participate*. Specifically, the algorithm is equivalent to: (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix with shape `[B, C]`, along the size-C dimension; (2) Masks out the original implicitly-zero locations; (3) Renormalizes the remaining elements. Hence, the `SparseTensor` result has exactly the same non-zero indices and shape. Example: ```python # First batch: # [? e.] # [1. ? ] # Second batch: # [e ? 
] # [e e ] shape = [2, 2, 2] # 3-D SparseTensor values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]]) indices = np.vstack(np.where(values)).astype(np.int64).T result = tf.sparse.softmax(tf.sparse.SparseTensor(indices, values, shape)) # ...returning a 3-D SparseTensor, equivalent to: # [? 1.] [1 ?] # [1. ? ] and [.5 .5] # where ? means implicitly zero. ``` Args: sp_input: N-D `SparseTensor`, where `N >= 2`. name: optional name of the operation. Returns: output: N-D `SparseTensor` representing the results." 9872,sparse_maximum,tensorflow/tensorflow/python/ops/sparse_ops.py,2640,function,"Returns the element-wise max of two SparseTensors. Assumes the two SparseTensors have the same shape, i.e., no broadcasting. Example: ```python sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7]) sp_one = sparse_tensor.SparseTensor([[1]], [1], [7]) res = tf.sparse.maximum(sp_zero, sp_one).eval() # ""res"" should be equal to SparseTensor([[0], [1]], [0, 1], [7]). ``` Args: sp_a: a `SparseTensor` operand whose dtype is real and whose indices are lexicographically ordered. sp_b: the other `SparseTensor` operand with the same requirements (and the same shape). name: optional name of the operation. Returns: output: the output SparseTensor." 9873,sparse_minimum,tensorflow/tensorflow/python/ops/sparse_ops.py,2678,function,"Returns the element-wise min of two SparseTensors. Assumes the two SparseTensors have the same shape, i.e., no broadcasting. Example: ```python sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7]) sp_one = sparse_tensor.SparseTensor([[1]], [1], [7]) res = tf.sparse.minimum(sp_zero, sp_one).eval() # ""res"" should be equal to SparseTensor([[0], [1]], [0, 0], [7]). ``` Args: sp_a: a `SparseTensor` operand whose dtype is real and whose indices are lexicographically ordered. sp_b: the other `SparseTensor` operand with the same requirements (and the same shape). name: optional name of the operation. Returns: output: the output SparseTensor." 9874,sparse_transpose,tensorflow/tensorflow/python/ops/sparse_ops.py,2716,function,"Transposes a `SparseTensor`. The returned tensor's dimension `i` will correspond to the input dimension `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank of the input tensor. Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors. For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`: [0, 3]: b [0, 1]: a [3, 1]: d [2, 0]: c then the output will be a `SparseTensor` of shape `[5, 4]` and `indices` / `values`: [0, 2]: c [1, 0]: a [1, 3]: d [3, 0]: b Args: sp_input: The input `SparseTensor`. perm: A permutation of the dimensions of `sp_input`. name: A name prefix for the returned tensors (optional) Returns: A transposed `SparseTensor`. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9875,map_values,tensorflow/tensorflow/python/ops/sparse_ops.py,2778,function,"Applies `op` to the `.values` tensor of one or more `SparseTensor`s. Replaces any `SparseTensor` in `args` or `kwargs` with its `values` tensor (which contains the non-default values for the SparseTensor), and then calls `op`. Returns a `SparseTensor` that is constructed from the input `SparseTensor`s' `indices`, `dense_shape`, and the value returned by the `op`. If the input arguments contain multiple `SparseTensor`s, then they must have equal `indices` and dense shapes. Examples: >>> s = tf.sparse.from_dense([[1, 2, 0], ... [0, 4, 0], ... 
[1, 0, 0]]) >>> tf.sparse.to_dense(tf.sparse.map_values(tf.ones_like, s)).numpy() array([[1, 1, 0], [0, 1, 0], [1, 0, 0]], dtype=int32) >>> tf.sparse.to_dense(tf.sparse.map_values(tf.multiply, s, s)).numpy() array([[ 1, 4, 0], [ 0, 16, 0], [ 1, 0, 0]], dtype=int32) >>> tf.sparse.to_dense(tf.sparse.map_values(tf.add, s, 5)).numpy() array([[6, 7, 0], [0, 9, 0], [6, 0, 0]], dtype=int32) Note: even though `tf.add(0, 5) != 0`, implicit zeros will remain unchanged. However, if the sparse tensor contains any explicit zeros, these will be affected by the mapping! Args: op: The operation that should be applied to the SparseTensor `values`. `op` is typically an element-wise operation (such as math_ops.add), but any operation that preserves the shape can be used. *args: Arguments for `op`. **kwargs: Keyword arguments for `op`. Returns: A `SparseTensor` whose `indices` and `dense_shape` matches the `indices` and `dense_shape` of all input `SparseTensor`s. Raises: ValueError: If args contains no `SparseTensor`, or if the `indices` or `dense_shape`s of the input `SparseTensor`s are not equal." 9876,_assert_sparse_compatible,tensorflow/tensorflow/python/ops/sparse_ops.py,2843,function,"Check that all of `sparse_tensors` have same `indices` and `dense_shape`. Args: sparse_tensors: A list of sparse tensors. Returns: An op to be used as a control dependency." 9877,_replace_sparse_with_values,tensorflow/tensorflow/python/ops/sparse_ops.py,2864,function,"Replace `SparseTensor`s with their values in `value`. Each `SparseTensor` in `value` is replaced by its `values` tensor, and collects all `SparseTensor`s in `sparse_list`. Args: value: A structure of `Tensor`s and `SparseTensor`s. sparse_list: A list. Output parameter that collects all `SparseTensor`s in `value`. Returns: `value` with each SparseTensor replaced by its `.values` attribute." 9878,_add_sparse_to_tensors_map,tensorflow/tensorflow/python/ops/sparse_ops.py,2889,function,"Add a `SparseTensor` to a `SparseTensorsMap` and return its handle. Args: sp_input: The input `SparseTensor`. container: The container for the underlying `SparseTensorsMap` (optional). shared_name: The shared name for the underlying `SparseTensorsMap` (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string 1-vector (1D `Tensor`), with the single element representing a unique handle to a `SparseTensor` stored by the `SparseTensorMap` underlying this op. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9879,_add_many_sparse_to_tensors_map,tensorflow/tensorflow/python/ops/sparse_ops.py,2921,function,"Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. container: The container for the underlying `SparseTensorsMap` (optional). shared_name: The shared name for the underlying `SparseTensorsMap` (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string matrix (2-D `Tensor`) with `N` rows and `1` column. 
Each row represents a unique handle to a `SparseTensor` stored by the `SparseTensorMap` underlying this op. Raises: TypeError: If `sp_input` is not a `SparseTensor`." 9880,_take_many_sparse_from_tensors_map,tensorflow/tensorflow/python/ops/sparse_ops.py,2961,function,"Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. The input `sparse_handles` must be a string matrix of shape `[N, 1]` where `N` is the minibatch size and the rows correspond to packed outputs of `add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension). The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `sparse.reorder` to restore index ordering. For example, if the serialized input is a `[2, 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: sparse_map_op: The `Operation` that created the original handles. Usually this is, e.g., `add_sparse_to_tensors_map(...).op`. sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`. The serialized and packed `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` representing the deserialized `SparseTensor`s, concatenated along the `SparseTensor`s' first dimension. All of the serialized `SparseTensor`s must have had the same rank and type." 9881,_UnaryMapValueDispatcher,tensorflow/tensorflow/python/ops/sparse_ops.py,3047,class,OpDispatcher for unary ops that maps base function across sparse values. 9882,SparseOpsTest,tensorflow/tensorflow/python/ops/sparse_ops_test.py,42,class, 9883,lbeta,tensorflow/tensorflow/python/ops/special_math_ops.py,53,function,"Computes \\(ln(|Beta(x)|)\\), reducing along the last dimension. Given one-dimensional $z = [z_1,...,z_K]$, we define $$Beta(z) = \frac{\prod_j \Gamma(z_j)}{\Gamma(\sum_j z_j)},$$ where $\Gamma$ is the gamma function. And for $n + 1$ dimensional $x$ with shape $[N_1, ..., N_n, K]$, we define $$lbeta(x)[i_1, ..., i_n] = \log{|Beta(x[i_1, ..., i_n, :])|}.$$ In other words, the last dimension is treated as the $z$ vector. Note that if $z = [u, v]$, then $$Beta(z) = \frac{\Gamma(u)\Gamma(v)}{\Gamma(u + v)} = \int_0^1 t^{u-1} (1 - t)^{v-1} \mathrm{d}t,$$ which defines the traditional bivariate beta function. If the last dimension is empty, we follow the convention that the sum over the empty set is zero, and the product is one. Args: x: A rank `n + 1` `Tensor`, `n >= 0` with type `float`, or `double`. name: A name for the operation (optional). Returns: The logarithm of \\(|Beta(x)|\\) reducing along the last dimension." 9884,dawsn,tensorflow/tensorflow/python/ops/special_math_ops.py,108,function,"Computes Dawson's integral of `x` element-wise. 
Dawson's integral is defined as `exp(-x**2)` times the integral of `exp(t**2)` from `0` to `x`, with the domain of definition all real numbers. Dawson's function is odd. >>> tf.math.special.dawsn([-1., -0.5, 0.5, 1.]).numpy() array([-0.5380795, -0.4244364, 0.4244364, 0.5380795], dtype=float32) This implementation is based on the Cephes math library. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.dawsn @end_compatibility" 9885,expint,tensorflow/tensorflow/python/ops/special_math_ops.py,138,function,"Computes the Exponential integral of `x` element-wise. The Exponential integral is defined as the integral of `exp(t) / t` from `-inf` to `x`, with the domain of definition all positive real numbers. >>> tf.math.special.expint([1., 1.1, 2.1, 4.1]).numpy() array([ 1.8951179, 2.1673784, 5.3332353, 21.048464], dtype=float32) This implementation is based on the Cephes math library. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.expi @end_compatibility" 9886,fresnel_cos,tensorflow/tensorflow/python/ops/special_math_ops.py,167,function,"Computes Fresnel's cosine integral of `x` element-wise. The Fresnel cosine integral is defined as the integral of `cos(t^2)` from `0` to `x`, with the domain of definition all real numbers. The Fresnel cosine integral is odd. >>> tf.math.special.fresnel_cos([-1., -0.1, 0.1, 1.]).numpy() array([-0.7798934 , -0.09999753, 0.09999753, 0.7798934 ], dtype=float32) This implementation is based on the Cephes math library. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.fresnel second output. @end_compatibility" 9887,fresnel_sin,tensorflow/tensorflow/python/ops/special_math_ops.py,197,function,"Computes Fresnel's sine integral of `x` element-wise. The Fresnel sine integral is defined as the integral of `sin(t^2)` from `0` to `x`, with the domain of definition all real numbers. >>> tf.math.special.fresnel_sin([-1., -0.1, 0.1, 1.]).numpy() array([-0.43825912, -0.00052359, 0.00052359, 0.43825912], dtype=float32) This implementation is based on the Cephes math library. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.fresnel first output. @end_compatibility" 9888,spence,tensorflow/tensorflow/python/ops/special_math_ops.py,226,function,"Computes Spence's integral of `x` element-wise. Spence's integral is defined as the integral of `log(t) / (1 - t)` from `1` to `x`, with the domain of definition all non-negative real numbers. >>> tf.math.special.spence([0.5, 1., 2., 3.]).numpy() array([ 0.58224034, 0. , -0.82246685, -1.4367464], dtype=float32) This implementation is based on the Cephes math library. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `float32`, `float64`. 
name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.spence @end_compatibility" 9889,bessel_i0,tensorflow/tensorflow/python/ops/special_math_ops.py,255,function,"Computes the Bessel i0 function of `x` element-wise. Modified Bessel function of order 0. It is preferable to use the numerically stabler function `i0e(x)` instead. >>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy() array([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.i0 @end_compatibility" 9890,bessel_i0e,tensorflow/tensorflow/python/ops/special_math_ops.py,283,function,"Computes the Bessel i0e function of `x` element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_i0e([-1., -0.5, 0.5, 1.]).numpy() array([0.46575961, 0.64503527, 0.64503527, 0.46575961], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.i0e @end_compatibility" 9891,bessel_i1,tensorflow/tensorflow/python/ops/special_math_ops.py,309,function,"Computes the Bessel i1 function of `x` element-wise. Modified Bessel function of order 1. It is preferable to use the numerically stabler function `i1e(x)` instead. >>> tf.math.special.bessel_i1([-1., -0.5, 0.5, 1.]).numpy() array([-0.5651591 , -0.25789431, 0.25789431, 0.5651591 ], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.i1 @end_compatibility" 9892,bessel_i1e,tensorflow/tensorflow/python/ops/special_math_ops.py,337,function,"Computes the Bessel i1e function of `x` element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_i1e([-1., -0.5, 0.5, 1.]).numpy() array([-0.20791042, -0.15642083, 0.15642083, 0.20791042], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.i1e @end_compatibility" 9893,bessel_k0,tensorflow/tensorflow/python/ops/special_math_ops.py,363,function,"Computes the Bessel k0 function of `x` element-wise. Modified Bessel function of order 0. It is preferable to use the numerically stabler function `k0e(x)` instead. >>> tf.math.special.bessel_k0([0.5, 1., 2., 4.]).numpy() array([0.92441907, 0.42102444, 0.11389387, 0.01115968], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. 
@compatibility(scipy) Equivalent to scipy.special.k0 @end_compatibility" 9894,bessel_k0e,tensorflow/tensorflow/python/ops/special_math_ops.py,391,function,"Computes the Bessel k0e function of `x` element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_k0e([0.5, 1., 2., 4.]).numpy() array([1.52410939, 1.14446308, 0.84156822, 0.60929767], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.k0e @end_compatibility" 9895,bessel_k1,tensorflow/tensorflow/python/ops/special_math_ops.py,417,function,"Computes the Bessel k1 function of `x` element-wise. Modified Bessel function of order 1. It is preferable to use the numerically stabler function `k1e(x)` instead. >>> tf.math.special.bessel_k1([0.5, 1., 2., 4.]).numpy() array([1.65644112, 0.60190723, 0.13986588, 0.0124835 ], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.k1 @end_compatibility" 9896,bessel_k1e,tensorflow/tensorflow/python/ops/special_math_ops.py,445,function,"Computes the Bessel k1e function of `x` element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_k1e([0.5, 1., 2., 4.]).numpy() array([2.73100971, 1.63615349, 1.03347685, 0.68157595], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.k1e @end_compatibility" 9897,bessel_j0,tensorflow/tensorflow/python/ops/special_math_ops.py,471,function,"Computes the Bessel j0 function of `x` element-wise. Bessel function of the first kind of order 0. >>> tf.math.special.bessel_j0([0.5, 1., 2., 4.]).numpy() array([ 0.93846981, 0.76519769, 0.22389078, -0.39714981], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.j0 @end_compatibility" 9898,bessel_j1,tensorflow/tensorflow/python/ops/special_math_ops.py,497,function,"Computes the Bessel j1 function of `x` element-wise. Bessel function of the first kind of order 1. >>> tf.math.special.bessel_j1([0.5, 1., 2., 4.]).numpy() array([ 0.24226846, 0.44005059, 0.57672481, -0.06604333], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.j1 @end_compatibility" 9899,bessel_y0,tensorflow/tensorflow/python/ops/special_math_ops.py,523,function,"Computes the Bessel y0 function of `x` element-wise. Bessel function of the second kind of order 0. >>> tf.math.special.bessel_y0([0.5, 1., 2., 4.]).numpy() array([-0.44451873, 0.08825696, 0.51037567, -0.01694074], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. 
Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.y0 @end_compatibility" 9900,bessel_y1,tensorflow/tensorflow/python/ops/special_math_ops.py,549,function,"Computes the Bessel y1 function of `x` element-wise. Bessel function of the second kind of order 1. >>> tf.math.special.bessel_y1([0.5, 1., 2., 4.]).numpy() array([-1.47147239, -0.78121282, -0.10703243, 0.39792571], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.y1 @end_compatibility" 9901,_einsum_grad,tensorflow/tensorflow/python/ops/special_math_ops.py,574,function, 9902,_enclosing_tpu_context,tensorflow/tensorflow/python/ops/special_math_ops.py,596,function, 9903,einsum,tensorflow/tensorflow/python/ops/special_math_ops.py,608,function,"Tensor contraction over specified indices and outer product. Einsum allows defining Tensors by defining their element-wise computation. This computation is defined by `equation`, a shorthand form based on Einstein summation. As an example, consider multiplying two matrices A and B to form a matrix C. The elements of C are given by: ``` C[i,k] = sum_j A[i,j] * B[j,k] ``` The corresponding `equation` is: ``` ij,jk->ik ``` In general, to convert the element-wise equation into the `equation` string, use the following procedure (intermediate strings for matrix multiplication example provided in parentheses): 1. remove variable names, brackets, and commas, (`ik = sum_j ij * jk`) 2. replace ""*"" with "","", (`ik = sum_j ij , jk`) 3. drop summation signs, and (`ik = ij, jk`) 4. move the output to the right, while replacing ""="" with ""->"". (`ij,jk->ik`) Many common operations can be expressed in this way. For example: ```python # Matrix multiplication einsum('ij,jk->ik', m0, m1) # output[i,k] = sum_j m0[i,j] * m1[j, k] # Dot product einsum('i,i->', u, v) # output = sum_i u[i]*v[i] # Outer product einsum('i,j->ij', u, v) # output[i,j] = u[i]*v[j] # Transpose einsum('ij->ji', m) # output[j,i] = m[i,j] # Trace einsum('ii', m) # output = trace(m) = sum_i m[i, i] # Batch matrix multiplication einsum('aij,ajk->aik', s, t) # out[a,i,k] = sum_j s[a,i,j] * t[a, j, k] ``` To enable and control broadcasting, use an ellipsis. For example, to perform batch matrix multiplication with NumPy-style broadcasting across the batch dimensions, use: ```python einsum('...ij,...jk->...ik', u, v) ``` Args: equation: a `str` describing the contraction, in the same format as `numpy.einsum`. *inputs: the inputs to contract (each one a `Tensor`), whose shapes should be consistent with `equation`. **kwargs: - optimize: Optimization strategy to use to find contraction path using opt_einsum. Must be 'greedy', 'optimal', 'branch-2', 'branch-all' or 'auto'. (optional, default: 'greedy'). - name: A name for the operation (optional). Returns: The contracted `Tensor`, with shape determined by `equation`. Raises: ValueError: If - the format of `equation` is incorrect, - number of inputs or their shapes are inconsistent with `equation`." 9904,_einsum_v1,tensorflow/tensorflow/python/ops/special_math_ops.py,687,function,Legacy implementation of einsum without using EinsumOp. 
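To make the `equation` grammar above concrete, a small sketch exercising `tf.einsum` (shapes chosen arbitrarily):

```python
import tensorflow as tf

m0 = tf.random.normal([2, 3])
m1 = tf.random.normal([3, 5])

# Matrix multiplication: C[i, k] = sum_j A[i, j] * B[j, k]
c = tf.einsum('ij,jk->ik', m0, m1)  # shape [2, 5]

# Ellipsis broadcasting over leading batch dimensions.
u = tf.random.normal([7, 1, 2, 3])
v = tf.random.normal([4, 3, 5])
w = tf.einsum('...ij,...jk->...ik', u, v)  # shape [7, 4, 2, 5]
```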
9905,_einsum_v1_parse_and_resolve_equation,tensorflow/tensorflow/python/ops/special_math_ops.py,751,function,"Helper for einsum() that splits/resolves inputs & outputs. Args: equation: Equation string given as argument to einsum(). input_shapes: List of the shapes of all inputs given to einsum() Returns: input_axis_labels, output_axis_labels where: input_axis_labels: List of length len(input_shapes) of strings representing the character label for each dimension of each given input, resolving any broadcast (...) axes, output_axis_labels: A string of character labels for each axis of the output tensor, filling in missing output subscripts and broadcast axes. Raises: ValueError: If the equation is in an incorrect format, an incorrect number of inputs is given, or broadcast axes ""..."" or output axes could not be resolved." 9906,_einsum_v1_reduction,tensorflow/tensorflow/python/ops/special_math_ops.py,834,function,"Helper for einsum() that computes the result of a two-argument einsum(). Args: t0: a `Tensor` t0_axis_labels: a string of axis labels. This string's length must equal the rank of t0. t1: a `Tensor` t1_axis_labels: a string of axis labels. This string's length must equal the rank of t1. axes_to_sum: set of labels of axes to be summed over Returns: A `Tensor` whose elements are obtained by summing, over all axes in `axes_to_sum`, the products of the corresponding elements of `t0` and `t1`. For example, if t0_axis_labels == 'abijk', t1_axis_labels == 'acjkl', and axes_to_sum == {j,k}, this will return a tensor x where out[a,b,c,i,l] = sum_j sum_k t0[a,b,i,j,k] * t1[a,c,j,k,l] Raises: ValueError: if the rank of `t0` does not match the length of `t0_axis_labels`, or that of `t1` does not match the length of `t1_axis_labels`." 9907,_transpose_if_necessary,tensorflow/tensorflow/python/ops/special_math_ops.py,962,function,"Like transpose(), but avoids creating a new tensor if possible." 9908,_reshape_if_necessary,tensorflow/tensorflow/python/ops/special_math_ops.py,970,function,"Like reshape(), but avoids creating a new tensor if possible." 9909,_get_shape,tensorflow/tensorflow/python/ops/special_math_ops.py,983,function,"Like get_shape().as_list(), but explicitly queries the shape of a tensor if necessary to ensure that the returned value contains no unknown value." 9910,_total_size,tensorflow/tensorflow/python/ops/special_math_ops.py,997,function,"Given list of tensor shape values, returns total size. If shape_values contains tensor values (which are results of array_ops.shape), then it returns a scalar tensor. If not, it returns an integer." 9911,_exponential_space_einsum_v1,tensorflow/tensorflow/python/ops/special_math_ops.py,1009,function,Fallback implementation that supports summing an index over > 2 inputs. 9912,_einsum_v2,tensorflow/tensorflow/python/ops/special_math_ops.py,1085,function,Implementation of einsum utilizing opt_einsum and EinsumOp. 9913,_get_opt_einsum_contract_path,tensorflow/tensorflow/python/ops/special_math_ops.py,1136,function,Returns the (memoized) result of opt_einsum.contract_path. 9914,_einsum_v2_parse_and_resolve_equation,tensorflow/tensorflow/python/ops/special_math_ops.py,1159,function,Helper which validates einsum equation and resolves input shapes. 
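The two-argument reduction described for `_einsum_v1_reduction` above can be checked against the public `tf.einsum` (an illustrative sketch; the helper itself is internal):

```python
import tensorflow as tf

t0 = tf.random.normal([2, 3, 4, 5, 6])  # axis labels 'abijk'
t1 = tf.random.normal([2, 7, 5, 6, 8])  # axis labels 'acjkl'

# Summing over axes {j, k}:
# out[a, b, c, i, l] = sum_j sum_k t0[a, b, i, j, k] * t1[a, c, j, k, l]
out = tf.einsum('abijk,acjkl->abcil', t0, t1)  # shape [2, 3, 7, 4, 8]
```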
9915,LBetaTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,44,class, 9916,DawsnTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,185,class, 9917,ExpintTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,227,class, 9918,FresnelCosTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,269,class, 9919,FresnelSinTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,314,class, 9920,SpenceTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,359,class, 9921,BesselTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,407,class, 9922,EinsumTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,638,class, 9923,EinsumGradTest,tensorflow/tensorflow/python/ops/special_math_ops_test.py,955,class, 9924,EinsumBenchmark,tensorflow/tensorflow/python/ops/special_math_ops_test.py,1077,class, 9925,build_graph,tensorflow/tensorflow/python/ops/split_benchmark.py,34,function,"Build a graph containing a sequence of split operations. Args: device: string, the device to run on. input_shape: shape of the input tensor. output_sizes: size of each output along axis. axis: axis to be split along. Returns: An array of tensors to run()" 9926,SplitBenchmark,tensorflow/tensorflow/python/ops/split_benchmark.py,55,class,Benchmark split! 9927,variable_op,tensorflow/tensorflow/python/ops/state_ops.py,41,function,Deprecated. Use variable_op_v2 instead. 9928,variable_op_v2,tensorflow/tensorflow/python/ops/state_ops.py,55,function,"Create a variable Operation. See also variables.Variable. Args: shape: The shape of the tensor managed by this variable. dtype: The underlying type of the tensor values. name: optional name to use for the variable op. container: An optional string. Defaults to """". If non-empty, this variable is placed in the given container. Otherwise, a default container is used. shared_name: An optional string. Defaults to """". If non-empty, this variable is named in the given bucket with this shared_name. Otherwise, the node name is used instead. Returns: A variable tensor." 9929,init_variable,tensorflow/tensorflow/python/ops/state_ops.py,82,function,"Initializes variable with ""init"". This op does the following: if init is a Tensor, v = init if callable(init): v = init(VariableShape(v), v.dtype) Args: v: Variable to initialize. init: Tensor to assign to v, Or an object convertible to Tensor, e.g. a NumPy array, Or an Initializer that generates a tensor given the shape and type of v. An ""Initializer"" is a callable that returns a tensor that ""v"" should be set to. It will be called as init(shape, dtype). name: Optional name for the op. Returns: The operation that initializes v." 9930,is_variable_initialized,tensorflow/tensorflow/python/ops/state_ops.py,117,function,"Checks whether a tensor has been initialized. Outputs boolean scalar indicating whether the tensor has been initialized. Args: ref: A mutable `Tensor`. Should be from a `Variable` node. May be uninitialized. name: A name for the operation (optional). Returns: A `Tensor` of type `bool`." 9931,assign_sub,tensorflow/tensorflow/python/ops/state_ops.py,137,function,"Update `ref` by subtracting `value` from it. This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Unlike `tf.math.subtract`, this op does not broadcast. `ref` and `value` must have the same shape. Args: ref: A mutable `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be from a `Variable` node. value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to be subtracted from the variable. use_locking: An optional `bool`. Defaults to `False`. If True, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as ""ref"". Returned as a convenience for operations that want to use the new value after the variable has been updated." 9932,assign_add,tensorflow/tensorflow/python/ops/state_ops.py,168,function,"Update `ref` by adding `value` to it. This operation outputs ""ref"" after the update is done. This makes it easier to chain operations that need to use the reset value. Unlike `tf.math.add`, this op does not broadcast. `ref` and `value` must have the same shape. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. Should be from a `Variable` node. value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to be added to the variable. use_locking: An optional `bool`. Defaults to `False`. If True, the addition will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as ""ref"". Returned as a convenience for operations that want to use the new value after the variable has been updated." 9933,assign,tensorflow/tensorflow/python/ops/state_ops.py,199,function,"Update `ref` by assigning `value` to it. This operation outputs a Tensor that holds the new value of `ref` after the value has been assigned. This makes it easier to chain operations that need to use the reset value. Args: ref: A mutable `Tensor`. Should be from a `Variable` node. May be uninitialized. value: A `Tensor`. Must have the same shape and dtype as `ref`. The value to be assigned to the variable. validate_shape: An optional `bool`. Defaults to `True`. If true, the operation will validate that the shape of 'value' matches the shape of the Tensor being assigned to. If false, 'ref' will take on the shape of 'value'. use_locking: An optional `bool`. Defaults to `True`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A `Tensor` that will hold the new value of `ref` after the assignment has completed." 9934,count_up_to,tensorflow/tensorflow/python/ops/state_ops.py,233,function,"Increments 'ref' until it reaches 'limit'. Args: ref: A Variable. Must be one of the following types: `int32`, `int64`. Should be from a scalar `Variable` node. limit: An `int`. If incrementing ref would bring it above limit, instead generates an 'OutOfRange' error. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `ref`. A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct." 9935,scatter_update,tensorflow/tensorflow/python/ops/state_ops.py,256,function,"Applies sparse updates to a variable reference. This operation computes ```python # Scalar indices ref[indices, ...] = updates[...] # Vector indices (for each i) ref[indices[i], ...] 
= updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. If values in `ref` are to be updated more than once, because there are duplicate entries in `indices`, the order in which the updates happen for each value is undefined. Requires `updates.shape = indices.shape + ref.shape[1:]`.
Args: ref: A `Variable`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to store in `ref`. use_locking: An optional `bool`. Defaults to `True`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as `ref`. Returned as a convenience for operations that want to use the updated values after the update is done." 9936,scatter_nd_update,tensorflow/tensorflow/python/ops/state_ops.py,310,function,"Applies sparse `updates` to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to update 4 scattered elements in a rank-1 tensor with 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) update = tf.compat.v1.scatter_nd_update(ref, indices, updates) with tf.compat.v1.Session() as sess: print(sess.run(update)) ``` The resulting update to ref would look like this: [1, 11, 3, 10, 9, 6, 7, 12] See `tf.scatter_nd` for more details about how to make updates to slices. Args: ref: A Variable. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into ref. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to store in ref. use_locking: An optional `bool`. Defaults to `True`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: The value of the variable after the update." 9937,scatter_add,tensorflow/tensorflow/python/ops/state_ops.py,372,function,"Adds sparse updates to the variable referenced by `resource`. This operation computes ```python # Scalar indices ref[indices, ...] += updates[...] # Vector indices (for each i) ref[indices[i], ...] += updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the updated value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions add. Requires `updates.shape = indices.shape + ref.shape[1:]`.
Args: ref: A `Variable`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to add to `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as `ref`. Returned as a convenience for operations that want to use the updated values after the update is done." 9938,scatter_nd_add,tensorflow/tensorflow/python/ops/state_ops.py,424,function,"Applies sparse addition to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] ``` For example, say we want to add 4 scattered elements to a rank-1 tensor with 8 elements. In Python, that addition would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) add = tf.compat.v1.scatter_nd_add(ref, indices, updates) with tf.compat.v1.Session() as sess: print(sess.run(add)) ``` The resulting update to ref would look like this: [1, 13, 3, 14, 14, 6, 7, 20] See `tf.scatter_nd` for more details about how to make updates to slices. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into ref. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to add to ref. use_locking: An optional `bool`. Defaults to `False`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`." 9939,scatter_sub,tensorflow/tensorflow/python/ops/state_ops.py,487,function,"Subtracts sparse updates from a variable reference. ```python # Scalar indices ref[indices, ...] -= updates[...] # Vector indices (for each i) ref[indices[i], ...] -= updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their (negated) contributions add. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to subtract from `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the subtraction will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`." 9940,scatter_nd_sub,tensorflow/tensorflow/python/ops/state_ops.py,541,function,"Applies sparse subtraction to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] ``` For example, say we want to subtract 4 scattered elements from a rank-1 tensor with 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1], [7]]) updates = tf.constant([9, 10, 11, 12]) op = tf.compat.v1.scatter_nd_sub(ref, indices, updates) with tf.compat.v1.Session() as sess: print(sess.run(op)) ``` The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See `tf.scatter_nd` for more details about how to make updates to slices. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into ref. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to subtract from ref. use_locking: An optional `bool`. Defaults to `False`. If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`." 9941,scatter_mul,tensorflow/tensorflow/python/ops/state_ops.py,605,function,"Multiplies sparse updates into a variable reference. This operation computes ```python # Scalar indices ref[indices, ...] *= updates[...] # Vector indices (for each i) ref[indices[i], ...] *= updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions multiply. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. Args: ref: A mutable `Tensor`.
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to multiply into `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the operation will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`." 9942,scatter_div,tensorflow/tensorflow/python/ops/state_ops.py,657,function,"Divides a variable reference by sparse updates. This operation computes ```python # Scalar indices ref[indices, ...] /= updates[...] # Vector indices (for each i) ref[indices[i], ...] /= updates[i, ...] # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] ``` This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions divide. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. Args: ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of values that `ref` is divided by. use_locking: An optional `bool`. Defaults to `False`. If True, the operation will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`." 9943,scatter_max,tensorflow/tensorflow/python/ops/state_ops.py,709,function,"Reduces sparse updates into a variable reference using the `max` operation. This operation computes # Scalar indices ref[indices, ...] = max(ref[indices, ...], updates[...]) # Vector indices (for each i) ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions combine. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
Args: ref: A mutable `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to reduce into `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the update will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`." 9944,scatter_min,tensorflow/tensorflow/python/ops/state_ops.py,764,function,"Reduces sparse updates into a variable reference using the `min` operation. This operation computes # Scalar indices ref[indices, ...] = min(ref[indices, ...], updates[...]) # Vector indices (for each i) ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) # High rank indices (for each i, ..., j) ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) This operation outputs `ref` after the update is done. This makes it easier to chain operations that need to use the reset value. Duplicate entries are handled correctly: if multiple `indices` reference the same location, their contributions combine. Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
Args: ref: A mutable `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a `Variable` node. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A tensor of indices into the first dimension of `ref`. updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated values to reduce into `ref`. use_locking: An optional `bool`. Defaults to `False`. If True, the update will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `ref`." 9945,batch_scatter_update,tensorflow/tensorflow/python/ops/state_ops.py,821,function,"Generalization of `tf.compat.v1.scatter_update` to an axis different than 0. Analogous to `batch_gather`. This assumes that `ref`, `indices` and `updates` have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: `num_prefix_dims = indices.ndims - 1` `batch_dim = num_prefix_dims + 1` `updates.shape = indices.shape + var.shape[batch_dim:]` where `updates.shape[:num_prefix_dims]` `== indices.shape[:num_prefix_dims]` `== var.shape[:num_prefix_dims]` And the operation performed can be expressed as: `var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]` When indices is a 1D tensor, this operation is equivalent to `tf.compat.v1.scatter_update`. To avoid this operation, there are two alternatives: 1) Reshaping the variable by merging the first `ndims` dimensions. However, this is not possible because `tf.reshape` returns a Tensor, which we cannot use `tf.compat.v1.scatter_update` on. 2) Looping over the first `ndims` of the variable and using `tf.compat.v1.scatter_update` on the subtensors that result from slicing the first dimension. This is a valid option for `ndims = 1`, but less efficient than this implementation. See also `tf.compat.v1.scatter_update` and `tf.compat.v1.scatter_nd_update`. Args: ref: `Variable` to scatter onto. indices: Tensor containing indices as described above. updates: Tensor of updates to apply to `ref`. use_locking: Boolean indicating whether to lock the writing operation. name: Optional scope name string. Returns: Ref to `variable` after it has been modified. Raises: ValueError: If the initial `ndims` of `ref`, `indices`, and `updates` are not the same." 9946,Algorithm,tensorflow/tensorflow/python/ops/stateful_random_ops.py,67,class, 9947,non_deterministic_ints,tensorflow/tensorflow/python/ops/stateful_random_ops.py,77,function,"Non-deterministically generates some integers. This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. Args: shape: the shape of the result. dtype: (optional) the dtype of the result. Returns: a tensor whose element values are non-deterministically chosen." 9948,_uint_to_int,tensorflow/tensorflow/python/ops/stateful_random_ops.py,94,function, 9949,_make_1d_state,tensorflow/tensorflow/python/ops/stateful_random_ops.py,100,function,"Makes a 1-D RNG state. Args: state_size: an integer. seed: an integer or 1-D tensor. Returns: a 1-D tensor of shape [state_size] and dtype STATE_TYPE."
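The `state_ops` scatter entries above (9935–9945) share one calling convention: a ref `Variable`, integer `indices` into its first dimension, and `updates` with `updates.shape = indices.shape + ref.shape[1:]`, with the op returning `ref` for chaining. A minimal graph-mode sketch of that convention, assuming the `tf.compat.v1` endpoints catalogued above:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Rank-2 variable; updates.shape == indices.shape + ref.shape[1:] == [2, 2].
ref = tf.Variable([[1., 2.], [3., 4.], [5., 6.]])
indices = tf.constant([0, 2])
updates = tf.constant([[10., 20.], [50., 60.]])

# scatter_update overwrites whole rows; scatter_add, scatter_sub, scatter_max,
# etc. take the same arguments but combine with the existing values instead.
update_op = tf.scatter_update(ref, indices, updates)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(update_op))  # [[10. 20.] [ 3.  4.] [50. 60.]]
```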
9950,_get_state_size,tensorflow/tensorflow/python/ops/stateful_random_ops.py,139,function, 9951,_check_state_shape,tensorflow/tensorflow/python/ops/stateful_random_ops.py,148,function, 9952,_make_state_from_seed,tensorflow/tensorflow/python/ops/stateful_random_ops.py,154,function, 9953,_convert_alg_to_int,tensorflow/tensorflow/python/ops/stateful_random_ops.py,158,function,"Converts algorithm to an integer. Args: alg: can be one of these types: integer, Algorithm, Tensor, string. Allowed strings are ""philox"" and ""threefry"". Returns: An integer, unless the input is a Tensor in which case a Tensor is returned." 9954,create_rng_state,tensorflow/tensorflow/python/ops/stateful_random_ops.py,187,function,"Creates an RNG state from an integer or a vector. Example: >>> tf.random.create_rng_state( ... 1234, ""philox"") array([1234, 0, 0]) >>> tf.random.create_rng_state( ... [12, 34], ""threefry"") array([12, 34]) Args: seed: an integer or 1-D numpy array. alg: the RNG algorithm. Can be a string, an `Algorithm` or an integer. Returns: a 1-D numpy array whose size depends on the algorithm." 9955,_shape_tensor,tensorflow/tensorflow/python/ops/stateful_random_ops.py,210,function,"Convert to an int32 or int64 tensor, defaulting to int64 if empty." 9956,_convert_to_state_tensor,tensorflow/tensorflow/python/ops/stateful_random_ops.py,219,function, 9957,GeneratorSpec,tensorflow/tensorflow/python/ops/stateful_random_ops.py,226,class,TypeSpec for Generator. 9958,Generator,tensorflow/tensorflow/python/ops/stateful_random_ops.py,259,class,"Random-number generator. Example: Creating a generator from a seed: >>> g = tf.random.Generator.from_seed(1234) >>> g.normal(shape=(2, 3)) Creating a generator from a non-deterministic state: >>> g = tf.random.Generator.from_non_deterministic_state() >>> g.normal(shape=(2, 3)) All the constructors allow explicitly choosing a Random-Number-Generation (RNG) algorithm. Supported algorithms are `""philox""` and `""threefry""`. For example: >>> g = tf.random.Generator.from_seed(123, alg=""philox"") >>> g.normal(shape=(2, 3)) CPU, GPU and TPU with the same algorithm and seed will generate the same integer random numbers. Floating-point results (such as the output of `normal`) may have small numerical discrepancies between different devices. This class uses a `tf.Variable` to manage its internal state. Every time random numbers are generated, the state of the generator will change. For example: >>> g = tf.random.Generator.from_seed(1234) >>> g.state >>> g.normal(shape=(2, 3)) <...> >>> g.state The shape of the state is algorithm-specific. There is also a global generator: >>> g = tf.random.get_global_generator() >>> g.normal(shape=(2, 3)) " 9959,get_global_generator,tensorflow/tensorflow/python/ops/stateful_random_ops.py,915,function,"Retrieves the global generator. This function will create the global generator the first time it is called, and the generator will be placed at the default device at that time, so one needs to be careful when this function is first called. Using a generator placed on a less-ideal device will incur performance regression. Returns: The global `tf.random.Generator` object." 9960,set_global_generator,tensorflow/tensorflow/python/ops/stateful_random_ops.py,935,function,"Replaces the global generator with another `Generator` object.
This function creates a new Generator object (and the Variable object within), which does not work well with tf.function because (1) tf.function puts restrictions on Variable creation thus set_global_generator can't be freely used inside tf.function; (2) redirecting a global variable to a new object is problematic with tf.function because the old object may be captured by a 'tf.function'ed function and still be used by it. A 'tf.function'ed function only keeps weak references to variables, so deleting a variable and then calling that function again may raise an error, as demonstrated by random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun. Args: generator: the new `Generator` object." 9961,StatefulRandomOpsTest,tensorflow/tensorflow/python/ops/stateful_random_ops_test.py,60,class, 9962,split,tensorflow/tensorflow/python/ops/stateless_random_ops.py,45,function,"Splits an RNG seed into `num` new seeds by adding a leading axis. Example: >>> seed = [1, 2] >>> new_seeds = tf.random.experimental.stateless_split(seed, num=3) >>> print(new_seeds) tf.Tensor( [[1105988140 1738052849] [-335576002 370444179] [ 10670227 -246211131]], shape=(3, 2), dtype=int32) >>> tf.random.stateless_normal(shape=[3], seed=new_seeds[0, :]) Args: seed: an RNG seed (a tensor with shape [2] and dtype `int32` or `int64`). (When using XLA, only `int32` is allowed.) num: optional, a positive integer or scalar tensor indicating the number of seeds to produce (default 2). Returns: A tensor with shape [num, 2] representing `num` new seeds. It will have the same dtype as `seed` (if `seed` doesn't have an explicit dtype, the dtype will be determined by `tf.convert_to_tensor`)." 9963,fold_in,tensorflow/tensorflow/python/ops/stateless_random_ops.py,79,function,"Folds in data to an RNG seed to form a new RNG seed. For example, in a distributed-training setting, suppose we have a master seed and a replica ID. We want to fold the replica ID into the master seed to form a ""replica seed"" to be used by that replica later on, so that different replicas will generate different random numbers but the reproducibility of the whole system can still be controlled by the master seed: >>> master_seed = [1, 2] >>> replica_id = 3 >>> replica_seed = tf.random.experimental.stateless_fold_in( ... master_seed, replica_id) >>> print(replica_seed) tf.Tensor([1105988140 3], shape=(2,), dtype=int32) >>> tf.random.stateless_normal(shape=[3], seed=replica_seed) Args: seed: an RNG seed (a tensor with shape [2] and dtype `int32` or `int64`). (When using XLA, only `int32` is allowed.) data: an `int32` or `int64` scalar representing data to be folded in to the seed. Returns: A new RNG seed that is a deterministic function of the inputs and is statistically safe for producing a stream of new pseudo-random values. It will have the same dtype as `data` (if `data` doesn't have an explicit dtype, the dtype will be determined by `tf.convert_to_tensor`)." 9964,stateless_random_uniform,tensorflow/tensorflow/python/ops/stateless_random_ops.py,118,function,"Outputs deterministic pseudorandom values from a uniform distribution. This is a stateless version of `tf.random.uniform`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. The generated values follow a uniform distribution in the range `[minval, maxval)`.
The lower bound `minval` is included in the range, while the upper bound `maxval` is excluded. For floats, the default range is `[0, 1)`. For ints, at least `maxval` must be specified explicitly. In the integer case, the random integers are slightly biased unless `maxval - minval` is an exact power of two. The bias is small for values of `maxval - minval` significantly smaller than the range of the output (either `2**32` or `2**64`). For full-range (i.e. inclusive of both max and min) random integers, pass `minval=None` and `maxval=None` with an integer `dtype`. For an integer dtype either both `minval` and `maxval` must be `None` or neither may be `None`. For example: ```python ints = tf.random.stateless_uniform( [10], seed=(2, 3), minval=None, maxval=None, dtype=tf.int32) ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) minval: A Tensor or Python value of type `dtype`, broadcastable with `shape` (for integer types, broadcasting is not supported, so it needs to be a scalar). The lower bound on the range of random values to generate. Pass `None` for full-range integers. Defaults to 0. maxval: A Tensor or Python value of type `dtype`, broadcastable with `shape` (for integer types, broadcasting is not supported, so it needs to be a scalar). The upper bound on the range of random values to generate. Defaults to 1 if `dtype` is floating point. Pass `None` for full-range integers. dtype: The type of the output: `float16`, `float32`, `float64`, `int32`, or `int64`. For unbounded uniform ints (`minval`, `maxval` both `None`), `uint32` and `uint64` may be used. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random uniform values. Raises: ValueError: If `dtype` is integral and only one of `minval` or `maxval` is specified." 9965,stateless_random_binomial,tensorflow/tensorflow/python/ops/stateless_random_ops.py,213,function,"Outputs deterministic pseudorandom values from a binomial distribution. The generated values follow a binomial distribution with specified count and probability of success parameters. This is a stateless version of `tf.random.Generator.binomial`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: ```python counts = [10., 20.] # Probability of success. probs = [0.8] binomial_samples = tf.random.stateless_binomial( shape=[2], seed=[123, 456], counts=counts, probs=probs) counts = ... # Shape [3, 1, 2] probs = ... # Shape [1, 4, 2] shape = [3, 4, 3, 4, 2] # Sample shape will be [3, 4, 3, 4, 2] binomial_samples = tf.random.stateless_binomial( shape=shape, seed=[123, 456], counts=counts, probs=probs) ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) counts: Tensor. The counts of the binomial distribution. Must be broadcastable with `probs`, and broadcastable with the rightmost dimensions of `shape`. probs: Tensor. The probability of success for the binomial distribution. 
Must be broadcastable with `counts` and broadcastable with the rightmost dimensions of `shape`. output_dtype: The type of the output. Default: tf.int32 name: A name for the operation (optional). Returns: samples: A Tensor of the specified shape filled with random binomial values. For each i, each samples[..., i] is an independent draw from the binomial distribution on counts[i] trials with probability of success probs[i]." 9966,stateless_random_gamma,tensorflow/tensorflow/python/ops/stateless_random_ops.py,283,function,"Outputs deterministic pseudorandom values from a gamma distribution. The generated values follow a gamma distribution with specified concentration (`alpha`) and inverse scale (`beta`) parameters. This is a stateless version of `tf.random.gamma`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. A slight difference exists in the interpretation of the `shape` parameter between `stateless_gamma` and `gamma`: in `gamma`, the `shape` is always prepended to the shape of the broadcast of `alpha` with `beta`; whereas in `stateless_gamma` the `shape` parameter must always encompass the shapes of each of `alpha` and `beta` (which must broadcast together to match the trailing dimensions of `shape`). Note: Because internal calculations are done using `float64` and casting has `floor` semantics, we must manually map zero outcomes to the smallest possible positive floating-point value, i.e., `np.finfo(dtype).tiny`. This means that `np.finfo(dtype).tiny` occurs more frequently than it otherwise should. This bias can only happen for small values of `alpha`, i.e., `alpha << 1` or large values of `beta`, i.e., `beta >> 1`. The samples are differentiable w.r.t. alpha and beta. The derivatives are computed using the approach described in (Figurnov et al., 2018). Example: ```python samples = tf.random.stateless_gamma([10, 2], seed=[12, 34], alpha=[0.5, 1.5]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.stateless_gamma([7, 5, 2], seed=[12, 34], alpha=[.5, 1.5]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions alpha = tf.constant([[1.], [3.], [5.]]) beta = tf.constant([[3., 4.]]) samples = tf.random.stateless_gamma( [30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta) # samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions. with tf.GradientTape() as tape: tape.watch([alpha, beta]) loss = tf.reduce_mean(tf.square(tf.random.stateless_gamma( [30, 3, 2], seed=[12, 34], alpha=alpha, beta=beta))) dloss_dalpha, dloss_dbeta = tape.gradient(loss, [alpha, beta]) # unbiased stochastic derivatives of the loss function alpha.shape == dloss_dalpha.shape # True beta.shape == dloss_dbeta.shape # True ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) alpha: Tensor. The concentration parameter of the gamma distribution. Must be broadcastable with `beta`, and broadcastable with the rightmost dimensions of `shape`. beta: Tensor. The inverse scale parameter of the gamma distribution. 
Must be broadcastable with `alpha` and broadcastable with the rightmost dimensions of `shape`. dtype: Floating point dtype of `alpha`, `beta`, and the output. name: A name for the operation (optional). Returns: samples: A Tensor of the specified shape filled with random gamma values. For each i, each `samples[..., i]` is an independent draw from the gamma distribution with concentration alpha[i] and scale beta[i]." 9967,stateless_random_poisson,tensorflow/tensorflow/python/ops/stateless_random_ops.py,383,function,"Outputs deterministic pseudorandom values from a Poisson distribution. The generated values follow a Poisson distribution with specified rate parameter. This is a stateless version of `tf.random.poisson`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware, but may change between versions of TensorFlow or on non-CPU/GPU hardware. A slight difference exists in the interpretation of the `shape` parameter between `stateless_poisson` and `poisson`: in `poisson`, the `shape` is always prepended to the shape of `lam`; whereas in `stateless_poisson` the shape of `lam` must match the trailing dimensions of `shape`. Example: ```python samples = tf.random.stateless_poisson([10, 2], seed=[12, 34], lam=[5, 15]) # samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents # the samples drawn from each distribution samples = tf.random.stateless_poisson([7, 5, 2], seed=[12, 34], lam=[5, 15]) # samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1] # represents the 7x5 samples drawn from each of the two distributions rate = tf.constant([[1.], [3.], [5.]]) samples = tf.random.stateless_poisson([30, 3, 1], seed=[12, 34], lam=rate) # samples has shape [30, 3, 1], with 30 samples each of 3x1 distributions. ``` Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) lam: Tensor. The rate parameter ""lambda"" of the Poisson distribution. Shape must match the rightmost dimensions of `shape`. dtype: Dtype of the samples (int or float dtypes are permissible, as samples are discrete). Default: int32. name: A name for the operation (optional). Returns: samples: A Tensor of the specified shape filled with random Poisson values. For each i, each `samples[..., i]` is an independent draw from the Poisson distribution with rate `lam[i]`." 9968,stateless_random_normal,tensorflow/tensorflow/python/ops/stateless_random_ops.py,446,function,"Outputs deterministic pseudorandom values from a normal distribution. This is a stateless version of `tf.random.normal`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution. dtype: The type of the output. name: A name for the operation (optional).
Returns: A tensor of the specified shape filled with random normal values." 9969,stateless_truncated_normal,tensorflow/tensorflow/python/ops/stateless_random_ops.py,487,function,"Outputs deterministic pseudorandom values, truncated normally distributed. This is a stateless version of `tf.random.truncated_normal`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) mean: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the normal distribution, before truncation. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values." 9970,stateless_multinomial,tensorflow/tensorflow/python/ops/stateless_random_ops.py,535,function,"Draws deterministic pseudorandom samples from a multinomial distribution. This is a stateless version of `tf.random.categorical`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.stateless_categorical( tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17]) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) output_dtype: integer type to use for the output. Defaults to int64. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`." 9971,stateless_categorical,tensorflow/tensorflow/python/ops/stateless_random_ops.py,576,function,"Draws deterministic pseudorandom samples from a categorical distribution. This is a stateless version of `tf.random.categorical`: if run twice with the same seeds and shapes, it will produce the same pseudorandom numbers. The output is consistent across multiple runs on the same hardware (and between CPU and GPU), but may change between versions of TensorFlow or on non-CPU/GPU hardware. Example: ```python # samples has shape [1, 5], where each value is either 0 or 1 with equal # probability. samples = tf.random.stateless_categorical( tf.math.log([[0.5, 0.5]]), 5, seed=[7, 17]) ``` Args: logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) dtype: integer type to use for the output. Defaults to int64. name: Optional name for the operation. Returns: The drawn samples of shape `[batch_size, num_samples]`." 9972,stateless_multinomial_categorical_impl,tensorflow/tensorflow/python/ops/stateless_random_ops.py,616,function,Implementation for stateless multinomial/categorical ops (v1/v2). 9973,stateless_parameterized_truncated_normal,tensorflow/tensorflow/python/ops/stateless_random_ops.py,625,function,"Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Examples: Sample from a truncated normal, with differing shape parameters that broadcast. >>> means = 0. >>> stddevs = tf.math.exp(tf.random.uniform(shape=[2, 3])) >>> minvals = [-1., -2., -1000.] >>> maxvals = [[10000.], [1.]] >>> y = tf.random.stateless_parameterized_truncated_normal( ... shape=[10, 2, 3], seed=[7, 17], ... means=means, stddevs=stddevs, minvals=minvals, maxvals=maxvals) >>> y.shape TensorShape([10, 2, 3]) Args: shape: A 1-D integer `Tensor` or Python array. The shape of the output tensor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) means: A `Tensor` or Python value of type `dtype`. The mean of the truncated normal distribution. This must broadcast with `stddevs`, `minvals` and `maxvals`, and the broadcasted shape must be dominated by `shape`. stddevs: A `Tensor` or Python value of type `dtype`. The standard deviation of the truncated normal distribution. This must broadcast with `means`, `minvals` and `maxvals`, and the broadcasted shape must be dominated by `shape`. minvals: A `Tensor` or Python value of type `dtype`. The minimum value of the truncated normal distribution. This must broadcast with `means`, `stddevs` and `maxvals`, and the broadcasted shape must be dominated by `shape`. maxvals: A `Tensor` or Python value of type `dtype`. The maximum value of the truncated normal distribution. This must broadcast with `means`, `stddevs` and `minvals`, and the broadcasted shape must be dominated by `shape`. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values." 9974,regex_full_match,tensorflow/tensorflow/python/ops/string_ops.py,50,function,"Match elements of `input` with regex `pattern`. Args: input: string `Tensor`, the source strings to process. pattern: string or scalar string `Tensor`, regular expression to use, see more details at https://github.com/google/re2/wiki/Syntax name: Name of the op. Returns: bool `Tensor` of the same shape as `input` with match results." 9975,regex_replace,tensorflow/tensorflow/python/ops/string_ops.py,79,function,"Replace elements of `input` matching regex `pattern` with `rewrite`. >>> tf.strings.regex_replace(""Text with tags.
<br /><b>contains html</b>"", ... ""<[^>]+>"", "" "") Args: input: string `Tensor`, the source strings to process. pattern: string or scalar string `Tensor`, regular expression to use, see more details at https://github.com/google/re2/wiki/Syntax rewrite: string or scalar string `Tensor`, value to use in match replacement, supports backslash-escaped digits (\1 to \9), which can be used to insert text matching the corresponding parenthesized group. replace_global: `bool`, if `True` replace all non-overlapping matches, else replace only the first match. name: A name for the operation (optional). Returns: string `Tensor` of the same shape as `input` with specified replacements." 9976,string_format,tensorflow/tensorflow/python/ops/string_ops.py,117,function,"Formats a string template using a list of tensors. Formats a string template using a list of tensors, abbreviating tensors by only printing the first and last `summarize` elements of each dimension (recursively). If formatting only one tensor into a template, the tensor does not have to be wrapped in a list. Example: Formatting a single-tensor template: >>> tensor = tf.range(5) >>> tf.strings.format(""tensor: {}, suffix"", tensor) Formatting a multi-tensor template: >>> tensor_a = tf.range(2) >>> tensor_b = tf.range(1, 4, 2) >>> tf.strings.format(""a: {}, b: {}, suffix"", (tensor_a, tensor_b)) Args: template: A string template to format tensor values into. inputs: A list of `Tensor` objects, or a single Tensor. The list of tensors to format into the template string. If a solitary tensor is passed in, the input tensor will automatically be wrapped as a list. placeholder: An optional `string`. Defaults to `{}`. At each placeholder occurring in the template, a subsequent tensor will be inserted. summarize: An optional `int`. Defaults to `3`. When formatting the tensors, show the first and last `summarize` entries of each tensor dimension (recursively). If set to -1, all elements of the tensor will be shown. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. Raises: ValueError: if the number of placeholders does not match the number of inputs." 9977,string_split,tensorflow/tensorflow/python/ops/string_ops.py,180,function,"Split elements of `source` based on `delimiter` into a `SparseTensor`. Let N be the size of source (typically N will be the batch size). Split each element of `source` based on `delimiter` and return a `SparseTensor` containing the split tokens. Empty tokens are ignored. If `sep` is an empty string, each element of the `source` is split into individual strings, each containing one byte. (This includes splitting multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is treated as a set of delimiters with each considered a potential split point. For example: N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output will be st.indices = [0, 0; 0, 1; 1, 0; 1, 1; 1, 2] st.shape = [2, 3] st.values = ['hello', 'world', 'a', 'b', 'c'] Args: source: `1-D` string `Tensor`, the strings to split. sep: `0-D` string `Tensor`, the delimiter character, the string should be length 0 or 1. Default is ' '. skip_empty: A `bool`. If `True`, skip the empty strings from the result. delimiter: deprecated alias for `sep`. Raises: ValueError: If delimiter is not a string. Returns: A `SparseTensor` of rank `2`, the strings split according to the delimiter. The first column of the indices corresponds to the row in `source` and the second column corresponds to the index of the split component in this row."
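Entries 9962–9973 above describe the stateless RNG family; a minimal sketch of how the pieces compose, assuming TF 2.x where `stateless_split` and `stateless_fold_in` are exported under `tf.random.experimental` as catalogued:

```python
import tensorflow as tf

master_seed = tf.constant([1, 2], dtype=tf.int32)

# Derive three independent seeds from one master seed (shape [3, 2]).
replica_seeds = tf.random.experimental.stateless_split(master_seed, num=3)

# Stateless samplers are pure functions of (shape, seed): same inputs,
# same outputs across runs on the same hardware.
a = tf.random.stateless_normal(shape=[2, 2], seed=replica_seeds[0])
b = tf.random.stateless_normal(shape=[2, 2], seed=replica_seeds[0])
assert bool(tf.reduce_all(a == b))

# fold_in mixes a scalar (e.g. a replica ID) into an existing seed.
replica_seed = tf.random.experimental.stateless_fold_in(master_seed, 3)
print(tf.random.stateless_uniform(shape=[3], seed=replica_seed))
```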
9978,string_split_v2,tensorflow/tensorflow/python/ops/string_ops.py,237,function,"Split elements of `source` based on `sep` into a `SparseTensor`. Let N be the size of source (typically N will be the batch size). Split each element of `source` based on `sep` and return a `SparseTensor` containing the split tokens. Empty tokens are ignored. For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output will be st.indices = [0, 0; 0, 1; 1, 0; 1, 1; 1, 2] st.shape = [2, 3] st.values = ['hello', 'world', 'a', 'b', 'c'] If `sep` is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings. For example, source of `""1<>2<><>3""` and sep of `""<>""` returns `[""1"", ""2"", """", ""3""]`. If `sep` is None or an empty string, consecutive whitespace is regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Note that the above-mentioned behavior matches Python's str.split. Args: source: `1-D` string `Tensor`, the strings to split. sep: `0-D` string `Tensor`, the delimiter character. maxsplit: An `int`. If `maxsplit > 0`, limits the number of splits. Raises: ValueError: If sep is not a string. Returns: A `SparseTensor` of rank `2`, the strings split according to the delimiter. The first column of the indices corresponds to the row in `source` and the second column corresponds to the index of the split component in this row." 9979,_reduce_join_reduction_dims,tensorflow/tensorflow/python/ops/string_ops.py,290,function,"Returns range(rank(x) - 1, 0, -1) if axis is None; or axis otherwise." 9980,reduce_join,tensorflow/tensorflow/python/ops/string_ops.py,310,function, 9981,reduce_join_v2,tensorflow/tensorflow/python/ops/string_ops.py,333,function,"Joins all strings into a single string, or joins along an axis. >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']]).numpy() b'abc123def456' >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']], axis=-1).numpy() array([b'abc123', b'def456'], dtype=object) >>> tf.strings.reduce_join([['abc','123'], ... ['def','456']], ... axis=-1, ... separator="" "").numpy() array([b'abc 123', b'def 456'], dtype=object) Args: inputs: A `tf.string` tensor. axis: Which axis to join along. The default behavior is to join all elements, producing a scalar. keepdims: If true, retains reduced dimensions with length 1. separator: a string added between each string being joined. name: A name for the operation (optional). Returns: A `tf.string` tensor." 9982,string_length,tensorflow/tensorflow/python/ops/string_ops.py,381,function,"Computes the length of each string given in the input tensor. >>> strings = tf.constant(['Hello','TensorFlow', '🙂']) >>> tf.strings.length(strings).numpy() # default counts bytes array([ 5, 10, 4], dtype=int32) >>> tf.strings.length(strings, unit=""UTF8_CHAR"").numpy() array([ 5, 10, 1], dtype=int32) Args: input: A `Tensor` of type `string`. The strings for which to compute the length of each element. name: A name for the operation (optional). unit: An optional `string` from: `""BYTE"", ""UTF8_CHAR""`. Defaults to `""BYTE""`. The unit that is counted to compute string length. One of: `""BYTE""` (for the number of bytes in each string) or `""UTF8_CHAR""` (for the number of UTF-8 encoded Unicode code points in each string). Results are undefined if `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid UTF-8.
Returns: A `Tensor` of type `int32`, containing the length of the input string in the same element of the input tensor." 9983,string_length_v2,tensorflow/tensorflow/python/ops/string_ops.py,410,function, 9984,substr_deprecated,tensorflow/tensorflow/python/ops/string_ops.py,420,function, 9985,substr,tensorflow/tensorflow/python/ops/string_ops.py,428,function, 9986,substr_v2,tensorflow/tensorflow/python/ops/string_ops.py,436,function, 9987,string_to_number,tensorflow/tensorflow/python/ops/string_ops.py,456,function,"Converts each string in the input Tensor to the specified numeric type. (Note that int32 overflow results in an error while float overflow results in a rounded value.) Examples: >>> tf.strings.to_number(""1.55"") >>> tf.strings.to_number(""3"", tf.int32) Args: input: A `Tensor` of type `string`. out_type: An optional `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.int64`. Defaults to `tf.float32`. The numeric type to interpret each string in `string_tensor` as. name: A name for the operation (optional). Returns: A `Tensor` of type `out_type`." 9988,string_to_number_v1,tensorflow/tensorflow/python/ops/string_ops.py,484,function, 9989,string_to_hash_bucket,tensorflow/tensorflow/python/ops/string_ops.py,498,function,"Converts each string in the input Tensor to its hash mod by a number of buckets. The hash function is deterministic on the content of the string within the process. Note that the hash function may change from time to time. This functionality will be deprecated and it's recommended to use `tf.strings.to_hash_bucket_fast()` or `tf.strings.to_hash_bucket_strong()`. Examples: >>> tf.strings.to_hash_bucket([""Hello"", ""TensorFlow"", ""2.x""], 3) Args: input: A `Tensor` of type `string`. num_buckets: An `int` that is `>= 1`. The number of buckets. name: A name for the operation (optional). Returns: A `Tensor` of type `int64`." 9990,string_to_hash_bucket_v1,tensorflow/tensorflow/python/ops/string_ops.py,528,function, 9991,string_join,tensorflow/tensorflow/python/ops/string_ops.py,544,function,"Perform element-wise concatenation of a list of string tensors. Given a list of string tensors of same shape, performs element-wise concatenation of the strings of the same index in all tensors. >>> tf.strings.join(['abc','def']).numpy() b'abcdef' >>> tf.strings.join([['abc','123'], ... ['def','456'], ... ['ghi','789']]).numpy() array([b'abcdefghi', b'123456789'], dtype=object) >>> tf.strings.join([['abc','123'], ... ['def','456']], ... separator="" "").numpy() array([b'abc def', b'123 456'], dtype=object) Args: inputs: A list of `tf.Tensor` objects of same size and `tf.string` dtype. separator: A string added between each string being joined. name: A name for the operation (optional). Returns: A `tf.string` tensor." 9992,collect,tensorflow/tensorflow/python/ops/summary_op_util.py,28,function,"Adds keys to a collection. Args: val: The value to add per each key. collections: A collection of keys to add. default_collections: Used if collections is None." 9993,clean_tag,tensorflow/tensorflow/python/ops/summary_op_util.py,45,function,"Cleans a tag. Removes illegal characters for instance. Args: name: The original tag name to be processed. Returns: The cleaned tag name." 9994,summary_scope,tensorflow/tensorflow/python/ops/summary_op_util.py,72,function,"Enters a scope used for the summary and yields both the name and tag. To ensure that the summary tag name is always unique, we create a name scope based on `name` and use the full scope name in the tag. 
If `family` is set, then the tag name will be '<family>/<scope_name>', where `scope_name` is '<outer_scope>/<family>/<name>'. This ensures that `family` is always the prefix of the tag (and unmodified), while ensuring the scope respects the outer scope from which this summary was created. Args: name: A name for the generated summary node. family: Optional; if provided, used as the prefix of the summary tag name. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as `values` parameter to name_scope. Yields: A tuple `(tag, scope)`, both of which are unique and should be used for the tag and the scope for the summary to output." 9995,_SummaryState,tensorflow/tensorflow/python/ops/summary_ops_v2.py,64,class, 9996,_should_record_summaries_internal,tensorflow/tensorflow/python/ops/summary_ops_v2.py,78,function,"Returns boolean Tensor if summaries should/shouldn't be recorded. Now the summary condition is decided by the logical ""and"" of the conditions below: First, summary writer must be set. Given this constraint is met, ctx.summary_recording and ctx.summary_recording_distribution_strategy. The former one is usually set by user, and the latter one is controlled by DistributionStrategy (tf.distribute.ReplicaContext). Args: default_state: can be True or False. The default summary behavior when summary writer is set and the user does not specify ctx.summary_recording and ctx.summary_recording_distribution_strategy is True." 9997,_should_record_summaries_v2,tensorflow/tensorflow/python/ops/summary_ops_v2.py,109,function,"Returns boolean Tensor which is true if summaries should be recorded. If no recording status has been set, this defaults to True, unlike the public should_record_summaries()." 9998,should_record_summaries,tensorflow/tensorflow/python/ops/summary_ops_v2.py,119,function,Returns boolean Tensor which is true if summaries should be recorded. 9999,record_if,tensorflow/tensorflow/python/ops/summary_ops_v2.py,126,function,"Sets summary recording on or off per the provided boolean value. The provided value can be a python boolean, a scalar boolean Tensor, or a callable providing such a value; if a callable is passed it will be invoked on-demand to determine whether summary writing will occur. Args: condition: can be True, False, a bool Tensor, or a callable providing such. Yields: Returns a context manager that sets this value on enter and restores the previous value on exit." 10000,record_summaries_every_n_global_steps,tensorflow/tensorflow/python/ops/summary_ops_v2.py,149,function,Sets the should_record_summaries Tensor to true if global_step % n == 0. 10001,always_record_summaries,tensorflow/tensorflow/python/ops/summary_ops_v2.py,160,function,Sets the should_record_summaries Tensor to always true. 10002,never_record_summaries,tensorflow/tensorflow/python/ops/summary_ops_v2.py,165,function,Sets the should_record_summaries Tensor to always false. 10003,get_step,tensorflow/tensorflow/python/ops/summary_ops_v2.py,171,function,"Returns the default summary step for the current thread. Returns: The step set by `tf.summary.experimental.set_step()` if one has been set, otherwise None." 10004,set_step,tensorflow/tensorflow/python/ops/summary_ops_v2.py,182,function,"Sets the default summary step for the current thread. For convenience, this function sets a default value for the `step` parameter used in summary-writing functions elsewhere in the API so that it need not be explicitly passed in every such invocation.
The value can be a constant or a variable, and can be retrieved via `tf.summary.experimental.get_step()`. Note: when using this with @tf.functions, the step value will be captured at the time the function is traced, so changes to the step outside the function will not be reflected inside the function unless using a `tf.Variable` step. Args: step: An `int64`-castable default step value, or None to unset." 10005,SummaryWriter,tensorflow/tensorflow/python/ops/summary_ops_v2.py,202,class,Interface representing a stateful summary writer object. 10006,ResourceSummaryWriter,tensorflow/tensorflow/python/ops/summary_ops_v2.py,272,class,Implementation of SummaryWriter using a SummaryWriterInterface resource. 10007,NoopSummaryWriter,tensorflow/tensorflow/python/ops/summary_ops_v2.py,395,class,"A summary writer that does nothing, for create_noop_writer()." 10008,initialize,tensorflow/tensorflow/python/ops/summary_ops_v2.py,416,function,"Initializes summary writing for graph execution mode. This operation is a no-op when executing eagerly. This helper method provides a higher-level alternative to using `tf.contrib.summary.summary_writer_initializer_op` and `tf.contrib.summary.graph`. Most users will also want to call `tf.compat.v1.train.create_global_step` which can happen before or after this function is called. Args: graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer. This function will not write the default graph by default. When writing to an event log file, the associated step will be zero. session: So this method can call `tf.Session.run`. This defaults to `tf.compat.v1.get_default_session`. Raises: RuntimeError: If the current thread has no default `tf.contrib.summary.SummaryWriter`. ValueError: If session wasn't passed and no default session." 10009,create_file_writer_v2,tensorflow/tensorflow/python/ops/summary_ops_v2.py,458,function,"Creates a summary file writer for the given log directory. Args: logdir: a string specifying the directory in which to write an event file. max_queue: the largest number of summaries to keep in a queue; will flush once the queue gets bigger than this. Defaults to 10. flush_millis: the largest interval between flushes. Defaults to 120,000. filename_suffix: optional suffix for the event file name. Defaults to `.v2`. name: a name for the op that creates the writer. Returns: A SummaryWriter object." 10010,create_file_writer,tensorflow/tensorflow/python/ops/summary_ops_v2.py,519,function,"Creates a summary file writer in the current context under the given name. Args: logdir: a string, or None. If a string, creates a summary file writer which writes to the directory named by the string. If None, returns a mock object which acts like a summary writer but does nothing, useful to use as a context manager. max_queue: the largest number of summaries to keep in a queue; will flush once the queue gets bigger than this. Defaults to 10. flush_millis: the largest interval between flushes. Defaults to 120,000. filename_suffix: optional suffix for the event file name. Defaults to `.v2`. name: Shared name for this SummaryWriter resource stored to default Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a summary writer resource with this shared name already exists, the returned SummaryWriter wraps that resource and the other arguments have no effect. Returns: Either a summary writer or an empty object which can be used as a summary writer." 
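Entries 10003–10010 above cover the v2 writer machinery; a minimal sketch of how `create_file_writer` and the default-step helpers fit together (the logdir path is hypothetical, any writable directory works):

```python
import tensorflow as tf

# Hypothetical log directory.
writer = tf.summary.create_file_writer("/tmp/demo_logs")

with writer.as_default():
    for step in range(3):
        # set_step installs a thread-local default, so scalar() below
        # does not need an explicit step= argument.
        tf.summary.experimental.set_step(step)
        tf.summary.scalar("loss", 1.0 / (step + 1))
    writer.flush()
```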
10011,create_db_writer,tensorflow/tensorflow/python/ops/summary_ops_v2.py,566,function,"Creates a summary database writer in the current context. This can be used to write tensors from the execution graph directly to a database. Only SQLite is supported right now. This function will create the schema if it doesn't exist. Entries in the Users, Experiments, and Runs tables will be created automatically if they don't already exist. Args: db_uri: For example ""file:/tmp/foo.sqlite"". experiment_name: Defaults to YYYY-MM-DD in local time if None. Empty string means the Run will not be associated with an Experiment. Can't contain ASCII control characters or <>. Case sensitive. run_name: Defaults to HH:MM:SS in local time if None. Empty string means a Tag will not be associated with any Run. Can't contain ASCII control characters or <>. Case sensitive. user_name: Defaults to system username if None. Empty means the Experiment will not be associated with a User. Must be valid as both a DNS label and Linux username. name: Shared name for this SummaryWriter resource stored to default `tf.Graph`. Returns: A `tf.summary.SummaryWriter` instance." 10012,create_noop_writer,tensorflow/tensorflow/python/ops/summary_ops_v2.py,619,function,"Returns a summary writer that does nothing. This is useful as a placeholder in code that expects a context manager." 10013,_cleanse_string,tensorflow/tensorflow/python/ops/summary_ops_v2.py,627,function, 10014,_nothing,tensorflow/tensorflow/python/ops/summary_ops_v2.py,633,function,Convenient else branch for when summaries do not record. 10015,all_v2_summary_ops,tensorflow/tensorflow/python/ops/summary_ops_v2.py,639,function,"Returns all V2-style summary ops defined in the current default graph. This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but does *not* include TF 1.x tf.summary ops. Returns: List of summary ops, or None if called under eager execution." 10016,summary_writer_initializer_op,tensorflow/tensorflow/python/ops/summary_ops_v2.py,654,function,"Graph-mode only. Returns the list of ops to create all summary writers. Returns: The initializer ops. Raises: RuntimeError: If in Eager mode." 10017,summary_scope,tensorflow/tensorflow/python/ops/summary_ops_v2.py,675,function,"Experimental context manager for use when defining a custom summary op. This behaves similarly to `tf.name_scope`, except that it returns a generated summary tag in addition to the scope name. The tag is structurally similar to the scope name - derived from the user-provided name, prefixed with enclosing name scopes if any - but we relax the constraint that it be uniquified, as well as the character set limitation (so the user-provided name can contain characters not legal for scope names; in the scope name these are removed). This makes the summary tag more predictable and consistent for the user. For example, to define a new summary op called `my_op`: ```python def my_op(name, my_value, step): with tf.summary.summary_scope(name, ""MyOp"", [my_value]) as (tag, scope): my_value = tf.convert_to_tensor(my_value) return tf.summary.write(tag, my_value, step=step) ``` Args: name: string name for the summary. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as `values` parameter to name_scope. Yields: A tuple `(tag, scope)` as described above." 
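The `summary_scope` entry above carries a flattened example; a runnable reconstruction, assuming the TF 2.x export `tf.summary.experimental.summary_scope` and a hypothetical logdir, might look like:

```python
import tensorflow as tf

def my_op(name, my_value, step):
    # summary_scope yields (tag, scope): the tag keeps the user-provided name,
    # while the scope is the sanitized name-scope string.
    with tf.summary.experimental.summary_scope(name, "MyOp", [my_value]) as (tag, _):
        my_value = tf.convert_to_tensor(my_value)
        return tf.summary.write(tag, my_value, step=step)

writer = tf.summary.create_file_writer("/tmp/logs")
with writer.as_default():
    my_op("my_metric", 3.0, step=1)
```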
10018,write,tensorflow/tensorflow/python/ops/summary_ops_v2.py,715,function,"Writes a generic summary to the default SummaryWriter if one exists. This exists primarily to support the definition of type-specific summary ops like scalar() and image(), and is not intended for direct use unless defining a new type-specific summary op. Args: tag: string tag used to identify the summary (e.g. in TensorBoard), usually generated with `tf.summary.summary_scope`. tensor: the Tensor holding the summary data to write or a callable that returns this Tensor. If a callable is passed, it will only be called when a default SummaryWriter exists and the recording condition specified by `record_if()` is met. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. metadata: Optional SummaryMetadata, as a proto or serialized bytes. name: Optional string name for this op. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None." 10019,write_raw_pb,tensorflow/tensorflow/python/ops/summary_ops_v2.py,782,function,"Writes a summary using raw `tf.compat.v1.Summary` protocol buffers. Experimental: this exists to support the usage of V1-style manual summary writing (via the construction of a `tf.compat.v1.Summary` protocol buffer) with the V2 summary writing API. Args: tensor: the string Tensor holding one or more serialized `Summary` protobufs. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. name: Optional string name for this op. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None." 10020,summary_writer_function,tensorflow/tensorflow/python/ops/summary_ops_v2.py,833,function,"Helper function to write summaries. Args: name: name of the summary. tensor: main tensor to form the summary. function: function taking a tag and a scope which writes the summary. family: optional, the summary's family. Returns: The result of writing the summary." 10021,generic,tensorflow/tensorflow/python/ops/summary_ops_v2.py,865,function,Writes a tensor summary if possible. 10022,scalar,tensorflow/tensorflow/python/ops/summary_ops_v2.py,886,function,"Writes a scalar summary if possible. Unlike `tf.contrib.summary.generic` this op may change the dtype depending on the writer, for both practical and efficiency concerns. Args: name: An arbitrary name for this summary. tensor: A `tf.Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`, `uint32`, `uint64`. family: Optional, the summary's family. step: The `int64` monotonic step variable, which defaults to `tf.compat.v1.train.get_global_step`. Returns: The created `tf.Operation` or a `tf.no_op` if summary writing has not been enabled for this context." 10023,histogram,tensorflow/tensorflow/python/ops/summary_ops_v2.py,918,function,Writes a histogram summary if possible. 10024,image,tensorflow/tensorflow/python/ops/summary_ops_v2.py,933,function,Writes an image summary if possible.
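A short sketch of the generic `write` primitive documented above (tag and logdir are illustrative assumptions):

```python
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/logs")
with writer.as_default():
    # write() underlies the type-specific ops such as scalar() and image();
    # it returns True when a default writer accepted the summary.
    ok = tf.summary.write("my_tag", tf.constant(1.0), step=42)
    print(bool(ok))
```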
10025,audio,tensorflow/tensorflow/python/ops/summary_ops_v2.py,952,function,Writes an audio summary if possible. 10026,graph,tensorflow/tensorflow/python/ops/summary_ops_v2.py,969,function,"Writes a TensorFlow graph to the summary interface. The graph summary is, strictly speaking, not a summary. Conditions like `tf.summary.should_record_summaries` do not apply. Only a single graph can be associated with a particular run. If multiple graphs are written, then only the last one will be considered by TensorBoard. When not using eager execution mode, the user should consider passing the `graph` parameter to `tf.compat.v1.summary.initialize` instead of calling this function. Otherwise special care needs to be taken when using the graph to record the graph. Args: param: A `tf.Tensor` containing a serialized graph proto. When eager execution is enabled, this function will automatically coerce `tf.Graph`, `tf.compat.v1.GraphDef`, and string types. step: The global step variable. This doesn't have useful semantics for graph summaries, but is used anyway, due to the structure of event log files. This defaults to the global step. name: A name for the operation (optional). Returns: The created `tf.Operation` or a `tf.no_op` if summary writing has not been enabled for this context. Raises: TypeError: If `param` isn't already a `tf.Tensor` in graph mode." 10027,import_event,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1017,function,"Writes a `tf.compat.v1.Event` binary proto. This can be used to import existing event logs into a new summary writer sink. Please note that this is lower level than the other summary functions and will ignore the `tf.summary.should_record_summaries` setting. Args: tensor: A `tf.Tensor` of type `string` containing a serialized `tf.compat.v1.Event` proto. name: A name for the operation (optional). Returns: The created `tf.Operation`." 10028,flush,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1037,function,"Forces summary writer to send any buffered data to storage. This operation blocks until that finishes. Args: writer: The `tf.summary.SummaryWriter` resource to flush. The thread default will be used if this parameter is None. Otherwise a `tf.no_op` is returned. name: A name for the operation (optional). Returns: The created `tf.Operation`." 10029,eval_dir,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1067,function,Construct a logdir for an eval summary writer. 10030,create_summary_file_writer,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1074,function,Please use `tf.contrib.summary.create_file_writer`. 10031,_serialize_graph,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1081,function, 10032,_choose_step,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1088,function, 10033,_check_create_file_writer_args,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1096,function,"Helper to check the validity of arguments to a create_file_writer() call. Args: inside_function: whether the create_file_writer() call is in a tf.function **kwargs: the arguments to check, as kwargs to give them names. Raises: ValueError: if the arguments are graph tensors." 10034,run_metadata,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1120,function,"Writes entire RunMetadata summary. A RunMetadata can contain DeviceStats, partition graphs, and function graphs. Please refer to the proto for definition of each field. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A RunMetadata proto to write. 
step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None." 10035,run_metadata_graphs,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1162,function,"Writes graphs from a RunMetadata summary. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A RunMetadata proto to write. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None." 10036,keras_model,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1205,function,"Writes a Keras model as JSON to a Summary. Writing the Keras model configuration allows the TensorBoard graph plugin to render a conceptual graph, as opposed to a graph of ops. In case the model fails to serialize as JSON, the error is ignored and False is returned. Args: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A Keras Model to write. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. Returns: True on success, or False if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None." 10037,trace_on,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1258,function,"Starts a trace to record computation graphs and profiling information. Must be invoked in eager mode. When enabled, the TensorFlow runtime will collect information that can later be exported and consumed by TensorBoard. The trace is activated across the entire TensorFlow runtime and affects all threads of execution. To stop the trace and export the collected information, use `tf.summary.trace_export`. To stop the trace without exporting, use `tf.summary.trace_off`. Args: graph: If True, enables collection of executed graphs. It includes ones from tf.function invocation and ones from the legacy graph mode. The default is True. profiler: If True, enables the advanced profiler. Enabling profiler implicitly enables the graph collection. The profiler may incur a high memory overhead. The default is False." 10038,trace_export,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1303,function,"Stops and exports the active trace as a Summary and/or profile file. Stops the trace and exports all metadata collected during the trace to the default SummaryWriter, if one has been set. Args: name: A name for the summary to be written. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. profiler_outdir: Output directory for profiler. This is only used when the profiler was enabled when the trace was started.
In that case, if there is a logdir-based default SummaryWriter, this defaults to the same directory, but otherwise the argument must be passed. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None." 10039,trace_off,tensorflow/tensorflow/python/ops/summary_ops_v2.py,1359,function,Stops the current trace and discards any collected information. 10040,make_template,tensorflow/tensorflow/python/ops/template.py,40,function,"Given an arbitrary function, wrap it so that it does variable sharing. This wraps `func_` in a Template and partially evaluates it. Templates are functions that create variables the first time they are called and reuse them thereafter. In order for `func_` to be compatible with a `Template` it must have the following properties: * The function should create all trainable variables and any variables that should be reused by calling `tf.compat.v1.get_variable`. If a trainable variable is created using `tf.Variable`, then a ValueError will be thrown. Variables that are intended to be locals can be created by specifying `tf.Variable(..., trainable=False)`. * The function may use variable scopes and other templates internally to create and reuse variables, but it shouldn't use `tf.compat.v1.global_variables` to capture variables that are defined outside of the scope of the function. * Internal scopes and variable names should not depend on any arguments that are not supplied to `make_template`. In general you will get a ValueError telling you that you are trying to reuse a variable that doesn't exist if you make a mistake. In the following example, both `z` and `w` will be scaled by the same `y`. It is important to note that if we didn't assign `scalar_name` and used a different name for z and w, a `ValueError` would be thrown because it couldn't reuse the variable. ```python def my_op(x, scalar_name): var1 = tf.compat.v1.get_variable(scalar_name, shape=[], initializer=tf.compat.v1.constant_initializer(1)) return x * var1 scale_by_y = tf.compat.v1.make_template('scale_by_y', my_op, scalar_name='y') z = scale_by_y(input1) w = scale_by_y(input2) ``` As a safe-guard, the returned function will raise a `ValueError` after the first call if trainable variables are created by calling `tf.Variable`. If all of these are true, then 2 properties are enforced by the template: 1. Calling the same template multiple times will share all non-local variables. 2. Two different templates are guaranteed to be unique, unless you reenter the same variable scope as the initial definition of a template and redefine it. An example of this exception: ```python def my_op(x, scalar_name): var1 = tf.compat.v1.get_variable(scalar_name, shape=[], initializer=tf.compat.v1.constant_initializer(1)) return x * var1 with tf.compat.v1.variable_scope('scope') as vs: scale_by_y = tf.compat.v1.make_template('scale_by_y', my_op, scalar_name='y') z = scale_by_y(input1) w = scale_by_y(input2) # Creates a template that reuses the variables above. with tf.compat.v1.variable_scope(vs, reuse=True): scale_by_y2 = tf.compat.v1.make_template('scale_by_y', my_op, scalar_name='y') z2 = scale_by_y2(input1) w2 = scale_by_y2(input2) ``` Depending on the value of `create_scope_now_`, the full variable scope may be captured either at the time of first call or at the time of construction.
If this option is set to True, then all Tensors created by repeated calls to the template will have an extra trailing _N+1 to their name, as the first time the scope is entered in the Template constructor no Tensors are created. Note: `name_`, `func_` and `create_scope_now_` have a trailing underscore to reduce the likelihood of collisions with kwargs. Args: name_: A name for the scope created by this template. If necessary, the name will be made unique by appending `_N` to the name. func_: The function to wrap. create_scope_now_: Boolean controlling whether the scope should be created when the template is constructed or when the template is called. Default is False, meaning the scope is created when the template is called. unique_name_: When used, it overrides name_ and is not made unique. If a template of the same scope/unique_name already exists and reuse is false, an error is raised. Defaults to None. custom_getter_: Optional custom getter for variables used in `func_`. See the `tf.compat.v1.get_variable` `custom_getter` documentation for more information. **kwargs: Keyword arguments to apply to `func_`. Returns: A function to encapsulate a set of variables which should be created once and reused. An enclosing scope will be created either when `make_template` is called or when the result is called, depending on the value of `create_scope_now_`. Regardless of the value, the first time the template is called it will enter the scope with no reuse, and call `func_` to create variables, which are guaranteed to be unique. All subsequent calls will re-enter the scope and reuse those variables. Raises: ValueError: if `name_` is None." 10041,make_template_internal,tensorflow/tensorflow/python/ops/template.py,164,function,"Make a template, optionally compiling func_ into a graph function. See `make_template` for full documentation. Args: name_: A name for the scope created by this template. If necessary, the name will be made unique by appending `_N` to the name. func_: The function to wrap. create_scope_now_: Boolean controlling whether the scope should be created when the template is constructed or when the template is called. Default is False, meaning the scope is created when the template is called. unique_name_: When used, it overrides name_ and is not made unique. If a template of the same scope/unique_name already exists and reuse is false, an error is raised. Defaults to None. If executing eagerly, must be None. custom_getter_: Optional custom getter for variables used in `func_`. See the `tf.compat.v1.get_variable` `custom_getter` documentation for more information. create_graph_function_: When True, `func_` will be executed as a graph function. This implies that `func_` must satisfy the properties that `function.defun` requires of functions: See the documentation of `function.defun` for details. When executing eagerly, setting this flag to True can improve performance. Regardless of whether eager execution is enabled, enabling this flag gives the caller access to graph-function semantics, i.e., accesses to variables are totally ordered and side-effecting ops are not pruned. **kwargs: Keyword arguments to apply to `func_`. Returns: A function to encapsulate a set of variables which should be created once and reused. An enclosing scope will be created either when `make_template` is called or when the result is called, depending on the value of `create_scope_now_`. 
Regardless of the value, the first time the template is called it will enter the scope with no reuse, and call `func_` to create variables, which are guaranteed to be unique. All subsequent calls will re-enter the scope and reuse those variables. Raises: ValueError: if `name_` is None. ValueError: if `unique_name_` is not None and eager execution is enabled." 10042,_skip_common_stack_elements,tensorflow/tensorflow/python/ops/template.py,234,function,Skips items that the target stacktrace shares with the base stacktrace. 10043,Template,tensorflow/tensorflow/python/ops/template.py,242,class,"Wrap a function to aid in variable sharing. Templates are functions that create variables the first time they are called and reuse them thereafter. See `make_template` for full documentation. Note: By default, the full variable scope is captured at the time of first call. If `create_scope_now_` is passed as True to the constructor, the full scope will be captured there, but no variables will created until the first call." 10044,_EagerTemplateVariableStore,tensorflow/tensorflow/python/ops/template.py,485,class,Wrapper around EagerVariableStore to support nesting EagerTemplates. 10045,EagerTemplate,tensorflow/tensorflow/python/ops/template.py,539,class,"Wrap a function to aid in variable sharing in Eager mode. Templates are functions that create variables the first time they are called and reuse them thereafter. See `make_template` for full documentation. Note: By default, the full variable scope is captured at the time of first call. If `create_scope_now` is passed as True to the constructor, the full scope will be captured there, but no variables will be created until the first call." 10046,_GetGradSource,tensorflow/tensorflow/python/ops/tensor_array_grad.py,43,function,"Identify which call to tf.gradients created this gradient op or tensor. TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counting breaks the TensorArray gradient flow. The solution is to identify which gradient call this particular TensorArray*Grad is being called in, by looking at the input gradient tensor's name, and create or lookup an accumulator gradient TensorArray associated with this specific call. This solves any confusion and ensures different gradients from the same forward graph get their own accumulators. This function creates the unique label associated with the tf.gradients call that is used to create the gradient TensorArray. Args: op_or_tensor: `Tensor` or `Operation` which is an input to a TensorArray*Grad call. Returns: A python string, the unique label associated with this particular gradients calculation. Raises: ValueError: If not called within a gradients calculation." 10047,_TensorArrayReadGrad,tensorflow/tensorflow/python/ops/tensor_array_grad.py,87,function,"Gradient for TensorArrayRead. Args: op: Forward TensorArrayRead op. grad: Gradient `Tensor` to TensorArrayRead. Returns: A flow `Tensor`, which can be used in control dependencies to force the write of `grad` to the gradient `TensorArray`." 10048,_TensorArrayWriteGrad,tensorflow/tensorflow/python/ops/tensor_array_grad.py,118,function,"Gradient for TensorArrayWrite. Args: op: Forward TensorArrayWrite op. flow: Gradient `Tensor` flow to TensorArrayWrite. Returns: A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad." 
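A self-contained version of the variable-sharing pattern from the `make_template` entry above; graph mode is assumed, since templates build on `tf.compat.v1` variable scopes:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def my_op(x, scalar_name):
    # Use get_variable (not tf.Variable) so the template can reuse the variable.
    var1 = tf.get_variable(scalar_name, shape=[],
                           initializer=tf.constant_initializer(1))
    return x * var1

scale_by_y = tf.make_template("scale_by_y", my_op, scalar_name="y")
z = scale_by_y(tf.constant(2.0))  # first call creates 'scale_by_y/y'
w = scale_by_y(tf.constant(3.0))  # second call reuses the same 'y'
```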
10049,_TensorArrayGatherGrad,tensorflow/tensorflow/python/ops/tensor_array_grad.py,150,function,"Gradient for TensorArrayGather. Args: op: Forward TensorArrayGather op. grad: Gradient `Tensor` to TensorArrayGather. Returns: A flow `Tensor`, which can be used in control dependencies to force the write of `grad` to the gradient `TensorArray`." 10050,_TensorArrayScatterGrad,tensorflow/tensorflow/python/ops/tensor_array_grad.py,181,function,"Gradient for TensorArrayScatter. Args: op: Forward TensorArrayScatter op. flow: Gradient `Tensor` flow to TensorArrayScatter. Returns: A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad." 10051,_TensorArrayConcatGrad,tensorflow/tensorflow/python/ops/tensor_array_grad.py,211,function,"Gradient for TensorArrayConcat. Args: op: Forward TensorArrayConcat op. grad: Gradient `Tensor` to TensorArrayConcat. Returns: A flow `Tensor`, which can be used in control dependencies to force the write of `grad` to the gradient `TensorArray`." 10052,_TensorArraySplitGrad,tensorflow/tensorflow/python/ops/tensor_array_grad.py,243,function,"Gradient for TensorArraySplit. Args: op: Forward TensorArraySplit op. flow: Gradient `Tensor` flow to TensorArraySplit. Returns: A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad." 10053,_GraphTensorArray,tensorflow/tensorflow/python/ops/tensor_array_ops.py,52,class,"Graph-mode implementation of TensorArray. " 10054,_GraphTensorArrayV2,tensorflow/tensorflow/python/ops/tensor_array_ops.py,388,class,"Graph-mode implementation of TensorArray backed by TensorLists. The backing tensor of this TensorArray is a TensorList variant tensor which is stored in the `flow`. The `handle` is always none here. The reason we use the `flow` field and not the `handle` field is to ensure backwards compatibility with legacy control flow." 10055,_EagerTensorArray,tensorflow/tensorflow/python/ops/tensor_array_ops.py,650,class,"Eager-compatible implementation of TensorArray. " 10056,TensorArray,tensorflow/tensorflow/python/ops/tensor_array_ops.py,947,class,"Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays. This class is meant to be used with dynamic iteration primitives such as `while_loop` and `map_fn`. It supports gradient back-propagation via special ""flow"" control flow dependencies. Example 1: Plain reading and writing. >>> ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=False) >>> ta = ta.write(0, 10) >>> ta = ta.write(1, 20) >>> ta = ta.write(2, 30) >>> >>> ta.read(0) >>> ta.read(1) >>> ta.read(2) >>> ta.stack() Example 2: Fibonacci sequence algorithm that writes in a loop then returns. >>> @tf.function ... def fibonacci(n): ... ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True) ... ta = ta.unstack([0., 1.]) ... ... for i in range(2, n): ... ta = ta.write(i, ta.read(i - 1) + ta.read(i - 2)) ... ... return ta.stack() >>> >>> fibonacci(7) Example 3: A simple loop interacting with a `tf.Variable`. # TODO(b/153898334): Convert back to doctest once bug is resolved. ``` v = tf.Variable(1) @tf.function def f(x): ta = tf.TensorArray(tf.int32, size=0, dynamic_size=True) for i in tf.range(x): v.assign_add(i) ta = ta.write(i, v) return ta.stack() f(5) ```" 10057,build_ta_with_new_flow,tensorflow/tensorflow/python/ops/tensor_array_ops.py,1274,function,Builds a TensorArray with a new `flow` tensor. 
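Example 1 from the `TensorArray` entry above, reconstructed as runnable eager code:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True, clear_after_read=False)
ta = ta.write(0, 10.)  # each write returns a new TensorArray carrying the flow
ta = ta.write(1, 20.)
print(ta.read(0).numpy())  # 10.0
print(ta.stack().numpy())  # [10. 20.]
```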
10058,_check_dtypes,tensorflow/tensorflow/python/ops/tensor_array_ops.py,1308,function, 10059,TensorArraySpec,tensorflow/tensorflow/python/ops/tensor_array_ops.py,1319,class,Type specification for a `tf.TensorArray`. 10060,TensorArrayOpsTest,tensorflow/tensorflow/python/ops/tensor_array_ops_test.py,30,class, 10061,TreeVariableSaveable,tensorflow/tensorflow/python/ops/tensor_forest_ops.py,26,class,Resource that holds a tree. 10062,tree_variable,tensorflow/tensorflow/python/ops/tensor_forest_ops.py,76,function, 10063,ForestVariables,tensorflow/tensorflow/python/ops/tensor_forest_ops.py,86,class,Resource that holds all trees from a forest. 10064,build_graph,tensorflow/tensorflow/python/ops/transpose_benchmark.py,34,function,"Builds a graph containing a sequence of transpose operations. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as input tensor's dimension. datatype: numpy data type of the input tensor. num_iters: number of iterations to run transpose. Returns: An array of tensors to run()" 10065,TransposeBenchmark,tensorflow/tensorflow/python/ops/transpose_benchmark.py,62,class,Benchmark transpose! 10066,UnconnectedGradients,tensorflow/tensorflow/python/ops/unconnected_gradients.py,27,class,"Controls how gradient computation behaves when y does not depend on x. The gradient of y with respect to x can be zero in two different ways: there could be no differentiable path in the graph connecting x to y (and so we can statically prove that the gradient is zero) or it could be that runtime values of tensors in a particular execution lead to a gradient of zero (say, if a relu unit happens to not be activated). To allow you to distinguish between these two cases you can choose what value gets returned for the gradient when there is no path in the graph from x to y: * `NONE`: Indicates that [None] will be returned if there is no path from x to y * `ZERO`: Indicates that a zero tensor will be returned in the shape of x." 10067,_PartitionInfo,tensorflow/tensorflow/python/ops/variable_scope.py,63,class,Holds partition info used by initializer functions. 10068,_ReuseMode,tensorflow/tensorflow/python/ops/variable_scope.py,190,class,Mode for variable access within a variable scope. 10069,enable_resource_variables,tensorflow/tensorflow/python/ops/variable_scope.py,219,function,"Creates resource variables by default. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature." 10070,resource_variables_enabled,tensorflow/tensorflow/python/ops/variable_scope.py,240,function,"Returns `True` if resource variables are enabled. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor.
Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature." 10071,disable_resource_variables,tensorflow/tensorflow/python/ops/variable_scope.py,262,function,"Opts out of resource variables. If your code needs tf.disable_resource_variables() to be called to work properly please file a bug." 10072,_VariableStore,tensorflow/tensorflow/python/ops/variable_scope.py,273,class,"Variable store that carries a number of named Variables. New variable names and new variables can be created; all stored variables are initialized with the initializer passed to __init__. Attributes: vars: a dictionary with string names (same as passed in GetVar) as keys and the corresponding TensorFlow Variables as values." 10073,_LazyEvalTensor,tensorflow/tensorflow/python/ops/variable_scope.py,1008,class,A Tensor-like object that only evaluates its thunk when used. 10074,_make_master_property,tensorflow/tensorflow/python/ops/variable_scope.py,1028,function, 10075,_make_master_method,tensorflow/tensorflow/python/ops/variable_scope.py,1040,function, 10076,_make_op_method,tensorflow/tensorflow/python/ops/variable_scope.py,1050,function, 10077,no_regularizer,tensorflow/tensorflow/python/ops/variable_scope.py,1080,function,Use this function to prevent regularization of variables. 10078,VariableScope,tensorflow/tensorflow/python/ops/variable_scope.py,1087,class,"Variable scope object to carry defaults to provide to `get_variable`. Many of the arguments we need for `get_variable` in a variable store are most easily handled with a context. This object is used for the defaults. Attributes: name: name of the current scope, used as prefix in get_variable. initializer: default initializer passed to get_variable. regularizer: default regularizer passed to get_variable. reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in get_variable. When eager execution is enabled this argument is always forced to be False. caching_device: string, callable, or None: the caching device passed to get_variable. partitioner: callable or `None`: the partitioner passed to `get_variable`. custom_getter: default custom getter passed to get_variable. name_scope: The name passed to `tf.name_scope`. dtype: default type passed to get_variable (defaults to DT_FLOAT). use_resource: if False, create a normal Variable; if True create an experimental ResourceVariable with well-defined semantics. Defaults to False (will later change to True). When eager execution is enabled this argument is always forced to be True. constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training." 10079,_VariableScopeStore,tensorflow/tensorflow/python/ops/variable_scope.py,1401,class,A thread local store for the current variable scope and scope counts. 
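A brief sketch of how the `VariableScope` defaults documented above flow into `get_variable`; graph mode and the scope/variable names are assumptions for illustration:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# The scope's initializer (and dtype, if set) become defaults for get_variable.
with tf.variable_scope("outer", initializer=tf.constant_initializer(0.5)):
    v = tf.get_variable("v", shape=[2])  # inherits the scope's initializer
    print(v.name)  # outer/v:0
```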
10080,get_variable_scope_store,tensorflow/tensorflow/python/ops/variable_scope.py,1424,function,Returns the variable scope store for current thread. 10081,get_variable_scope,tensorflow/tensorflow/python/ops/variable_scope.py,1438,function,Returns the current variable scope. 10082,_get_default_variable_store,tensorflow/tensorflow/python/ops/variable_scope.py,1443,function, 10083,with_variable_store,tensorflow/tensorflow/python/ops/variable_scope.py,1453,function, 10084,EagerVariableStore,tensorflow/tensorflow/python/ops/variable_scope.py,1463,class,"Wrapper allowing functional layers to be used with eager execution. When eager execution is enabled Variables get deleted when they go out of scope, and are not stored in global collections by default. A lot of code (mostly the functional layers in tf.layers) assumes that variables are kept in a global list. EagerVariableStore can be used in conjunction with this code to make it eager-friendly. For example, to create a dense layer, use: ``` container = tfe.EagerVariableStore() for input in dataset_iterator: with container.as_default(): x = tf.compat.v1.layers.dense(input, name=""l1"") print(container.variables) # Should print the variables used in the layer. ```" 10085,get_variable,tensorflow/tensorflow/python/ops/variable_scope.py,1545,function, 10086,get_local_variable,tensorflow/tensorflow/python/ops/variable_scope.py,1688,function, 10087,_get_partitioned_variable,tensorflow/tensorflow/python/ops/variable_scope.py,1733,function,"Gets or creates a sharded variable list with these parameters. The `partitioner` must be a callable that accepts a fully defined `TensorShape` and returns a sequence of integers (the `partitions`). These integers describe how to partition the given sharded `Variable` along the given dimension. That is, `partitions[1] = 3` means split the `Variable` into 3 shards along dimension 1. Currently, sharding along only one axis is supported. If the list of variables with the given name (prefix) is already stored, we return the stored variables. Otherwise, we create a new one. If initializer is `None` (the default), the default initializer passed in the constructor is used. If that one is `None` too, we use a new `glorot_uniform_initializer`. If initializer is a Tensor, we use it as a value and derive the shape from the initializer. If the initializer is a callable, then it will be called for each shard. Otherwise the initializer should match the shape of the entire sharded Variable, and it will be sliced accordingly for each shard. Some useful partitioners are available. See, e.g., `variable_axis_size_partitioner` and `min_max_variable_partitioner`. Args: name: The name of the new or existing variable. shape: Shape of the new or existing variable. dtype: Type of the new or existing variable (defaults to `DT_FLOAT`). initializer: Initializer for the variable if one is created. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). collections: List of graph collections keys to add the Variable to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`). caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. 
Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. partitioner: Optional callable that accepts a fully defined `TensorShape` and `dtype` of the Variable to be created, and returns a list of partitions for each axis (currently only one axis can be partitioned). validate_shape: If False, allows the variable to be initialized with a value of unknown shape. If True, the default, the shape of initial_value must be known. use_resource: If False, creates a regular Variable. If True, creates an experimental ResourceVariable instead which has well-defined semantics. Defaults to False (will later change to True). constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. synchronization: Indicates when a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. Returns: A tuple `(shards, partitions)` where `shards` is the list of `Variable` shards and `partitions` is the output of the partitioner on the input shape. Raises: ValueError: when creating a new variable and shape is not declared, or when violating reuse during variable creation. Reuse is set inside `variable_scope`." 10088,_pure_variable_scope,tensorflow/tensorflow/python/ops/variable_scope.py,1852,class,"A context for the variable_scope, see `variable_scope` for docs." 10089,_maybe_wrap_custom_getter,tensorflow/tensorflow/python/ops/variable_scope.py,2018,function,Wrap a call to a custom_getter to use the old_getter internally. 10090,_get_unique_variable_scope,tensorflow/tensorflow/python/ops/variable_scope.py,2037,function,Get a name with the given prefix unique in the current variable scope. 10091,variable_scope,tensorflow/tensorflow/python/ops/variable_scope.py,2054,class,"A context manager for defining ops that create variables (layers). This context manager validates that the (optional) `values` are from the same graph, ensures that graph is the default graph, and pushes a name scope and a variable scope. If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None, then `default_name` is used. In that case, if the same name has been previously used in the same scope, it will be made unique by appending `_N` to it. Variable scope allows you to create new variables and to share already created ones while providing checks to not create or share by accident. For details, see the [Variable Scope How To](https://tensorflow.org/guide/variables); here we present only a few basic examples.
Simple example of how to create a new variable: ```python with tf.compat.v1.variable_scope(""foo""): with tf.compat.v1.variable_scope(""bar""): v = tf.compat.v1.get_variable(""v"", [1]) assert v.name == ""foo/bar/v:0"" ``` Simple example of how to reenter a premade variable scope safely: ```python with tf.compat.v1.variable_scope(""foo"") as vs: pass # Re-enter the variable scope. with tf.compat.v1.variable_scope(vs, auxiliary_name_scope=False) as vs1: # Restore the original name_scope. with tf.name_scope(vs1.original_name_scope): v = tf.compat.v1.get_variable(""v"", [1]) assert v.name == ""foo/v:0"" c = tf.constant([1], name=""c"") assert c.name == ""foo/c:0"" ``` Keep in mind that the counters for `default_name` are discarded once the parent scope is exited. Therefore when the code re-enters the scope (for instance by saving it), all nested default_name counters will be restarted. For instance: ```python with tf.compat.v1.variable_scope(""foo"") as vs: with tf.compat.v1.variable_scope(None, default_name=""bar""): v = tf.compat.v1.get_variable(""a"", [1]) assert v.name == ""foo/bar/a:0"", v.name with tf.compat.v1.variable_scope(None, default_name=""bar""): v = tf.compat.v1.get_variable(""b"", [1]) assert v.name == ""foo/bar_1/b:0"" with tf.compat.v1.variable_scope(vs): with tf.compat.v1.variable_scope(None, default_name=""bar""): v = tf.compat.v1.get_variable(""c"", [1]) assert v.name == ""foo/bar/c:0"" # Uses bar instead of bar_2! ``` Basic example of sharing a variable with AUTO_REUSE: ```python def foo(): with tf.compat.v1.variable_scope(""foo"", reuse=tf.compat.v1.AUTO_REUSE): v = tf.compat.v1.get_variable(""v"", [1]) return v v1 = foo() # Creates v. v2 = foo() # Gets the same, existing v. assert v1 == v2 ``` Basic example of sharing a variable with reuse=True: ```python with tf.compat.v1.variable_scope(""foo""): v = tf.compat.v1.get_variable(""v"", [1]) with tf.compat.v1.variable_scope(""foo"", reuse=True): v1 = tf.compat.v1.get_variable(""v"", [1]) assert v1 == v ``` Sharing a variable by capturing a scope and setting reuse: ```python with tf.compat.v1.variable_scope(""foo"") as scope: v = tf.compat.v1.get_variable(""v"", [1]) scope.reuse_variables() v1 = tf.compat.v1.get_variable(""v"", [1]) assert v1 == v ``` To prevent accidental sharing of variables, we raise an exception when getting an existing variable in a non-reusing scope. ```python with tf.compat.v1.variable_scope(""foo""): v = tf.compat.v1.get_variable(""v"", [1]) v1 = tf.compat.v1.get_variable(""v"", [1]) # Raises ValueError(""... v already exists ...""). ``` Similarly, we raise an exception when trying to get a variable that does not exist in reuse mode. ```python with tf.compat.v1.variable_scope(""foo"", reuse=True): v = tf.compat.v1.get_variable(""v"", [1]) # Raises ValueError(""... v does not exists ...""). ``` Note that the `reuse` flag is inherited: if we open a reusing scope, then all its sub-scopes become reusing as well. A note about name scoping: Setting `reuse` does not impact the naming of other ops such as mult. See related discussion on [github#6189](https://github.com/tensorflow/tensorflow/issues/6189) Note that up to and including version 1.0, it was allowed (though explicitly discouraged) to pass False to the reuse argument, yielding undocumented behaviour slightly different from None. Starting at 1.1.0 passing None and False as reuse has exactly the same effect.
A note about using variable scopes in a multi-threaded environment: Variable scopes are thread local, so one thread will not see another thread's current scope. Also, when using `default_name`, unique scope names are also generated only on a per thread basis. If the same name was used within a different thread, that doesn't prevent a new thread from creating the same scope. However, the underlying variable store is shared across threads (within the same graph). As such, if another thread tries to create a new variable with the same name as a variable created by a previous thread, it will fail unless reuse is True. Further, each thread starts with an empty variable scope. So if you wish to preserve name prefixes from a scope from the main thread, you should capture the main thread's scope and re-enter it in each thread. For example: ``` main_thread_scope = variable_scope.get_variable_scope() # Thread's target function: def thread_target_fn(captured_scope): with variable_scope.variable_scope(captured_scope): # .... regular code for this thread thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,)) ```" 10092,variable_op_scope,tensorflow/tensorflow/python/ops/variable_scope.py,2465,function,Deprecated: context manager for defining an op that creates variables. 10093,_call_partitioner,tensorflow/tensorflow/python/ops/variable_scope.py,2496,function,"Call partitioner validating its inputs/output. Args: partitioner: a function mapping `Tensor` shape and dtype to a list of partitions. shape: shape of the `Tensor` to partition, must have at least two dimensions. dtype: dtype of the elements in the `Tensor`. Returns: A list with elements >=1 and exactly one >1. The index of that element corresponds to the partitioning axis." 10094,_get_slice_dim_and_num_slices,tensorflow/tensorflow/python/ops/variable_scope.py,2537,function,Get slicing dimension and number of slices from the partitioner output. 10095,_iter_slices,tensorflow/tensorflow/python/ops/variable_scope.py,2549,function,Slices a given shape along the specified dimension. 10096,default_variable_creator,tensorflow/tensorflow/python/ops/variable_scope.py,2561,function,Default variable creator. 10097,default_variable_creator_v2,tensorflow/tensorflow/python/ops/variable_scope.py,2620,function,Default variable creator. 10098,_make_getter,tensorflow/tensorflow/python/ops/variable_scope.py,2657,function,Gets around capturing loop variables in python being broken. 10099,variable_creator_scope_v1,tensorflow/tensorflow/python/ops/variable_scope.py,2668,function,"Scope which defines a variable creation function to be used by variable(). variable_creator is expected to be a function with the following signature: ``` def variable_creator(next_creator, **kwargs) ``` The creator is supposed to eventually call the next_creator to create a variable if it does want to create a variable and not call Variable or ResourceVariable directly. This helps make creators composable. A creator may choose to create multiple variables, return already existing variables, or simply register that a variable was created and defer to the next creators in line. Creators can also modify the keyword arguments seen by the next creators. Custom getters in the variable scope will eventually resolve down to these custom creators when they do create variables. The valid keyword arguments in kwds are: * initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable.
The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) * trainable: If `True`, the default, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. `trainable` defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. * collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. * validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. * caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. * name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. * dtype: If set, initial_value will be converted to the given type. If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. * constraint: A constraint function to be applied to the variable after updates by some algorithms. * use_resource: if True, a ResourceVariable is always created. * synchronization: Indicates when a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. * aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. This set may grow over time, so it's important the signature of creators is as mentioned above. Args: variable_creator: the passed creator Yields: A scope in which the creator is active
In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) * trainable: If `True`, the default, GradientTapes automatically watch uses of this Variable. * validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. * caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. * name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. * dtype: If set, initial_value will be converted to the given type. If `None`, either the datatype will be kept (if `initial_value` is a Tensor), or `convert_to_tensor` will decide. * constraint: A constraint function to be applied to the variable after updates by some algorithms. * synchronization: Indicates when a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. * aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. This set may grow over time, so it's important the signature of creators is as mentioned above. Args: variable_creator: the passed creator Yields: A scope in which the creator is active" 10101,VariableSpecTest,tensorflow/tensorflow/python/ops/variable_spec_test.py,30,class, 10102,default_variable_creator,tensorflow/tensorflow/python/ops/variables.py,53,function, 10103,default_variable_creator_v2,tensorflow/tensorflow/python/ops/variables.py,58,function, 10104,_make_getter,tensorflow/tensorflow/python/ops/variables.py,63,function,To avoid capturing loop variables. 10105,VariableSynchronization,tensorflow/tensorflow/python/ops/variables.py,73,class,"Indicates when a distributed variable will be synced. * `AUTO`: Indicates that the synchronization will be determined by the current `DistributionStrategy` (eg. With `MirroredStrategy` this would be `ON_WRITE`). * `NONE`: Indicates that there will only be one copy of the variable, so there is no need to sync. * `ON_WRITE`: Indicates that the variable will be updated across devices every time it is written. * `ON_READ`: Indicates that the variable will be aggregated across devices when it is read (eg. when checkpointing or when evaluating an op that uses the variable)." 10106,VariableAggregationV2,tensorflow/tensorflow/python/ops/variables.py,95,class,"Indicates how a distributed variable will be aggregated. `tf.distribute.Strategy` distributes a model by making multiple copies (called ""replicas"") acting data-parallel on different elements of the input batch. When performing some variable-update operation, say `var.assign_add(x)`, in a model, we need to resolve how to combine the different values for `x` computed in the different replicas. * `NONE`: This is the default, giving an error if you use a variable-update operation with multiple replicas. * `SUM`: Add the updates across replicas. * `MEAN`: Take the arithmetic mean (""average"") of the updates across replicas.
* `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same update, but we only want to perform the update once. Used, e.g., for the global step counter." 10107,VariableAggregation,tensorflow/tensorflow/python/ops/variables.py,130,class, 10108,validate_synchronization_aggregation_trainable,tensorflow/tensorflow/python/ops/variables.py,151,function,"Given user-provided variable properties, sets defaults and validates." 10109,VariableMetaclass,tensorflow/tensorflow/python/ops/variables.py,179,class,Metaclass to allow construction of tf.Variable to be overridden. 10110,Variable,tensorflow/tensorflow/python/ops/variables.py,269,class,"See the [variable guide](https://tensorflow.org/guide/variable). A variable maintains shared, persistent state manipulated by a program. The `Variable()` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. This initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods. >>> v = tf.Variable(1.) >>> v.assign(2.) >>> v.assign_add(0.5) The `shape` argument to `Variable`'s constructor allows you to construct a variable with a less defined shape than its `initial_value`: >>> v = tf.Variable(1., shape=tf.TensorShape(None)) >>> v.assign([[1.]]) <tf.Variable ... shape=<unknown> dtype=float32, numpy=array([[1.]], ...)> Just like any `Tensor`, variables created with `Variable()` can be used as inputs to operations. Additionally, all the operators overloaded for the `Tensor` class are carried over to variables. >>> w = tf.Variable([[1.], [2.]]) >>> x = tf.constant([[3., 4.]]) >>> tf.matmul(w, x) >>> tf.sigmoid(w + x) When building a machine learning model it is often convenient to distinguish between variables holding trainable model parameters and other variables such as a `step` variable used to count training steps. To make this easier, the variable constructor supports a `trainable=` parameter. `tf.GradientTape` watches trainable variables by default: >>> with tf.GradientTape(persistent=True) as tape: ... trainable = tf.Variable(1.) ... non_trainable = tf.Variable(2., trainable=False) ... x1 = trainable * 2. ... x2 = non_trainable * 3. >>> tape.gradient(x1, trainable) >>> assert tape.gradient(x2, non_trainable) is None # Unwatched Variables are automatically tracked when assigned to attributes of types inheriting from `tf.Module`. >>> m = tf.Module() >>> m.v = tf.Variable([1.]) >>> m.trainable_variables (<tf.Variable 'v:0' shape=(1,) dtype=float32, numpy=array([1.], dtype=float32)>,) This tracking then allows saving variable values to [training checkpoints](https://www.tensorflow.org/guide/checkpoint), or to [SavedModels](https://www.tensorflow.org/guide/saved_model) which include serialized TensorFlow graphs. Variables are often captured and manipulated by `tf.function`s. This works the same way the un-decorated function would have: >>> v = tf.Variable(0.) >>> read_and_decrement = tf.function(lambda: v.assign_sub(0.1)) >>> read_and_decrement() >>> read_and_decrement() Variables created inside a `tf.function` must be owned outside the function and be created only once: >>> class M(tf.Module): ... @tf.function ... def __call__(self, x): ... if not hasattr(self, ""v""): # Or set self.v to None in __init__ ... self.v = tf.Variable(x) ... return self.v * x >>> m = M() >>> m(2.) >>> m(3.) >>> m.v See the `tf.function` documentation for details." 10111,VariableV1,tensorflow/tensorflow/python/ops/variables.py,1346,class,"See the [Variables Guide](https://tensorflow.org/guide/variables).
A variable maintains state in the graph across calls to `run()`. You add a variable to the graph by constructing an instance of the class `Variable`. The `Variable()` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. The initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods. If you want to change the shape of a variable later you have to use an `assign` Op with `validate_shape=False`. Just like any `Tensor`, variables created with `Variable()` can be used as inputs for other Ops in the graph. Additionally, all the operators overloaded for the `Tensor` class are carried over to variables, so you can also add nodes to the graph by just doing arithmetic on variables. ```python import tensorflow as tf # Create a variable. w = tf.Variable(<initial-value>, name=<optional-name>) # Use the variable in the graph like any Tensor. y = tf.matmul(w, ...another variable or tensor...) # The overloaded operators are available too. z = tf.sigmoid(w + y) # Assign a new value to the variable with `assign()` or a related method. w.assign(w + 1.0) w.assign_add(1.0) ``` When you launch the graph, variables have to be explicitly initialized before you can run Ops that use their value. You can initialize a variable by running its *initializer op*, restoring the variable from a save file, or simply running an `assign` Op that assigns a value to the variable. In fact, the variable *initializer op* is just an `assign` Op that assigns the variable's initial value to the variable itself. ```python # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the variable initializer. sess.run(w.initializer) # ...you now can run ops that use the value of 'w'... ``` The most common initialization pattern is to use the convenience function `global_variables_initializer()` to add an Op to the graph that initializes all the variables. You then run that Op after launching the graph. ```python # Add an Op to initialize global variables. init_op = tf.compat.v1.global_variables_initializer() # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the Op that initializes global variables. sess.run(init_op) # ...you can now run any Op that uses variable values... ``` If you need to create a variable with an initial value dependent on another variable, use the other variable's `initialized_value()`. This ensures that variables are initialized in the right order. All variables are automatically collected in the graph where they are created. By default, the constructor adds the new variable to the graph collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function `global_variables()` returns the contents of that collection. When building a machine learning model it is often convenient to distinguish between variables holding the trainable model parameters and other variables such as a `global step` variable used to count training steps. To make this easier, the variable constructor supports a `trainable=` parameter. If `True`, the new variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. The convenience function `trainable_variables()` returns the contents of this collection. The various `Optimizer` classes use this collection as the default list of variables to optimize. WARNING: tf.Variable objects by default have a non-intuitive memory model. 
A Variable is represented internally as a mutable Tensor which can non-deterministically alias other Tensors in a graph. The set of operations which consume a Variable and can lead to aliasing is undetermined and can change across TensorFlow versions. Avoid writing code which relies on the value of a Variable either changing or not changing as other operations happen. For example, using Variable objects or simple functions thereof as predicates in a `tf.cond` is dangerous and error-prone: ``` v = tf.Variable(True) tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken. ``` Here, adding `use_resource=True` when constructing the variable will fix any nondeterminism issues: ``` v = tf.Variable(True, use_resource=True) tf.cond(v, lambda: v.assign(False), my_false_fn) ``` To use the replacement for variables which does not have these issues: * Add `use_resource=True` when constructing `tf.Variable`; * Call `tf.compat.v1.get_variable_scope().set_use_resource(True)` inside a `tf.compat.v1.variable_scope` before the `tf.compat.v1.get_variable()` call." 10112,RefVariable,tensorflow/tensorflow/python/ops/variables.py,1556,class,Ref-based implementation of variables. 10113,_try_guard_against_uninitialized_dependencies,tensorflow/tensorflow/python/ops/variables.py,2729,function,"Attempt to guard against dependencies on uninitialized variables. Replace references to variables in `initial_value` with references to the variable's initialized values. The initialized values are essentially conditional TensorFlow graphs that return a variable's value if it is initialized or its `initial_value` if it hasn't been initialized. This replacement is done on a best effort basis: - If the `initial_value` graph contains cycles, we don't do any replacements for that graph. - If the variables that `initial_value` depends on are not present in the `GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them. In these cases, it is up to the caller to ensure that the `initial_value` graph uses initialized variables or that they guard access to variables using their `initialized_value` method. Args: name: Variable name. initial_value: `Tensor`. The initial value. Returns: A `Tensor` suitable to initialize a variable. Raises: TypeError: If `initial_value` is not a `Tensor`." 10114,_has_cycle,tensorflow/tensorflow/python/ops/variables.py,2768,function,Detect cycles in the dependencies of `initial_value`. 10115,_safe_initial_value_from_tensor,tensorflow/tensorflow/python/ops/variables.py,2784,function,"Replace dependencies on variables with their initialized values. Args: name: Variable name. tensor: A `Tensor`. The tensor to replace. op_cache: A dict mapping operation names to `Operation`s. Used to memoize the results so as to avoid creating redundant operations. Returns: A `Tensor` compatible with `tensor`. Any inputs that lead to variable values will be replaced with a corresponding graph that uses the variable's initialized values. This is done on a best-effort basis. If no modifications need to be made then `tensor` will be returned unchanged." 10116,_safe_initial_value_from_op,tensorflow/tensorflow/python/ops/variables.py,2807,function,"Replace dependencies on variables with their initialized values. Args: name: Variable name. op: An `Operation`. The operation to replace. op_cache: A dict mapping operation names to `Operation`s. Used to memoize the results so as to avoid creating redundant operations. Returns: An `Operation` compatible with `op`. 
Any inputs that lead to variable values will be replaced with a corresponding graph that uses the variable's initialized values. This is done on a best-effort basis. If no modifications need to be made then `op` will be returned unchanged." 10117,_find_initialized_value_for_variable,tensorflow/tensorflow/python/ops/variables.py,2858,function,"Find the initialized value for a variable op. To do so, look up the variable op in the variables collection. Args: variable_op: A variable `Operation`. Returns: A `Tensor` representing the initialized value for the variable or `None` if the initialized value could not be found." 10118,PartitionedVariable,tensorflow/tensorflow/python/ops/variables.py,2884,class,"A container for partitioned `Variable` objects. @compatibility(eager) `tf.PartitionedVariable` is not compatible with eager execution. Use `tf.Variable` instead which is compatible with both eager execution and graph construction. See [the TensorFlow Eager Execution guide](https://www.tensorflow.org/guide/eager#variables_and_optimizers) for details on how variables work in eager execution. @end_compatibility" 10119,global_variables,tensorflow/tensorflow/python/ops/variables.py,3106,function,"Returns global variables. Global variables are variables that are shared across machines in a distributed environment. The `Variable()` constructor or `get_variable()` automatically adds new variables to the graph collection `GraphKeys.GLOBAL_VARIABLES`. This convenience function returns the contents of that collection. An alternative to global variables are local variables. See `tf.compat.v1.local_variables` Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of `Variable` objects." 10120,all_variables,tensorflow/tensorflow/python/ops/variables.py,3133,function,Use `tf.compat.v1.global_variables` instead. 10121,_all_saveable_objects,tensorflow/tensorflow/python/ops/variables.py,3138,function,"Returns all variables and `SaveableObject`s that must be checkpointed. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of `Variable` and `SaveableObject` to be checkpointed" 10122,local_variables,tensorflow/tensorflow/python/ops/variables.py,3157,function,"Returns local variables. Local variables are per-process variables, usually not saved/restored to checkpoint and used for temporary or intermediate values. For example, they can be used as counters for metrics computation or number of epochs this machine has read data. The `tf.contrib.framework.local_variable()` function automatically adds the new variable to `GraphKeys.LOCAL_VARIABLES`. This convenience function returns the contents of that collection. An alternative to local variables are global variables. See `tf.compat.v1.global_variables` Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. 
The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of local `Variable` objects." 10123,model_variables,tensorflow/tensorflow/python/ops/variables.py,3185,function,"Returns all variables in the MODEL_VARIABLES collection. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of model Variable objects." 10124,trainable_variables,tensorflow/tensorflow/python/ops/variables.py,3202,function,"Returns all variables created with `trainable=True`. When passed `trainable=True`, the `Variable()` constructor automatically adds new variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the contents of that collection. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of Variable objects." 10125,moving_average_variables,tensorflow/tensorflow/python/ops/variables.py,3224,function,"Returns all variables that maintain their moving averages. If an `ExponentialMovingAverage` object is created and the `apply()` method is called on a list of variables, these variables will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection. This convenience function returns the contents of that collection. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of Variable objects." 10126,variables_initializer,tensorflow/tensorflow/python/ops/variables.py,3246,function,"Returns an Op that initializes a list of variables. After you launch the graph in a session, you can run the returned Op to initialize all the variables in `var_list`. This Op runs all the initializers of the variables in `var_list` in parallel. Calling `initialize_variables()` is equivalent to passing the list of initializers to `Group()`. If `var_list` is empty, however, the function still returns an Op that can be run. That Op just has no effect. Args: var_list: List of `Variable` objects to initialize. name: Optional name for the returned operation. Returns: An Op that runs the initializers of all the specified variables." 10127,initialize_variables,tensorflow/tensorflow/python/ops/variables.py,3274,function,See `tf.compat.v1.variables_initializer`. 10128,global_variables_initializer,tensorflow/tensorflow/python/ops/variables.py,3280,function,"Returns an Op that initializes global variables. This is just a shortcut for `variables_initializer(global_variables())` Returns: An Op that initializes global variables in the graph." 10129,initialize_all_variables,tensorflow/tensorflow/python/ops/variables.py,3296,function,See `tf.compat.v1.global_variables_initializer`. 10130,local_variables_initializer,tensorflow/tensorflow/python/ops/variables.py,3302,function,"Returns an Op that initializes all local variables. 
This is just a shortcut for `variables_initializer(local_variables())` Returns: An Op that initializes all local variables in the graph." 10131,initialize_local_variables,tensorflow/tensorflow/python/ops/variables.py,3318,function,See `tf.compat.v1.local_variables_initializer`. 10132,is_variable_initialized,tensorflow/tensorflow/python/ops/variables.py,3325,function,"Tests if a variable has been initialized. Args: variable: A `Variable`. Returns: A scalar boolean Tensor, `True` if the variable has been initialized, `False` otherwise." 10133,assert_variables_initialized,tensorflow/tensorflow/python/ops/variables.py,3340,function,"Returns an Op to check if variables are initialized. NOTE: This function is obsolete and will be removed in 6 months. Please change your implementation to use `report_uninitialized_variables()`. When run, the returned Op will raise the exception `FailedPreconditionError` if any of the variables has not yet been initialized. Note: This function is implemented by trying to fetch the values of the variables. If one of the variables is not initialized a message may be logged by the C++ runtime. This is expected. Args: var_list: List of `Variable` objects to check. Defaults to the value of `global_variables().` Returns: An Op, or None if there are no variables." 10134,report_uninitialized_variables,tensorflow/tensorflow/python/ops/variables.py,3383,function,"Adds ops to list the names of uninitialized variables. When run, it returns a 1-D tensor containing the names of uninitialized variables if there are any, or an empty array if there are none. Args: var_list: List of `Variable` objects to check. Defaults to the value of `global_variables() + local_variables()` name: Optional name of the `Operation`. Returns: A 1-D tensor containing names of the uninitialized variables, or an empty 1-D tensor if there are no variables or no uninitialized variables." 10135,_has_valid_dims,tensorflow/tensorflow/python/ops/weights_broadcast_ops.py,33,function, 10136,_has_valid_nonscalar_shape,tensorflow/tensorflow/python/ops/weights_broadcast_ops.py,46,function, 10137,assert_broadcastable,tensorflow/tensorflow/python/ops/weights_broadcast_ops.py,63,function,"Asserts `weights` can be broadcast to `values`. In `tf.losses` and `tf.metrics`, we support limited weight broadcasting. We let weights be either scalar, or the same rank as the target values, with each dimension either 1, or the same as the corresponding values dimension. Args: weights: `Tensor` of weights. values: `Tensor` of values to which weights are applied. Returns: `Operation` raising `InvalidArgumentError` if `weights` has incorrect shape. `no_op` if static checks determine `weights` has correct shape. Raises: ValueError: If static checks determine `weights` has incorrect shape." 10138,broadcast_weights,tensorflow/tensorflow/python/ops/weights_broadcast_ops.py,136,function,"Broadcast `weights` to the same shape as `values`. This returns a version of `weights` following the same broadcast rules as `mul(weights, values)`, but limited to the weights shapes allowed by `assert_broadcastable`. When computing a weighted average, use this function to broadcast `weights` before summing them; e.g., `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`. Args: weights: `Tensor` whose shape is broadcastable to `values` according to the rules of `assert_broadcastable`. values: `Tensor` of any shape. Returns: `weights` broadcast to `values` shape according to the rules of `assert_broadcastable`." 
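A minimal sketch of the limited weight broadcasting that the `assert_broadcastable`/`broadcast_weights` entries above describe; the shapes below are illustrative assumptions, not taken from the indexed source:

```python
import tensorflow as tf
from tensorflow.python.ops import weights_broadcast_ops

# Weights must be scalar, or match values' rank with each dim 1 or equal.
values = tf.ones([2, 3])           # target values, shape [2, 3]
row_w = tf.constant([[1.], [2.]])  # rank 2, dims (2, 1): allowed
# A rank-1 weight like [1., 2.] would be rejected (rank mismatch).

w = weights_broadcast_ops.broadcast_weights(row_w, values)  # shape [2, 3]
# Weighted average per the docstring's recipe: reduce_sum(w*v)/reduce_sum(w).
weighted_mean = tf.reduce_sum(w * values) / tf.reduce_sum(w)
```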
10139,while_loop,tensorflow/tensorflow/python/ops/while_v2.py,58,function,"Like tf.while_loop, except emits a single While op." 10140,_WhileGrad,tensorflow/tensorflow/python/ops/while_v2.py,309,function,The gradient of a While op produced by while_loop. 10141,_build_while_op,tensorflow/tensorflow/python/ops/while_v2.py,414,function,Builds the functional StatelessWhile/While op. 10142,_get_intermediates,tensorflow/tensorflow/python/ops/while_v2.py,443,function,Returns all tensors in `func_graph` that should be accumulated. 10143,_preprocess_grad,tensorflow/tensorflow/python/ops/while_v2.py,486,function,"Returns the initial gradient to be used for a given output tensor. Args: grad: the original gradient Tensor passed to the gradient function. body_graph_output: the corresponding Tensor in the body graph. while_op_input: the corresponding Tensor input of the While op. while_op_output: the corresponding Tensor output of the While op. Returns: A Tensor or None." 10144,_zeros_like,tensorflow/tensorflow/python/ops/while_v2.py,528,function,Like array_ops.zeros_like() but also accepts resource var handles. 10145,_is_trainable,tensorflow/tensorflow/python/ops/while_v2.py,540,function,Returns whether the given tensor is trainable. 10146,_get_graph,tensorflow/tensorflow/python/ops/while_v2.py,558,function,"Returns `FuncGraph` for the given function attribute. Args: while_op: The While Operation. func_attr_name: string attr_graph_name: cached forward graph name Returns: `FuncGraph`" 10147,_create_grad_func,tensorflow/tensorflow/python/ops/while_v2.py,581,function,"Builds and returns the gradient FuncGraph of `func_graph` and its args. The returned grad_func_graph must be called with the returned args + grad_func_graph.captures. Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. grads: The incoming grads for `ys`. cond_graph: FuncGraph for the forward cond function. body_graph: FuncGraph for the forward body function. name: Name of the returned gradient function. while_op: The forward While op. maximum_iterations: Tensor. The maximum number of iterations. Returns: 2-tuple of (grad_func_graph, args)." 10148,_grad_fn,tensorflow/tensorflow/python/ops/while_v2.py,654,function,"Computes the gradient of `func_graph` in the current graph. This function builds the gradient graph of the corresponding forward-pass `func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs. Args: ys: A `Tensor` or list of tensors to be differentiated. xs: A `Tensor` or list of tensors to be used for differentiation. args: The input arguments. args[0] - Loop counter args[1] - Total number of iterations. args[2] - maximum_iterations. args[3:] - Incoming gradients for `ys`. func_graph: function.FuncGraph. The corresponding forward-pass function. Returns: The output gradient Tensors." 10149,_resolve_grad_captures,tensorflow/tensorflow/python/ops/while_v2.py,693,function,"Returns the tensors to pass as captured inputs to `body_grad_graph`. `body_grad_graph` may have external references to: 1. Its outer graph containing the input gradients. These are left as-is. 2. Accumulators captured from the forward-pass graph. These should have been added as `while_op` outputs after the gradient graph was built. We replace these with the corresponding output of `while_op`, i.e. a tensor in `body_graph.outer_graph`. 
In the case of nested control flow or functions, the gradient logic handling `body_grad_graph.outer_graph` will make sure the tensor from `body_graph.outer_graph` is also correctly captured. Args: body_graph: FuncGraph. The forward-pass body function. body_grad_graph: FuncGraph. The body gradients function. while_op: The forward-pass While Operation calling `body_graph`. Returns: A list of input tensors to be passed as the captured inputs to `body_grad_graph`." 10150,_get_structured_grad_output,tensorflow/tensorflow/python/ops/while_v2.py,738,function,"Returns the values that should be returned from the while grad function. Args: outputs: the raw Tensor outputs of the grad While op. grads: the input gradients to the gradient function. body_grad_graph: _WhileBodyGradFuncGraph. Returns: A list of gradient values. May include Nones." 10151,_get_accumulator,tensorflow/tensorflow/python/ops/while_v2.py,778,function,"Returns TensorList if any containing accumulated values of tensor. We try to find a pattern of the form: input_tl tensor \ / (TensorListPushBack) | output_tl which satisfies the following conditions: 1. input_tl must be in tensor.graph.inputs. 2. output_tl or Identity(output_tl) must be in tensor.graph.outputs. 3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_tl). output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is returned if such a pattern is found else None is returned. Args: tensor: The Tensor to be accumulated. Returns: A variant tensor in the same graph as `tensor` or None if no accumulator is found." 10152,_WhileBodyGradFuncGraph,tensorflow/tensorflow/python/ops/while_v2.py,847,class,"FuncGraph for the gradient function of the body of a While op. Contains the logic for capturing the tensors from the body of the forward While op which is as follows: 1. If the tensor is of resource type (these are not accumulated): a. Ensure that the tensor is a loop invariant, i.e., it exists in both loop inputs and outputs at the same index. b. Lookup the corresponding resource tensor in the forward outer graph and try to capture that. 2. If the tensor is not of resource type: a. Create an accumulator for that tensor and output it from the forward pass. Note this also requires adding it as an input to the forward pass. b. Capture the accumulator from the forward pass in this FuncGraph. This will later be resolved to the correct output of the forward While op. c. Pop a value from the captured placeholder and use it as the captured value for the forward pass tensor. This only allows capturing tensors in the forward graph. A ValueError is raised if an attempt is made to capture a tensor not in the forward graph. To manually capture a tensor that is not in the forward graph, call `capture` with `allowlisted=True`. Note: The `captures` dict does not contain the forward tensor since it is not directly captured. It contains the accumulator corresponding to this forward tensor. Attributes: while_op_needs_rewrite: True if any non-resource intermediates were captured, meaning the forward While op needs to be rewritten to output the corresponding accumulators. extra_inputs: list of EmptyTensorList tensors to be used as initial input to the new accumulators in the forward graph. It may also contain external captures of the custom gradient function. popped_tensor_lists: dict from the captured accumulator placeholder to the TensorList obtained after popping the intermediate tensor from it. 
The values of this dict need to be added to the list of outputs." 10153,_check_shapes_compat,tensorflow/tensorflow/python/ops/while_v2.py,1143,function, 10154,_check_num_inputs_outputs,tensorflow/tensorflow/python/ops/while_v2.py,1154,function,Checks the number of inputs/outputs of `cond_graph` and `body_graph`. 10155,_check_inputs_outputs_types_match,tensorflow/tensorflow/python/ops/while_v2.py,1169,function, 10156,_build_cond_placeholders_name_prefix,tensorflow/tensorflow/python/ops/while_v2.py,1178,function, 10157,_duplicate_body_captures_in_cond,tensorflow/tensorflow/python/ops/while_v2.py,1182,function,"Creates placeholders for body captures in cond_graph. This is needed to match signatures of cond and body graphs. Args: cond_graph: cond branch graph body_graph_captures: Tensors which were captured when building the `body_graph`." 10158,_copy_handle_data,tensorflow/tensorflow/python/ops/while_v2.py,1222,function, 10159,_graph_name,tensorflow/tensorflow/python/ops/while_v2.py,1227,function, 10160,_pack_sequence_as,tensorflow/tensorflow/python/ops/while_v2.py,1233,function,Like `nest.pack_sequence_as` but also replaces flows with TensorArrays. 10161,_tensor_array_to_flow,tensorflow/tensorflow/python/ops/while_v2.py,1249,function, 10162,_build_maximum_iterations_loop_var,tensorflow/tensorflow/python/ops/while_v2.py,1259,function, 10163,_build_accumulator_name,tensorflow/tensorflow/python/ops/while_v2.py,1269,function, 10164,_is_loop_invariant,tensorflow/tensorflow/python/ops/while_v2.py,1274,function, 10165,_OperationWithOutputs,tensorflow/tensorflow/python/ops/while_v2.py,1278,class,"Operation with pre-built `TF_Output`s. The C API for creating the extra placeholders for the cond graph returns SWIG wrapped TF_Output* pointers which we can use directly for `Operation.outputs`. The default constructor for `Operation` does not provide a way of specifying pre-built output tensors and always creates them. This is a performance overhead. It is not clear if adding that feature to the `Operation` API would be generally useful so for now we just have our own lightweight `Operation` implementation. Note that this does not extract a stacktrace as well since we don't expect this operation to be used. TODO(b/143286622): This should not be required once captures are separated from regular loop vars." 10166,_set_read_only_resource_inputs_attr,tensorflow/tensorflow/python/ops/while_v2.py,1302,function,"Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: While Operation. branch_graphs: List of branch FuncGraphs." 10167,rewrite_grad_indexed_slices,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,31,function,"Handles special case of IndexedSlices returned from while gradient. Some gradient functions return IndexedSlices instead of a Tensor (e.g. the gradient of Gather ops). When this happens in the gradient of a while body, the resulting gradient body function will have mismatched inputs and outputs, since the input is a single Tensor, but the IndexedSlices gets unnested into three output Tensors. This function fixes this by rewriting the gradient body to have three inputs to match the three outputs, i.e., it effectively converts the input Tensor into an input IndexedSlices. It also returns new `loop_vars` to reflect the new inputs. Args: grads: the input gradient Tensors to the while gradient computation. body_grad_graph: _WhileBodyGradFuncGraph. loop_vars: list of Tensors. The inputs to body_grad_graph. 
forward_inputs: list of Tensors. The (flat) inputs to the forward-pass While op. Returns: The new loop_vars to pass to body_grad_graph." 10168,_get_tensor_index_in_iterable,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,84,function,"Returns index of first occurrence of `t`, raises ValueError if not found." 10169,_rewrite_output_as_tensor,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,92,function,"Rewrites grad_output_slices to be a Tensor output. Args: body_grad_graph: _WhileBodyGradFuncGraph. grad_output_slices: IndexedSlices output of body_grad_graph." 10170,_rewrite_input_as_indexed_slices,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,109,function,"Rewrites grad_output_slices's corresponding input to be an IndexedSlices. This rewrite requires that forward_input was captured in the forward loop, i.e. is not a user-specified loop variable. This is important because the rewrite assumes that forward_input is passed through to its corresponding output unchanged. This assumption is used in _rewrite_input_as_indexed_slices, which depends on the exact gradient structure produced by the input's fanout. This can yield a more efficient computation than using _rewrite_output_as_tensor, since it preserves the IndexedSlices structure instead of converting the IndexedSlices to a dense Tensor. Args: body_grad_graph: _WhileBodyGradFuncGraph. grad_output_slices: IndexedSlices output of body_grad_graph. forward_input: the corresponding Tensor input to the forward loop. loop_vars: list of Tensors. The inputs to body_grad_graph. Returns: The new loop_vars to pass to body_grad_graph." 10171,_create_grad_indexed_slices_init,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,167,function,"Creates an IndexedSlices to pass as input to the while grad function. Args: grad_output_slices: IndexedSlices. The corresponding while grad function output. forward_input: Tensor. The corresponding input to the forward while op. Returns: Zeros IndexedSlices, created in current Graph." 10172,_rewrite_grad_indexed_slices_output,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,213,function,"Creates a new version of old_output_slices with new_input_slices as input. This method assumes that old_output_slices.{values,indices} are produced by concatenating the incoming gradient Tensor input with the IndexedSlices produced by the gradient computation of the while body. See backprop.aggregate_indexed_slices_gradients for where these concats are constructed. We build new concats that use new_input_slices instead of the original Tensor input. Args: old_output_slices: original IndexedSlices output of while gradient. new_input_slices: new IndexedSlices to use as input to while gradient. Returns: A new IndexedSlices to replace old_output_slices." 10173,_update_indexed_slices_param,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,248,function,"Updates graph with new IndexedSlices input/output. Updates graph's metadata to output the gradient computation defined by init_slices, input_slices, and output_slices, instead of outputting old_output_slices. Also returns a new version of loop_vars with init_slices replacing the old input. Args: graph: _WhileBodyGradFuncGraph. loop_vars: the inputs to graph. init_slices: the new IndexedSlices to use as input to graph. input_slices: the new IndexedSlices in graph that should be fed by init_slices. 
output_slices: the new IndexedSlices in graph that should be the corresponding output to input_slices. old_output_slices: the IndexedSlices in graph that are currently being output. Returns: New loop_vars to pass to graph." 10174,_flatten,tensorflow/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py,290,function, 10175,Bernoulli,tensorflow/tensorflow/python/ops/distributions/bernoulli.py,36,class,"Bernoulli distribution. The Bernoulli distribution with `probs` parameter, i.e., the probability of a `1` outcome (vs a `0` outcome)." 10176,_kl_bernoulli_bernoulli,tensorflow/tensorflow/python/ops/distributions/bernoulli.py,170,function,"Calculate the batched KL divergence KL(a || b) with a and b Bernoulli. Args: a: instance of a Bernoulli distribution object. b: instance of a Bernoulli distribution object. name: (optional) Name to use for created operations. default is ""kl_bernoulli_bernoulli"". Returns: Batchwise KL(a || b)" 10177,Beta,tensorflow/tensorflow/python/ops/distributions/beta.py,51,class,"Beta distribution. The Beta distribution is defined over the `(0, 1)` interval using parameters `concentration1` (aka ""alpha"") and `concentration0` (aka ""beta""). #### Mathematical Details The probability density function (pdf) is, ```none pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta) ``` where: * `concentration1 = alpha`, * `concentration0 = beta`, * `Z` is the normalization constant, and, * `Gamma` is the [gamma function]( https://en.wikipedia.org/wiki/Gamma_function). The concentration parameters represent mean total counts of a `1` or a `0`, i.e., ```none concentration1 = alpha = mean * total_concentration concentration0 = beta = (1. - mean) * total_concentration ``` where `mean` in `(0, 1)` and `total_concentration` is a positive real number representing a mean `total_count = concentration1 + concentration0`. Distribution parameters are automatically broadcast in all functions; see examples for details. Warning: The samples can be zero due to finite precision. This happens more often when some of the concentrations are very small. Make sure to round the samples to `np.finfo(dtype).tiny` before computing the density. Samples of this distribution are reparameterized (pathwise differentiable). The derivatives are computed using the approach described in (Figurnov et al., 2018). #### Examples ```python import tensorflow_probability as tfp tfd = tfp.distributions # Create a batch of three Beta distributions. alpha = [1, 2, 3] beta = [1, 2, 3] dist = tfd.Beta(alpha, beta) dist.sample([4, 5]) # Shape [4, 5, 3] # `x` has three batch entries, each with two samples. x = [[.1, .4, .5], [.2, .3, .5]] # Calculate the probability of each pair of samples under the corresponding # distribution in `dist`. dist.prob(x) # Shape [2, 3] ``` ```python # Create batch_shape=[2, 3] via parameter broadcast: alpha = [[1.], [2]] # Shape [2, 1] beta = [3., 4, 5] # Shape [3] dist = tfd.Beta(alpha, beta) # alpha broadcast as: [[1., 1, 1,], # [2, 2, 2]] # beta broadcast as: [[3., 4, 5], # [3, 4, 5]] # batch_shape [2, 3] dist.sample([4, 5]) # Shape [4, 5, 2, 3] x = [.2, .3, .5] # x will be broadcast as [[.2, .3, .5], # [.2, .3, .5]], # thus matching batch_shape [2, 3]. dist.prob(x) # Shape [2, 3] ``` Compute the gradients of samples w.r.t. 
the parameters: ```python alpha = tf.constant(1.0) beta = tf.constant(2.0) dist = tfd.Beta(alpha, beta) samples = dist.sample(5) # Shape [5] loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function # Unbiased stochastic gradients of the loss function grads = tf.gradients(loss, [alpha, beta]) ``` References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 10178,BetaWithSoftplusConcentration,tensorflow/tensorflow/python/ops/distributions/beta.py,354,class,Beta with softplus transform of `concentration1` and `concentration0`. 10179,_kl_beta_beta,tensorflow/tensorflow/python/ops/distributions/beta.py,383,function,"Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta. Args: d1: instance of a Beta distribution object. d2: instance of a Beta distribution object. name: (optional) Name to use for created operations. default is ""kl_beta_beta"". Returns: Batchwise KL(d1 || d2)" 10180,_Mapping,tensorflow/tensorflow/python/ops/distributions/bijector_impl.py,45,class,Helper class to make it easier to manage caching in `Bijector`. 10181,Bijector,tensorflow/tensorflow/python/ops/distributions/bijector_impl.py,136,class,"Interface for transformations of a `Distribution` sample. Bijectors can be used to represent any differentiable and injective (one to one) function defined on an open subset of `R^n`. Some non-injective transformations are also supported (see ""Non Injective Transforms"" below). #### Mathematical Details A `Bijector` implements a [smooth covering map]( https://en.wikipedia.org/wiki/Local_diffeomorphism), i.e., a local diffeomorphism such that every point in the target has a neighborhood evenly covered by a map ([see also]( https://en.wikipedia.org/wiki/Covering_space#Covering_of_a_manifold)). A `Bijector` is used by `TransformedDistribution` but can be generally used for transforming a `Distribution` generated `Tensor`. A `Bijector` is characterized by three operations: 1. Forward Useful for turning one random outcome into another random outcome from a different distribution. 2. Inverse Useful for ""reversing"" a transformation to compute one probability in terms of another. 3. `log_det_jacobian(x)` ""The log of the absolute value of the determinant of the matrix of all first-order partial derivatives of the inverse function."" Useful for inverting a transformation to compute one probability in terms of another. Geometrically, the Jacobian determinant is the volume of the transformation and is used to scale the probability. We take the absolute value of the determinant before log to avoid NaN values. Geometrically, a negative determinant corresponds to an orientation-reversing transformation. It is ok for us to discard the sign of the determinant because we only integrate everywhere-nonnegative functions (probability densities) and the correct orientation is always the one that produces a nonnegative integrand. By convention, transformations of random variables are named in terms of the forward transformation. The forward transformation creates samples, the inverse is useful for computing probabilities. #### Example Uses - Basic properties: ```python x = ... # A tensor. # Evaluate forward transformation. fwd_x = my_bijector.forward(x) x == my_bijector.inverse(fwd_x) x != my_bijector.forward(fwd_x) # Not equal because x != g(g(x)). 
``` - Computing a log-likelihood: ```python def transformed_log_prob(bijector, log_prob, x): return (bijector.inverse_log_det_jacobian(x, event_ndims=0) + log_prob(bijector.inverse(x))) ``` - Transforming a random outcome: ```python def transformed_sample(bijector, x): return bijector.forward(x) ``` #### Example Bijectors - ""Exponential"" ```none Y = g(X) = exp(X) X ~ Normal(0, 1) # Univariate. ``` Implies: ```none g^{-1}(Y) = log(Y) |Jacobian(g^{-1})(y)| = 1 / y Y ~ LogNormal(0, 1), i.e., prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y)) = (1 / y) Normal(log(y); 0, 1) ``` Here is an example of how one might implement the `Exp` bijector: ```python class Exp(Bijector): def __init__(self, validate_args=False, name=""exp""): super(Exp, self).__init__( validate_args=validate_args, forward_min_event_ndims=0, name=name) def _forward(self, x): return math_ops.exp(x) def _inverse(self, y): return math_ops.log(y) def _inverse_log_det_jacobian(self, y): return -self._forward_log_det_jacobian(self._inverse(y)) def _forward_log_det_jacobian(self, x): # Notice that we needn't do any reducing, even when `event_ndims > 0`. # The base Bijector class will handle reducing for us; it knows how # to do so because we called `super` `__init__` with # `forward_min_event_ndims = 0`. return x ``` - ""Affine"" ```none Y = g(X) = sqrtSigma * X + mu X ~ MultivariateNormal(0, I_d) ``` Implies: ```none g^{-1}(Y) = inv(sqrtSigma) * (Y - mu) |Jacobian(g^{-1})(y)| = det(inv(sqrtSigma)) Y ~ MultivariateNormal(mu, sqrtSigma) , i.e., prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y)) = det(sqrtSigma)^(-d) * MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d) ``` #### Min_event_ndims and Naming Bijectors are named for the dimensionality of data they act on (i.e. without broadcasting). We can think of bijectors having an intrinsic `min_event_ndims`, which is the minimum number of dimensions for the bijector to act on. For instance, a Cholesky decomposition requires a matrix, and hence `min_event_ndims=2`. Some examples: `AffineScalar: min_event_ndims=0` `Affine: min_event_ndims=1` `Cholesky: min_event_ndims=2` `Exp: min_event_ndims=0` `Sigmoid: min_event_ndims=0` `SoftmaxCentered: min_event_ndims=1` Note the difference between `Affine` and `AffineScalar`. `AffineScalar` operates on scalar events, whereas `Affine` operates on vector-valued events. More generally, there is a `forward_min_event_ndims` and an `inverse_min_event_ndims`. In most cases, these will be the same. However, for some shape changing bijectors, these will be different (e.g. a bijector which pads an extra dimension at the end, might have `forward_min_event_ndims=0` and `inverse_min_event_ndims=1`). #### Jacobian Determinant The Jacobian determinant is a reduction over `event_ndims - min_event_ndims` (`forward_min_event_ndims` for `forward_log_det_jacobian` and `inverse_min_event_ndims` for `inverse_log_det_jacobian`). To see this, consider the `Exp` `Bijector` applied to a `Tensor` which has sample, batch, and event (S, B, E) shape semantics. Suppose the `Tensor`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`. The shape of the `Tensor` returned by `forward` and `inverse` is unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by `inverse_log_det_jacobian` is `[4, 2]` because the Jacobian determinant is a reduction over the event dimensions. Another example is the `Affine` `Bijector`. Because `min_event_ndims = 1`, the Jacobian determinant reduction is over `event_ndims - 1`. 
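An illustrative numeric sketch of that reduction (the tensor, shapes, and `event_ndims` below are assumptions chosen to match the `Exp` example, not part of the API above):

```python
import tensorflow as tf

# For Exp, log|det J_forward|(x) is x summed over the event dimensions,
# i.e. a reduction over `event_ndims - forward_min_event_ndims` axes.
x = tf.random.normal([4, 2, 3, 3])   # partitioned as (S=[4], B=[2], E=[3, 3])
event_ndims = 2                      # Exp has forward_min_event_ndims=0
axes = list(range(-event_ndims, 0))  # the two trailing event axes

fldj = tf.reduce_sum(x, axis=axes)   # shape [4, 2], matching the text above
```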
It is sometimes useful to implement the inverse Jacobian determinant as the negative forward Jacobian determinant. For example, ```python def _inverse_log_det_jacobian(self, y): return -self._forward_log_det_jac(self._inverse(y)) # Note negation. ``` The correctness of this approach can be seen from the following claim. - Claim: Assume `Y = g(X)` is a bijection whose derivative exists and is nonzero for its domain, i.e., `dY/dX = d/dX g(X) != 0`. Then: ```none (log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X) ``` - Proof: From the bijective, nonzero differentiability of `g`, the [inverse function theorem]( https://en.wikipedia.org/wiki/Inverse_function_theorem) implies `g^{-1}` is differentiable in the image of `g`. Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields `I = g'(g^{-1}(y))*g^{-1}'(y)`. The same theorem also implies `g^{-1}'` is non-singular therefore: `inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`. The claim follows from [properties of determinant]( https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups). Generally it's preferable to directly implement the inverse Jacobian determinant. This should have superior numerical stability and will often share subgraphs with the `_inverse` implementation. #### Is_constant_jacobian Certain bijectors will have constant jacobian matrices. For instance, the `Affine` bijector encodes multiplication by a matrix plus a shift, with jacobian matrix, the same aforementioned matrix. `is_constant_jacobian` encodes the fact that the jacobian matrix is constant. The semantics of this argument are the following: * Repeated calls to ""log_det_jacobian"" functions with the same `event_ndims` (but not necessarily same input), will return the first computed jacobian (because the matrix is constant, and hence is input independent). * `log_det_jacobian` implementations are merely broadcastable to the true `log_det_jacobian` (because, again, the jacobian matrix is input independent). Specifically, `log_det_jacobian` is implemented as the log jacobian determinant for a single input. ```python class Identity(Bijector): def __init__(self, validate_args=False, name=""identity""): super(Identity, self).__init__( is_constant_jacobian=True, validate_args=validate_args, forward_min_event_ndims=0, name=name) def _forward(self, x): return x def _inverse(self, y): return y def _inverse_log_det_jacobian(self, y): return -self._forward_log_det_jacobian(self._inverse(y)) def _forward_log_det_jacobian(self, x): # The full log jacobian determinant would be array_ops.zeros_like(x). # However, we circumvent materializing that, since the jacobian # calculation is input independent, and we specify it for one input. return constant_op.constant(0., x.dtype.base_dtype) ``` #### Subclass Requirements - Subclasses typically implement: - `_forward`, - `_inverse`, - `_inverse_log_det_jacobian`, - `_forward_log_det_jacobian` (optional). The `_forward_log_det_jacobian` is called when the bijector is inverted via the `Invert` bijector. If undefined, a slightly less efficient calculation, `-1 * _inverse_log_det_jacobian`, is used. If the bijector changes the shape of the input, you must also implement: - _forward_event_shape_tensor, - _forward_event_shape (optional), - _inverse_event_shape_tensor, - _inverse_event_shape (optional). By default the event-shape is assumed unchanged from input. 
- If the `Bijector`'s use is limited to `TransformedDistribution` (or friends like `QuantizedDistribution`) then depending on your use, you may not need to implement all of `_forward` and `_inverse` functions. Examples: 1. Sampling (e.g., `sample`) only requires `_forward`. 2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require `_inverse` (and related). 3. Only calling probability functions on the output of `sample` means `_inverse` can be implemented as a cache lookup. See ""Example Uses"" [above] which shows how these functions are used to transform a distribution. (Note: `_forward` could theoretically be implemented as a cache lookup but this would require controlling the underlying sample generation mechanism.) #### Non Injective Transforms **WARNING** Handling of non-injective transforms is subject to change. Non injective maps `g` are supported, provided their domain `D` can be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that, ignoring sets of measure zero, the restriction of `g` to each subset is a differentiable bijection onto `g(D)`. In particular, this implies that for `y in g(D)`, the set inverse, i.e. `g^{-1}(y) = {x in D : g(x) = y}`, always contains exactly `k` distinct points. The property, `_is_injective` is set to `False` to indicate that the bijector is not injective, yet satisfies the above condition. The usual bijector API is modified in the case `_is_injective is False` (see method docstrings for specifics). Here we show by example the `AbsoluteValue` bijector. In this case, the domain `D = (-inf, inf)`, can be partitioned into `D1 = (-inf, 0)`, `D2 = {0}`, and `D3 = (0, inf)`. Let `gi` be the restriction of `g` to `Di`, then both `g1` and `g3` are bijections onto `(0, inf)`, with `g1^{-1}(y) = -y`, and `g3^{-1}(y) = y`. We will use `g1` and `g3` to define bijector methods over `D1` and `D3`. `D2 = {0}` is an oddball in that `g2` is one to one, and the derivative is not well defined. Fortunately, when considering transformations of probability densities (e.g. in `TransformedDistribution`), sets of measure zero have no effect in theory, and only a small effect in 32 or 64 bit precision. For that reason, we define `inverse(0)` and `inverse_log_det_jacobian(0)` both as `[0, 0]`, which is convenient and results in a left-semicontinuous pdf. ```python abs = tfp.distributions.bijectors.AbsoluteValue() abs.forward(-1.) ==> 1. abs.forward(1.) ==> 1. abs.inverse(1.) ==> (-1., 1.) # The |dX/dY| is constant, == 1. So Log|dX/dY| == 0. abs.inverse_log_det_jacobian(1., event_ndims=0) ==> (0., 0.) # Special case handling of 0. abs.inverse(0.) ==> (0., 0.) abs.inverse_log_det_jacobian(0., event_ndims=0) ==> (0., 0.) ```" 10182,assert_finite,tensorflow/tensorflow/python/ops/distributions/bijector_test_util.py,28,function, 10183,assert_strictly_increasing,tensorflow/tensorflow/python/ops/distributions/bijector_test_util.py,33,function, 10184,assert_strictly_decreasing,tensorflow/tensorflow/python/ops/distributions/bijector_test_util.py,37,function, 10185,assert_strictly_monotonic,tensorflow/tensorflow/python/ops/distributions/bijector_test_util.py,41,function, 10186,assert_scalar_congruency,tensorflow/tensorflow/python/ops/distributions/bijector_test_util.py,48,function,"Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent. We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the `bijector` in order to check that: 1. the forward is strictly monotonic. 2. the forward/inverse methods are inverses of each other. 3. 
the jacobian is the correct change of measure. This can only be used for a Bijector mapping open subsets of the real line to themselves. This is due to the fact that this test compares the `prob` before/after transformation with the Lebesgue measure on the line. Args: bijector: Instance of Bijector lower_x: Python scalar. upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in the domain of the `bijector`. The `bijector` should probably not produce huge variation in values in the interval `(lower_x, upper_x)`, or else the variance based check of the Jacobian will require small `rtol` or huge `n`. n: Number of samples to draw for the checks. rtol: Positive number. Used for the Jacobian check. sess: `tf.compat.v1.Session`. Defaults to the default session. Raises: AssertionError: If tests fail." 10187,assert_bijective_and_finite,tensorflow/tensorflow/python/ops/distributions/bijector_test_util.py,163,function,"Assert that forward/inverse (along with jacobians) are inverses and finite. It is recommended to use x and y values that are very very close to the edge of the Bijector's domain. Args: bijector: A Bijector instance. x: np.array of values in the domain of bijector.forward. y: np.array of values in the domain of bijector.inverse. event_ndims: Integer describing the number of event dimensions this bijector operates on. atol: Absolute tolerance. rtol: Relative tolerance. sess: TensorFlow session. Defaults to the default session. Raises: AssertionError: If tests fail." 10188,_broadcast_cat_event_and_params,tensorflow/tensorflow/python/ops/distributions/categorical.py,36,function,Broadcasts the event or distribution parameters. 10189,Categorical,tensorflow/tensorflow/python/ops/distributions/categorical.py,63,class,"Categorical distribution. The Categorical distribution is parameterized by either probabilities or log-probabilities of a set of `K` classes. It is defined over the integers `{0, 1, ..., K-1}`. The Categorical distribution is closely related to the `OneHotCategorical` and `Multinomial` distributions. The Categorical distribution can be intuited as generating samples according to `argmax{ OneHotCategorical(probs) }` itself being identical to `argmax{ Multinomial(probs, total_count=1) }`. #### Mathematical Details The probability mass function (pmf) is, ```none pmf(k; pi) = prod_j pi_j**[k == j] ``` #### Pitfalls The number of classes, `K`, must not exceed: - the largest integer representable by `self.dtype`, i.e., `2**(mantissa_bits+1)` (IEEE 754), - the maximum `Tensor` index, i.e., `2**31-1`. In other words, ```python K <= min(2**31-1, { tf.float16: 2**11, tf.float32: 2**24, tf.float64: 2**53 }[param.dtype]) ``` Note: This condition is validated only when `self.validate_args = True`. #### Examples Creates a 3-class distribution with the 2nd class being most likely. ```python dist = Categorical(probs=[0.1, 0.5, 0.4]) n = 1e4 empirical_prob = tf.cast( tf.histogram_fixed_width( dist.sample(int(n)), [0., 2], nbins=3), dtype=tf.float32) / n # ==> array([ 0.1005, 0.5037, 0.3958], dtype=float32) ``` Creates a 3-class distribution with the 2nd class being most likely. Parameterized by [logits](https://en.wikipedia.org/wiki/Logit) rather than probabilities. ```python dist = Categorical(logits=np.log([0.1, 0.5, 0.4])) n = 1e4 empirical_prob = tf.cast( tf.histogram_fixed_width( dist.sample(int(n)), [0., 2], nbins=3), dtype=tf.float32) / n # ==> array([0.1045, 0.5047, 0.3908], dtype=float32) ``` Creates a 3-class distribution with the 3rd class being most likely. 
The distribution functions can be evaluated on counts. ```python # counts is a scalar. p = [0.1, 0.4, 0.5] dist = Categorical(probs=p) dist.prob(0) # Shape [] # p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts. counts = [1, 0] dist.prob(counts) # Shape [2] # p will be broadcast to shape [3, 5, 7, 3] to match counts. counts = [[...]] # Shape [5, 7, 3] dist.prob(counts) # Shape [5, 7, 3] ```" 10190,_kl_categorical_categorical,tensorflow/tensorflow/python/ops/distributions/categorical.py,329,function,"Calculate the batched KL divergence KL(a || b) with a and b Categorical. Args: a: instance of a Categorical distribution object. b: instance of a Categorical distribution object. name: (optional) Name to use for created operations. default is ""kl_categorical_categorical"". Returns: Batchwise KL(a || b)" 10191,Dirichlet,tensorflow/tensorflow/python/ops/distributions/dirichlet.py,49,class,"Dirichlet distribution. The Dirichlet distribution is defined over the [`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive, length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the Beta distribution when `k = 2`. #### Mathematical Details The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e., ```none S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }. ``` The probability density function (pdf) is, ```none pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j) ``` where: * `x in S^{k-1}`, i.e., the `(k-1)`-simplex, * `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`, * `Z` is the normalization constant aka the [multivariate beta function]( https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function), and, * `Gamma` is the [gamma function]( https://en.wikipedia.org/wiki/Gamma_function). The `concentration` represents mean total counts of class occurrence, i.e., ```none concentration = alpha = mean * total_concentration ``` where `mean` in `S^{k-1}` and `total_concentration` is a positive real number representing a mean total count. Distribution parameters are automatically broadcast in all functions; see examples for details. Warning: Some components of the samples can be zero due to finite precision. This happens more often when some of the concentrations are very small. Make sure to round the samples to `np.finfo(dtype).tiny` before computing the density. Samples of this distribution are reparameterized (pathwise differentiable). The derivatives are computed using the approach described in (Figurnov et al., 2018). #### Examples ```python import tensorflow_probability as tfp tfd = tfp.distributions # Create a single trivariate Dirichlet, with the 3rd class being three times # more frequent than the first. I.e., batch_shape=[], event_shape=[3]. alpha = [1., 2, 3] dist = tfd.Dirichlet(alpha) dist.sample([4, 5]) # shape: [4, 5, 3] # x has one sample, one batch, three classes: x = [.2, .3, .5] # shape: [3] dist.prob(x) # shape: [] # x has two samples from one batch: x = [[.1, .4, .5], [.2, .3, .5]] dist.prob(x) # shape: [2] # alpha will be broadcast to shape [5, 7, 3] to match x. x = [[...]] # shape: [5, 7, 3] dist.prob(x) # shape: [5, 7] ``` ```python # Create batch_shape=[2], event_shape=[3]: alpha = [[1., 2, 3], [4, 5, 6]] # shape: [2, 3] dist = tfd.Dirichlet(alpha) dist.sample([4, 5]) # shape: [4, 5, 2, 3] x = [.2, .3, .5] # x will be broadcast as [[.2, .3, .5], # [.2, .3, .5]], # thus matching batch_shape [2, 3]. 
dist.prob(x) # shape: [2] ``` Compute the gradients of samples w.r.t. the parameters: ```python alpha = tf.constant([1.0, 2.0, 3.0]) dist = tfd.Dirichlet(alpha) samples = dist.sample(5) # Shape [5, 3] loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function # Unbiased stochastic gradients of the loss function grads = tf.gradients(loss, alpha) ``` References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 10192,_kl_dirichlet_dirichlet,tensorflow/tensorflow/python/ops/distributions/dirichlet.py,339,function,"Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet. Args: d1: instance of a Dirichlet distribution object. d2: instance of a Dirichlet distribution object. name: (optional) Name to use for created operations. default is ""kl_dirichlet_dirichlet"". Returns: Batchwise KL(d1 || d2)" 10193,DirichletMultinomial,tensorflow/tensorflow/python/ops/distributions/dirichlet_multinomial.py,55,class,"Dirichlet-Multinomial compound distribution. The Dirichlet-Multinomial distribution is parameterized by a (batch of) length-`K` `concentration` vectors (`K > 1`) and a `total_count` number of trials, i.e., the number of trials per draw from the DirichletMultinomial. It is defined over a (batch of) length-`K` vector `counts` such that `tf.reduce_sum(counts, -1) = total_count`. The Dirichlet-Multinomial is identically the Beta-Binomial distribution when `K = 2`. #### Mathematical Details The Dirichlet-Multinomial is a distribution over `K`-class counts, i.e., a length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`. The probability mass function (pmf) is, ```none pmf(n; alpha, N) = Beta(alpha + n) / (prod_j n_j!) / Z Z = Beta(alpha) / N! ``` where: * `concentration = alpha = [alpha_0, ..., alpha_{K-1}]`, `alpha_j > 0`, * `total_count = N`, `N` a positive integer, * `N!` is `N` factorial, and, * `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the [multivariate beta function]( https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function), and, * `Gamma` is the [gamma function]( https://en.wikipedia.org/wiki/Gamma_function). Dirichlet-Multinomial is a [compound distribution]( https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e., its samples are generated as follows. 1. Choose class probabilities: `probs = [p_0,...,p_{K-1}] ~ Dir(concentration)` 2. Draw integers: `counts = [n_0,...,n_{K-1}] ~ Multinomial(total_count, probs)` The last `concentration` dimension parametrizes a single Dirichlet-Multinomial distribution. When calling distribution functions (e.g., `dist.prob(counts)`), `concentration`, `total_count` and `counts` are broadcast to the same shape. The last dimension of `counts` corresponds to single Dirichlet-Multinomial distributions. Distribution parameters are automatically broadcast in all functions; see examples for details. #### Pitfalls The number of classes, `K`, must not exceed: - the largest integer representable by `self.dtype`, i.e., `2**(mantissa_bits+1)` (IEEE 754), - the maximum `Tensor` index, i.e., `2**31-1`. In other words, ```python K <= min(2**31-1, { tf.float16: 2**11, tf.float32: 2**24, tf.float64: 2**53 }[param.dtype]) ``` Note: This condition is validated only when `self.validate_args = True`. #### Examples ```python alpha = [1., 2., 3.] n = 2. 
dist = DirichletMultinomial(n, alpha) ``` Creates a 3-class distribution, with the 3rd class being most likely to be drawn. The distribution functions can be evaluated on counts. ```python # counts same shape as alpha. counts = [0., 0., 2.] dist.prob(counts) # Shape [] # alpha will be broadcast to [[1., 2., 3.], [1., 2., 3.]] to match counts. counts = [[1., 1., 0.], [1., 0., 1.]] dist.prob(counts) # Shape [2] # alpha will be broadcast to shape [5, 7, 3] to match counts. counts = [[...]] # Shape [5, 7, 3] dist.prob(counts) # Shape [5, 7] ``` Creates a 2-batch of 3-class distributions. ```python alpha = [[1., 2., 3.], [4., 5., 6.]] # Shape [2, 3] n = [3., 3.] dist = DirichletMultinomial(n, alpha) # counts will be broadcast to [[2., 1., 0.], [2., 1., 0.]] to match alpha. counts = [2., 1., 0.] dist.prob(counts) # Shape [2] ```" 10194,_BaseDistribution,tensorflow/tensorflow/python/ops/distributions/distribution.py,73,class,Abstract base class needed for resolving subclass hierarchy. 10195,_copy_fn,tensorflow/tensorflow/python/ops/distributions/distribution.py,78,function,"Create a deep copy of fn. Args: fn: a callable Returns: A `FunctionType`: a deep copy of fn. Raises: TypeError: if `fn` is not a callable." 10196,_update_docstring,tensorflow/tensorflow/python/ops/distributions/distribution.py,110,function,"Update old_str by inserting append_str just before the ""Args:"" section." 10197,_convert_to_tensor,tensorflow/tensorflow/python/ops/distributions/distribution.py,132,function,Converts to tensor avoiding an eager bug that loses float precision. 10198,_DistributionMeta,tensorflow/tensorflow/python/ops/distributions/distribution.py,144,class, 10199,ReparameterizationType,tensorflow/tensorflow/python/ops/distributions/distribution.py,216,class,"Instances of this class represent how sampling is reparameterized. Two static instances exist in the distributions library, signifying one of two possible properties for samples from a distribution: `FULLY_REPARAMETERIZED`: Samples from the distribution are fully reparameterized, and straight-through gradients are supported. `NOT_REPARAMETERIZED`: Samples from the distribution are not fully reparameterized, and straight-through gradients are either partially unsupported or are not supported at all. In this case, for purposes of e.g. RL or variational inference, it is generally safest to wrap the sample results in a `stop_gradients` call and use policy gradients / surrogate loss instead." 10200,Distribution,tensorflow/tensorflow/python/ops/distributions/distribution.py,280,class,"A generic probability distribution base class. `Distribution` is a base class for constructing and organizing properties (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian). #### Subclassing Subclasses are expected to implement a leading-underscore version of the same-named function. The argument signature should be identical except for the omission of `name=""...""`. For example, to enable `log_prob(value, name=""log_prob"")` a subclass should implement `_log_prob(value)`. Subclasses can append to public-level docstrings by providing docstrings for their method specializations. For example: ```python @util.AppendDocstring(""Some other details."") def _log_prob(self, value): ... ``` would add the string ""Some other details."" to the `log_prob` function docstring. This is implemented as a simple decorator to avoid the Python linter complaining about missing Args/Returns/Raises sections in the partial docstrings.
#### Broadcasting, batching, and shapes All distributions support batches of independent distributions of that type. The batch shape is determined by broadcasting together the parameters. The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and `log_prob` reflect this broadcasting, as does the return value of `sample` and `sample_n`. `sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is the shape of the `Tensor` returned from `sample_n`, `n` is the number of samples, `batch_shape` defines how many independent distributions there are, and `event_shape` defines the shape of samples from each of those independent distributions. Samples are independent along the `batch_shape` dimensions, but not necessarily so along the `event_shape` dimensions (depending on the particulars of the underlying distribution). Using the `Uniform` distribution as an example: ```python minval = 3.0 maxval = [[4.0, 6.0], [10.0, 12.0]] # Broadcasting: # This instance represents 4 Uniform distributions. Each has a lower bound at # 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape. u = Uniform(minval, maxval) # `event_shape` is `TensorShape([])`. event_shape = u.event_shape # `event_shape_t` is a `Tensor` which will evaluate to []. event_shape_t = u.event_shape_tensor() # Sampling returns a sample per distribution. `samples` has shape # [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5, # batch_shape=[2, 2], and event_shape=[]. samples = u.sample_n(5) # The broadcasting holds across methods. Here we use `cdf` as an example. The # same holds for `log_cdf` and the likelihood functions. # `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the # shape of the `Uniform` instance. cum_prob_broadcast = u.cdf(4.0) # `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting # occurred. cum_prob_per_dist = u.cdf([[4.0, 5.0], [6.0, 7.0]]) # INVALID as the `value` argument is not broadcastable to the distribution's # shape. cum_prob_invalid = u.cdf([4.0, 5.0, 6.0]) ``` #### Shapes There are three important concepts associated with TensorFlow Distributions shapes: - Event shape describes the shape of a single draw from the distribution; it may be dependent across dimensions. For scalar distributions, the event shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is `[5]`. - Batch shape describes independent, not identically distributed draws, aka a ""collection"" or ""bunch"" of distributions. - Sample shape describes independent, identically distributed draws of batches from the distribution family. The event shape and the batch shape are properties of a Distribution object, whereas the sample shape is associated with a specific call to `sample` or `log_prob`. For detailed usage examples of TensorFlow Distributions shapes, see [this tutorial]( https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb) #### Parameter values leading to undefined statistics or distributions. Some distributions do not have well-defined statistics for all initialization parameter values. For example, the beta distribution is parameterized by positive real numbers `concentration1` and `concentration0`, and does not have well-defined mode if `concentration1 < 1` or `concentration0 < 1`. The user is given the option of raising an exception or returning `NaN`. 
```python a = tf.exp(tf.matmul(logits, weights_a)) b = tf.exp(tf.matmul(logits, weights_b)) # Will raise exception if ANY batch member has a < 1 or b < 1. dist = distributions.beta(a, b, allow_nan_stats=False) mode = dist.mode().eval() # Will return NaN for batch members with either a < 1 or b < 1. dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior mode = dist.mode().eval() ``` In all cases, an exception is raised if *invalid* parameters are passed, e.g. ```python # Will raise an exception if any Op is run. negative_a = -1.0 * a # beta distribution by definition has a > 0. dist = distributions.beta(negative_a, b, allow_nan_stats=True) dist.mean().eval() ```" 10201,Exponential,tensorflow/tensorflow/python/ops/distributions/exponential.py,41,class,"Exponential distribution. The Exponential distribution is parameterized by an event `rate` parameter. #### Mathematical Details The probability density function (pdf) is, ```none pdf(x; lambda, x > 0) = exp(-lambda x) / Z Z = 1 / lambda ``` where `rate = lambda` and `Z` is the normalizing constant. The Exponential distribution is a special case of the Gamma distribution, i.e., ```python Exponential(rate) = Gamma(concentration=1., rate) ``` The Exponential distribution uses a `rate` parameter, or ""inverse scale"", which can be intuited as, ```none X ~ Exponential(rate=1) Y = X / rate ```" 10202,ExponentialWithSoftplusRate,tensorflow/tensorflow/python/ops/distributions/exponential.py,147,class,Exponential with softplus transform on `rate`. 10203,Gamma,tensorflow/tensorflow/python/ops/distributions/gamma.py,47,class,"Gamma distribution. The Gamma distribution is defined over positive real numbers using parameters `concentration` (aka ""alpha"") and `rate` (aka ""beta""). #### Mathematical Details The probability density function (pdf) is, ```none pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z Z = Gamma(alpha) beta**(-alpha) ``` where: * `concentration = alpha`, `alpha > 0`, * `rate = beta`, `beta > 0`, * `Z` is the normalizing constant, and, * `Gamma` is the [gamma function]( https://en.wikipedia.org/wiki/Gamma_function). The cumulative distribution function (cdf) is, ```none cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha) ``` where `GammaInc` is the [lower incomplete Gamma function]( https://en.wikipedia.org/wiki/Incomplete_gamma_function). The parameters can be intuited via their relationship to mean and stddev, ```none concentration = alpha = (mean / stddev)**2 rate = beta = mean / stddev**2 = concentration / mean ``` Distribution parameters are automatically broadcast in all functions; see examples for details. Warning: The samples of this distribution are always non-negative. However, the samples that are smaller than `np.finfo(dtype).tiny` are rounded to this value, so it appears more often than it should. This should only be noticeable when the `concentration` is very small, or the `rate` is very large. See note in `tf.random.gamma` docstring. Samples of this distribution are reparameterized (pathwise differentiable). The derivatives are computed using the approach described in (Figurnov et al., 2018). #### Examples ```python import tensorflow_probability as tfp tfd = tfp.distributions dist = tfd.Gamma(concentration=3.0, rate=2.0) dist2 = tfd.Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0]) ``` Compute the gradients of samples w.r.t.
the parameters: ```python concentration = tf.constant(3.0) rate = tf.constant(2.0) dist = tfd.Gamma(concentration, rate) samples = dist.sample(5) # Shape [5] loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function # Unbiased stochastic gradients of the loss function grads = tf.gradients(loss, [concentration, rate]) ``` References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf](http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 10204,GammaWithSoftplusConcentrationRate,tensorflow/tensorflow/python/ops/distributions/gamma.py,291,class,`Gamma` with softplus of `concentration` and `rate`. 10205,_kl_gamma_gamma,tensorflow/tensorflow/python/ops/distributions/gamma.py,318,function,"Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma. Args: g0: instance of a Gamma distribution object. g1: instance of a Gamma distribution object. name: (optional) Name to use for created operations. Default is ""kl_gamma_gamma"". Returns: kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1)." 10206,Identity,tensorflow/tensorflow/python/ops/distributions/identity_bijector.py,31,class,"Compute Y = g(X) = X. Example Use: ```python # Create the Y=g(X)=X transform which is intended for Tensors with 1 batch # ndim and 1 event ndim (i.e., vector of vectors). identity = Identity() x = [[1., 2], [3, 4]] x == identity.forward(x) == identity.inverse(x) ```" 10207,_registered_kl,tensorflow/tensorflow/python/ops/distributions/kullback_leibler.py,39,function,Get the KL function registered for classes a and b. 10208,kl_divergence,tensorflow/tensorflow/python/ops/distributions/kullback_leibler.py,64,function,"Get the KL-divergence KL(distribution_a || distribution_b). If there is no KL method registered specifically for `type(distribution_a)` and `type(distribution_b)`, then the class hierarchies of these types are searched. If one KL method is registered between any pairs of classes in these two parent hierarchies, it is used. If more than one such registered method exists, the method whose registered classes have the shortest sum MRO paths to the input types is used. If more than one such shortest path exists, the first method identified in the search is used (favoring a shorter MRO distance to `type(distribution_a)`). Args: distribution_a: The first distribution. distribution_b: The second distribution. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value ""`NaN`"" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Returns: A Tensor with the batchwise KL-divergence between `distribution_a` and `distribution_b`. Raises: NotImplementedError: If no KL method is defined for distribution types of `distribution_a` and `distribution_b`." 10209,cross_entropy,tensorflow/tensorflow/python/ops/distributions/kullback_leibler.py,132,function,"Computes the (Shannon) cross entropy. Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q` are absolutely continuous with respect to one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon) cross entropy is defined as: ```none H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x) ``` where `F` denotes the support of the random variable `X ~ P`. Args: ref: `tfd.Distribution` instance.
other: `tfd.Distribution` instance. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value ""`NaN`"" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` prepended to names of ops created by this function. Returns: cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]` representing `n` different calculations of (Shannon) cross entropy." 10210,RegisterKL,tensorflow/tensorflow/python/ops/distributions/kullback_leibler.py,165,class,"Decorator to register a KL divergence implementation function. Usage: @distributions.RegisterKL(distributions.Normal, distributions.Normal) def _kl_normal_mvn(norm_a, norm_b): # Return KL(norm_a || norm_b)" 10211,Laplace,tensorflow/tensorflow/python/ops/distributions/laplace.py,47,class,"The Laplace distribution with location `loc` and `scale` parameters. #### Mathematical details The probability density function (pdf) of this distribution is, ```none pdf(x; mu, sigma) = exp(-|x - mu| / sigma) / Z Z = 2 sigma ``` where `loc = mu`, `scale = sigma`, and `Z` is the normalization constant. Note that the Laplace distribution can be thought of as two exponential distributions spliced together ""back-to-back."" The Laplace distribution is a member of the [location-scale family]( https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be constructed as, ```none X ~ Laplace(loc=0, scale=1) Y = loc + scale * X ```" 10212,LaplaceWithSoftplusScale,tensorflow/tensorflow/python/ops/distributions/laplace.py,220,class,Laplace with softplus applied to `scale`. 10213,Multinomial,tensorflow/tensorflow/python/ops/distributions/multinomial.py,56,class,"Multinomial distribution. This Multinomial distribution is parameterized by `probs`, a (batch of) length-`K` `prob` (probability) vectors (`K > 1`) such that `tf.reduce_sum(probs, -1) = 1`, and a `total_count` number of trials, i.e., the number of trials per draw from the Multinomial. It is defined over a (batch of) length-`K` vector `counts` such that `tf.reduce_sum(counts, -1) = total_count`. The Multinomial is identically the Binomial distribution when `K = 2`. #### Mathematical Details The Multinomial is a distribution over `K`-class counts, i.e., a length-`K` vector of non-negative integer `counts = n = [n_0, ..., n_{K-1}]`. The probability mass function (pmf) is, ```none pmf(n; pi, N) = prod_j (pi_j)**n_j / Z Z = (prod_j n_j!) / N! ``` where: * `probs = pi = [pi_0, ..., pi_{K-1}]`, `pi_j > 0`, `sum_j pi_j = 1`, * `total_count = N`, `N` a positive integer, * `Z` is the normalization constant, and, * `N!` denotes `N` factorial. Distribution parameters are automatically broadcast in all functions; see examples for details. #### Pitfalls The number of classes, `K`, must not exceed: - the largest integer representable by `self.dtype`, i.e., `2**(mantissa_bits+1)` (IEEE 754), - the maximum `Tensor` index, i.e., `2**31-1`. In other words, ```python K <= min(2**31-1, { tf.float16: 2**11, tf.float32: 2**24, tf.float64: 2**53 }[param.dtype]) ``` Note: This condition is validated only when `self.validate_args = True`. #### Examples Create a 3-class distribution, with the 3rd class being most likely to be drawn, using logits. ```python logits = [-50., -43, 0] dist = Multinomial(total_count=4., logits=logits) ``` Create a 3-class distribution, with the 3rd class being most likely to be drawn.
```python p = [.2, .3, .5] dist = Multinomial(total_count=4., probs=p) ``` The distribution functions can be evaluated on counts. ```python # counts same shape as p. counts = [1., 0, 3] dist.prob(counts) # Shape [] # p will be broadcast to [[.2, .3, .5], [.2, .3, .5]] to match counts. counts = [[1., 2, 1], [2, 2, 0]] dist.prob(counts) # Shape [2] # p will be broadcast to shape [5, 7, 3] to match counts. counts = [[...]] # Shape [5, 7, 3] dist.prob(counts) # Shape [5, 7] ``` Create a 2-batch of 3-class distributions. ```python p = [[.1, .2, .7], [.3, .3, .4]] # Shape [2, 3] dist = Multinomial(total_count=[4., 5], probs=p) counts = [[2., 1, 1], [3, 1, 1]] dist.prob(counts) # Shape [2] dist.sample(5) # Shape [5, 2, 3] ```" 10214,Normal,tensorflow/tensorflow/python/ops/distributions/normal.py,46,class,"The Normal distribution with location `loc` and `scale` parameters. #### Mathematical details The probability density function (pdf) is, ```none pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z Z = (2 pi sigma**2)**0.5 ``` where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z` is the normalization constant. The Normal distribution is a member of the [location-scale family]( https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be constructed as, ```none X ~ Normal(loc=0, scale=1) Y = loc + scale * X ``` #### Examples Examples of initialization of one or a batch of distributions. ```python import tensorflow_probability as tfp tfd = tfp.distributions # Define a single scalar Normal distribution. dist = tfd.Normal(loc=0., scale=3.) # Evaluate the cdf at 1, returning a scalar. dist.cdf(1.) # Define a batch of two scalar valued Normals. # The first has mean 1 and standard deviation 11, the second 2 and 22. dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.]) # Evaluate the pdf of the first distribution on 0, and the second on 1.5, # returning a length two tensor. dist.prob([0, 1.5]) # Get 3 samples, returning a 3 x 2 tensor. dist.sample([3]) ``` Arguments are broadcast when possible. ```python # Define a batch of two scalar valued Normals. # Both have mean 1, but different standard deviations. dist = tfd.Normal(loc=1., scale=[11, 22.]) # Evaluate the pdf of both distributions on the same point, 3.0, # returning a length 2 tensor. dist.prob(3.0) ```" 10215,NormalWithSoftplusScale,tensorflow/tensorflow/python/ops/distributions/normal.py,249,class,Normal with softplus applied to `scale`. 10216,_kl_normal_normal,tensorflow/tensorflow/python/ops/distributions/normal.py,275,function,"Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal. Args: n_a: instance of a Normal distribution object. n_b: instance of a Normal distribution object. name: (optional) Name to use for created operations. default is ""kl_normal_normal"". Returns: Batchwise KL(n_a || n_b)" 10217,ndtr,tensorflow/tensorflow/python/ops/distributions/special_math.py,111,function,"Normal distribution function. Returns the area under the Gaussian probability density function, integrated from minus infinity to x: ``` ndtr(x) = (1 / sqrt(2 pi)) int_{-inf}^{x} exp(-0.5 t**2) dt = 0.5 (1 + erf(x / sqrt(2))) = 0.5 erfc(-x / sqrt(2)) ``` Args: x: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default=""ndtr""). Returns: ndtr: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x` is not floating-type." 10218,_ndtr,tensorflow/tensorflow/python/ops/distributions/special_math.py,146,function,Implements ndtr core logic.
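The `_kl_normal_normal` entry above documents the batched Normal-Normal KL divergence without stating the closed form it evaluates. As a hedged illustration of that quantity, here is a minimal NumPy sketch of the standard textbook formula; the helper name `kl_normal_normal` is hypothetical and this is not the TensorFlow implementation.

```python
# Sketch of the closed-form Normal-Normal KL divergence (textbook formula):
#   KL(N(mu_a, s_a^2) || N(mu_b, s_b^2))
#     = log(s_b / s_a) + (s_a^2 + (mu_a - mu_b)^2) / (2 s_b^2) - 1/2
import numpy as np

def kl_normal_normal(mu_a, sigma_a, mu_b, sigma_b):
    """Batchwise KL(a || b) for Normals with broadcastable parameters."""
    var_ratio = (sigma_a / sigma_b) ** 2
    mean_term = ((mu_a - mu_b) / sigma_b) ** 2
    # Equivalent form: 0.5 * (var_ratio + mean_term - 1 - log(var_ratio)).
    return 0.5 * (var_ratio + mean_term - 1.0 - np.log(var_ratio))

# The divergence is zero iff the two distributions coincide.
assert np.isclose(kl_normal_normal(0.0, 1.0, 0.0, 1.0), 0.0)
assert kl_normal_normal(1.0, 2.0, 0.0, 1.0) > 0.0
```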
10219,ndtri,tensorflow/tensorflow/python/ops/distributions/special_math.py,159,function,"The inverse of the CDF of the Normal distribution function. Returns x such that the area under the pdf from minus infinity to x is equal to p. A piece-wise rational approximation is done for the function. This is a port of the implementation in netlib. Args: p: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default=""ndtri""). Returns: x: `Tensor` with `dtype=p.dtype`. Raises: TypeError: if `p` is not floating-type." 10220,_ndtri,tensorflow/tensorflow/python/ops/distributions/special_math.py,188,function,Implements ndtri core logic. 10221,log_ndtr,tensorflow/tensorflow/python/ops/distributions/special_math.py,282,function,"Log Normal distribution function. For details of the Normal distribution function see `ndtr`. This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or using an asymptotic series. Specifically: - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on `log(1-x) ~= -x, x << 1`. - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique and take a log. - For `x <= lower_segment`, we use the series approximation of erf to compute the log CDF directly. The `lower_segment` is set based on the precision of the input: ``` lower_segment = -20 (x.dtype=float64) or -10 (x.dtype=float32) upper_segment = 8 (x.dtype=float64) or 5 (x.dtype=float32) ``` When `x < lower_segment`, the `ndtr` asymptotic series approximation is: ``` ndtr(x) = scale * (1 + sum) + R_N scale = exp(-0.5 x**2) / (-x sqrt(2 pi)) sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N} R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3}) ``` where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a [double-factorial](https://en.wikipedia.org/wiki/Double_factorial). Args: x: `Tensor` of type `float32`, `float64`. series_order: Positive Python `integer`. Maximum depth to evaluate the asymptotic expansion. This is the `N` above. name: Python string. A name for the operation (default=""log_ndtr""). Returns: log_ndtr: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x.dtype` is not handled. TypeError: if `series_order` is not a Python `integer`. ValueError: if `series_order` is not in `[0, 30]`." 10222,_log_ndtr_lower,tensorflow/tensorflow/python/ops/distributions/special_math.py,374,function,"Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`." 10223,_log_ndtr_asymptotic_series,tensorflow/tensorflow/python/ops/distributions/special_math.py,382,function,Calculates the asymptotic series used in log_ndtr. 10224,erfinv,tensorflow/tensorflow/python/ops/distributions/special_math.py,401,function,"The inverse function for erf, the error function. Args: x: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default=""erfinv""). Returns: x: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x` is not floating-type." 10225,_double_factorial,tensorflow/tensorflow/python/ops/distributions/special_math.py,424,function,The double factorial function for small Python integer `n`. 10226,log_cdf_laplace,tensorflow/tensorflow/python/ops/distributions/special_math.py,429,function,"Log Laplace distribution function. This function calculates `Log[L(x)]`, where `L(x)` is the cumulative distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt``` For numerical accuracy, `L(x)` is computed in different ways depending on `x`, ``` x <= 0: Log[L(x)] = Log[0.5] + x, which is exact 0 < x: Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact ``` Args: x: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default=""log_cdf_laplace""). Returns: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x.dtype` is not handled." 10227,StudentT,tensorflow/tensorflow/python/ops/distributions/student_t.py,47,class,"Student's t-distribution. This distribution has parameters: degree of freedom `df`, location `loc`, and `scale`. #### Mathematical details The probability density function (pdf) is, ```none pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z where, y = (x - mu) / sigma Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) ``` where: * `loc = mu`, * `scale = sigma`, and, * `Z` is the normalization constant, and, * `Gamma` is the [gamma function]( https://en.wikipedia.org/wiki/Gamma_function). The StudentT distribution is a member of the [location-scale family]( https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be constructed as, ```none X ~ StudentT(df, loc=0, scale=1) Y = loc + scale * X ``` Notice that `scale` has semantics more similar to standard deviation than variance. However it is not actually the std. deviation; the Student's t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`. Samples of this distribution are reparameterized (pathwise differentiable). The derivatives are computed using the approach described in (Figurnov et al., 2018). #### Examples Examples of initialization of one or a batch of distributions. ```python import tensorflow_probability as tfp tfd = tfp.distributions # Define a single scalar Student t distribution. single_dist = tfd.StudentT(df=3) # Evaluate the pdf at 1, returning a scalar Tensor. single_dist.prob(1.) # Define a batch of two scalar valued Student t's. # The first has degrees of freedom 2, mean 1, and scale 11. # The second 3, 2 and 22. multi_dist = tfd.StudentT(df=[2, 3], loc=[1, 2.], scale=[11, 22.]) # Evaluate the pdf of the first distribution on 0, and the second on 1.5, # returning a length two tensor. multi_dist.prob([0, 1.5]) # Get 3 samples, returning a 3 x 2 tensor. multi_dist.sample(3) ``` Arguments are broadcast when possible. ```python # Define a batch of two Student's t distributions. # Both have df 2 and mean 1, but different scales. dist = tfd.StudentT(df=2, loc=1, scale=[11, 22.]) # Evaluate the pdf of both distributions on the same point, 3.0, # returning a length 2 tensor. dist.prob(3.0) ``` Compute the gradients of samples w.r.t. the parameters: ```python df = tf.constant(2.0) loc = tf.constant(2.0) scale = tf.constant(11.0) dist = tfd.StudentT(df=df, loc=loc, scale=scale) samples = dist.sample(5) # Shape [5] loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function # Unbiased stochastic gradients of the loss function grads = tf.gradients(loss, [df, loc, scale]) ``` References: Implicit Reparameterization Gradients: [Figurnov et al., 2018] (http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients) ([pdf](http://papers.nips.cc/paper/7326-implicit-reparameterization-gradients.pdf))" 10228,StudentTWithAbsDfSoftplusScale,tensorflow/tensorflow/python/ops/distributions/student_t.py,371,class,StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`.
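The `log_cdf_laplace` entry above gives exact piecewise expressions for `Log[L(x)]`. A minimal NumPy sketch of that piecewise evaluation follows; the name is reused for illustration only, and this is not the TensorFlow implementation.

```python
# Piecewise, numerically stable log CDF of the standard Laplace distribution:
#   x <= 0: log L(x) = log(0.5) + x            (exact)
#   x >  0: log L(x) = log1p(-0.5 * exp(-x))   (exact and stable)
import numpy as np

def log_cdf_laplace(x):
    """Log CDF of the standard Laplace distribution."""
    x = np.asarray(x, dtype=np.float64)
    safe_x = np.maximum(x, 0.0)  # keeps exp(-x) bounded in the unused branch
    return np.where(x <= 0.0,
                    np.log(0.5) + x,
                    np.log1p(-0.5 * np.exp(-safe_x)))

# L(0) = 0.5, so both branches agree at x = 0.
assert np.isclose(log_cdf_laplace(0.0), np.log(0.5))
```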
10229,_static_value,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,45,function,Returns the static value of a `Tensor` or `None`. 10230,_logical_and,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,50,function,Convenience function which attempts to statically `reduce_all`. 10231,_logical_equal,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,62,function,Convenience function which attempts to statically compute `x == y`. 10232,_logical_not,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,71,function,Convenience function which attempts to statically apply `logical_not`. 10233,_concat_vectors,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,79,function,Convenience function which concatenates input vectors. 10234,_pick_scalar_condition,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,87,function,Convenience function which chooses the condition based on the predicate. 10235,_ones_like,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,98,function,Convenience function attempts to statically construct `ones_like`. 10236,_ndims_from_shape,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,106,function,Returns `Tensor`'s `rank` implied by a `Tensor` shape. 10237,_is_scalar_from_shape,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,117,function,Returns `True` `Tensor` if `Tensor` shape implies a scalar. 10238,TransformedDistribution,tensorflow/tensorflow/python/ops/distributions/transformed_distribution.py,122,class,"A Transformed Distribution. A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`, and a deterministic, invertible, differentiable transform, `Y = g(X)`. The transform is typically an instance of the `Bijector` class and the base distribution is typically an instance of the `Distribution` class. A `Bijector` is expected to implement the following functions: - `forward`, - `inverse`, - `inverse_log_det_jacobian`. The semantics of these functions are outlined in the `Bijector` documentation. We now describe how a `TransformedDistribution` alters the input/outputs of a `Distribution` associated with a random variable (rv) `X`. Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function of random variable `Y`; write the probability density function `pdf(Y=y) := d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative w.r.t. `Y` evaluated at `y`. Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism, i.e., a non-random, continuous, differentiable, and invertible function. Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian of `g` evaluated at `x`. A `TransformedDistribution` implements the following operations: * `sample` Mathematically: `Y = g(X)` Programmatically: `bijector.forward(distribution.sample(...))` * `log_prob` Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y) + (log o abs o det o J o g^{-1})(y)` Programmatically: `(distribution.log_prob(bijector.inverse(y)) + bijector.inverse_log_det_jacobian(y))` * `log_cdf` Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)` Programmatically: `distribution.log_cdf(bijector.inverse(y))` * and similarly for: `cdf`, `prob`, `log_survival_function`, `survival_function`.
A simple example constructing a Log-Normal distribution from a Normal distribution: ```python ds = tfp.distributions log_normal = ds.TransformedDistribution( distribution=ds.Normal(loc=0., scale=1.), bijector=ds.bijectors.Exp(), name=""LogNormalTransformedDistribution"") ``` A `LogNormal` made from callables: ```python ds = tfp.distributions log_normal = ds.TransformedDistribution( distribution=ds.Normal(loc=0., scale=1.), bijector=ds.bijectors.Inline( forward_fn=tf.exp, inverse_fn=tf.math.log, inverse_log_det_jacobian_fn=( lambda y: -tf.reduce_sum(tf.math.log(y), axis=-1)), name=""LogNormalTransformedDistribution"")) ``` Another example constructing a Normal from a StandardNormal: ```python ds = tfp.distributions normal = ds.TransformedDistribution( distribution=ds.Normal(loc=0., scale=1.), bijector=ds.bijectors.Affine( shift=-1., scale_identity_multiplier=2.), name=""NormalTransformedDistribution"") ``` A `TransformedDistribution`'s batch- and event-shape are implied by the base distribution unless explicitly overridden by `batch_shape` or `event_shape` arguments. Specifying an overriding `batch_shape` (`event_shape`) is permitted only if the base distribution has scalar batch-shape (event-shape). The bijector is applied to the distribution as if the distribution possessed the overridden shape(s). The following example demonstrates how to construct a multivariate Normal as a `TransformedDistribution`. ```python ds = tfp.distributions # We will create two MVNs with batch_shape = event_shape = 2. mean = [[-1., 0], # batch:0 [0., 1]] # batch:1 chol_cov = [[[1., 0], [0, 1]], # batch:0 [[1, 0], [2, 2]]] # batch:1 mvn1 = ds.TransformedDistribution( distribution=ds.Normal(loc=0., scale=1.), bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov), batch_shape=[2], # Valid because base_distribution.batch_shape == []. event_shape=[2]) # Valid because base_distribution.event_shape == []. mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov) # mvn1.log_prob(x) == mvn2.log_prob(x) ```" 10239,Uniform,tensorflow/tensorflow/python/ops/distributions/uniform.py,37,class,"Uniform distribution with `low` and `high` parameters. #### Mathematical Details The probability density function (pdf) is, ```none pdf(x; a, b) = I[a <= x < b] / Z Z = b - a ``` where - `low = a`, - `high = b`, - `Z` is the normalizing constant, and - `I[predicate]` is the [indicator function]( https://en.wikipedia.org/wiki/Indicator_function) for `predicate`. The parameters `low` and `high` must be shaped in a way that supports broadcasting (e.g., `high - low` is a valid operation). #### Examples ```python # Without broadcasting: u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4] u2 = Uniform(low=[1.0, 2.0], high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4] u3 = Uniform(low=[[1.0, 2.0], [3.0, 4.0]], high=[[1.5, 2.5], [3.5, 4.5]]) # 4 distributions ``` ```python # With broadcasting: u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions ```" 10240,assert_integer_form,tensorflow/tensorflow/python/ops/distributions/util.py,40,function,"Assert that x has integer components (or floats equal to integers). Args: x: Floating-point `Tensor` data: The tensors to print out if the condition is `False`. Defaults to error message and first few entries of `x` and `y`. summarize: Print this many entries of each tensor. message: A string to prefix to the default message. int_dtype: A `tf.dtype` used to cast the float to. The default (`None`) implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional). Returns: Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`." 10241,assert_symmetric,tensorflow/tensorflow/python/ops/distributions/util.py,84,function, 10242,embed_check_nonnegative_integer_form,tensorflow/tensorflow/python/ops/distributions/util.py,90,function,"Assert x is a non-negative tensor, and optionally of integers." 10243,same_dynamic_shape,tensorflow/tensorflow/python/ops/distributions/util.py,108,function,"Returns whether a and b have the same dynamic shape. Args: a: `Tensor` b: `Tensor` Returns: `bool` `Tensor` representing if both tensors have the same shape." 10244,maybe_get_static_value,tensorflow/tensorflow/python/ops/distributions/util.py,139,function,"Helper which tries to return a static value. Given `x`, extract its value statically, optionally casting to a specific dtype. If this is not possible, None is returned. Args: x: `Tensor` for which to extract a value statically. dtype: Optional dtype to cast to. Returns: Statically inferred value if possible, otherwise None." 10245,get_logits_and_probs,tensorflow/tensorflow/python/ops/distributions/util.py,164,function,"Converts logits to probabilities (or vice-versa), and returns both. Args: logits: Floating-point `Tensor` representing log-odds. probs: Floating-point `Tensor` representing probabilities. multidimensional: Python `bool`, default `False`. If `True`, `logits` or `probs` is treated as a `[N1, N2, ... k]`-dimensional tensor whose last dimension represents the logit or probability of `shape[-1]` classes. validate_args: Python `bool`, default `False`. When `True`, either assert `0 <= probs <= 1` (if not `multidimensional`) or that the last dimension of `probs` sums to one. name: A name for this operation (optional). dtype: `tf.DType` to prefer when converting args to `Tensor`s. Returns: logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or `1`, then the corresponding entry in the returned logit will be `-Inf` and `Inf` respectively. Raises: ValueError: if neither `probs` nor `logits` were passed in, or both were." 10246,_is_known_unsigned_by_dtype,tensorflow/tensorflow/python/ops/distributions/util.py,245,function,Helper returning True if dtype is known to be unsigned. 10247,_is_known_signed_by_dtype,tensorflow/tensorflow/python/ops/distributions/util.py,254,function,Helper returning True if dtype is known to be signed. 10248,_is_known_dtype,tensorflow/tensorflow/python/ops/distributions/util.py,267,function,Helper returning True if dtype is known. 10249,_largest_integer_by_dtype,tensorflow/tensorflow/python/ops/distributions/util.py,272,function,Helper returning the largest integer exactly representable by dtype. 10250,_smallest_integer_by_dtype,tensorflow/tensorflow/python/ops/distributions/util.py,286,function,Helper returning the smallest integer exactly representable by dtype. 10251,_is_integer_like_by_dtype,tensorflow/tensorflow/python/ops/distributions/util.py,295,function,Helper returning True if dtype.is_integer or is `bool`. 10252,embed_check_categorical_event_shape,tensorflow/tensorflow/python/ops/distributions/util.py,302,function,"Embeds checks that categorical distributions don't have too many classes. A categorical-type distribution is one which, e.g., returns the class label rather than a one-hot encoding. E.g., `Categorical(probs)`. Since distributions output samples in the same dtype as the parameters, we must ensure that casting doesn't lose precision.
That is, the `parameter.dtype` implies a maximum number of classes. However, since shape is `int32` and categorical variables are presumed to be indexes into a `Tensor`, we must also ensure that the number of classes is no larger than the largest possible `int32` index, i.e., `2**31-1`. In other words the number of classes, `K`, must satisfy the following condition: ```python K <= min( int(2**31 - 1), # Largest int32 index. { dtypes.float16: int(2**11), # Largest int as a float16. dtypes.float32: int(2**24), dtypes.float64: int(2**53), }.get(categorical_param.dtype.base_dtype, 0)) ``` Args: categorical_param: Floating-point `Tensor` representing parameters of distribution over categories. The rightmost shape is presumed to be the number of categories. name: A name for this operation (optional). Returns: categorical_param: Input `Tensor` with appropriate assertions embedded. Raises: TypeError: if `categorical_param` has an unknown `dtype`. ValueError: if we can statically identify `categorical_param` as being too large (for being closed under int32/float casting)." 10253,embed_check_integer_casting_closed,tensorflow/tensorflow/python/ops/distributions/util.py,397,function,"Ensures integers remain unaffected despite casting to/from int/float types. Example integer-types: `uint8`, `int32`, `bool`. Example floating-types: `float32`, `float64`. The largest possible integer representable by an IEEE754 floating-point is `2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is `2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have integer-form values can be cast to some other type without loss of precision. The smallest representable integer is the negative of the largest representable integer, except for types: `uint8`, `uint16`, `bool`. For these types, the smallest representable integer is `0`. Args: x: `Tensor` representing integer-form values. target_dtype: TF `dtype` under which `x` should have identical values. assert_nonnegative: `bool` indicating `x` should contain nonnegative values. name: A name for this operation (optional). Returns: x: Input `Tensor` with appropriate assertions embedded. Raises: TypeError: if `x` is neither integer- nor floating-type. TypeError: if `target_dtype` is neither integer- nor floating-type. TypeError: if neither `x` nor `target_dtype` are integer-type." 10254,log_combinations,tensorflow/tensorflow/python/ops/distributions/util.py,488,function,"Multinomial coefficient. Given `n` and `counts`, where `counts` has last dimension `k`, we compute the multinomial coefficient as: ```n! / prod_i n_i!``` where `i` runs over all `k` classes. Args: n: Floating-point `Tensor` broadcastable with `counts`. This represents `n` outcomes. counts: Floating-point `Tensor` broadcastable with `n`. This represents counts in `k` classes, where `k` is the last dimension of the tensor. name: A name for this operation (optional). Returns: `Tensor` representing the multinomial coefficient between `n` and `counts`." 10255,matrix_diag_transform,tensorflow/tensorflow/python/ops/distributions/util.py,522,function,"Transform diagonal of [batch-]matrix, leave rest of matrix unchanged. Create a trainable covariance defined by a Cholesky factor: ```python # Transform network layer into 2 x 2 array. matrix_values = tf.contrib.layers.fully_connected(activations, 4) matrix = tf.reshape(matrix_values, (batch_size, 2, 2)) # Make the diagonal positive. If the upper triangle was zero, this would be a # valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus) # LinearOperatorLowerTriangular ignores the upper triangle. operator = LinearOperatorLowerTriangular(chol) ``` Example of heteroskedastic 2-D linear regression. ```python tfd = tfp.distributions # Get a trainable Cholesky factor. matrix_values = tf.contrib.layers.fully_connected(activations, 4) matrix = tf.reshape(matrix_values, (batch_size, 2, 2)) chol = matrix_diag_transform(matrix, transform=tf.nn.softplus) # Get a trainable mean. mu = tf.contrib.layers.fully_connected(activations, 2) # This is a fully trainable multivariate normal! dist = tfd.MultivariateNormalTriL(mu, chol) # Standard log loss. Minimizing this will ""train"" mu and chol, and then dist # will be a distribution predicting labels as multivariate Gaussians. loss = -1 * tf.reduce_mean(dist.log_prob(labels)) ``` Args: matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are equal. transform: Element-wise function mapping `Tensors` to `Tensors`. To be applied to the diagonal of `matrix`. If `None`, `matrix` is returned unchanged. Defaults to `None`. name: A name to give created ops. Defaults to ""matrix_diag_transform"". Returns: A `Tensor` with same shape and `dtype` as `matrix`." 10256,rotate_transpose,tensorflow/tensorflow/python/ops/distributions/util.py,584,function,"Circularly moves dims left or right. Effectively identical to: ```python numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift)) ``` When `validate_args=False` additional graph-runtime checks are performed. These checks entail moving data from GPU to CPU. Example: ```python x = tf.random.normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4]. rotate_transpose(x, -1).shape == [2, 3, 4, 1] rotate_transpose(x, -2).shape == [3, 4, 1, 2] rotate_transpose(x, 1).shape == [4, 1, 2, 3] rotate_transpose(x, 2).shape == [3, 4, 1, 2] rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1] rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3] ``` Args: x: `Tensor`. shift: `Tensor`. Number of dimensions to transpose left (shift<0) or transpose right (shift>0). name: Python `str`. The name to give this op. Returns: rotated_x: Input `Tensor` with dimensions circularly rotated by shift. Raises: TypeError: if shift is not integer type." 10257,pick_vector,tensorflow/tensorflow/python/ops/distributions/util.py,660,function,"Picks possibly different length row `Tensor`s based on condition. Value `Tensor`s should have exactly one dimension. If `cond` is a python Boolean or `tf.constant` then either `true_vector` or `false_vector` is immediately returned. I.e., no graph nodes are created and no validation happens. Args: cond: `Tensor`. Must have `dtype=tf.bool` and be scalar. true_vector: `Tensor` of one dimension. Returned when cond is `True`. false_vector: `Tensor` of one dimension. Returned when cond is `False`. name: Python `str`. The name to give this op. Example: ```python pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18)) # [10, 11] pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18)) # [15, 16, 17] ``` Returns: true_or_false_vector: `Tensor`. Raises: TypeError: if `cond.dtype != tf.bool` TypeError: if `cond` is not a constant and `true_vector.dtype != false_vector.dtype`" 10258,prefer_static_broadcast_shape,tensorflow/tensorflow/python/ops/distributions/util.py,706,function,"Convenience function which statically broadcasts shape when possible. Args: shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor! name: A string name to prepend to created ops. Returns: The broadcast shape, either as `TensorShape` (if broadcast can be done statically), or as a `Tensor`." 10259,prefer_static_rank,tensorflow/tensorflow/python/ops/distributions/util.py,751,function,"Return static rank of tensor `x` if available, else `tf.rank(x)`. Args: x: `Tensor` (already converted). Returns: Numpy array (if static rank is obtainable), else `Tensor`." 10260,prefer_static_shape,tensorflow/tensorflow/python/ops/distributions/util.py,763,function,"Return static shape of tensor `x` if available, else `tf.shape(x)`. Args: x: `Tensor` (already converted). Returns: Numpy array (if static shape is obtainable), else `Tensor`." 10261,prefer_static_value,tensorflow/tensorflow/python/ops/distributions/util.py,775,function,"Return static value of tensor `x` if available, else `x`. Args: x: `Tensor` (already converted). Returns: Numpy array (if static value is obtainable), else `Tensor`." 10262,gen_new_seed,tensorflow/tensorflow/python/ops/distributions/util.py,790,function,"Generate a new seed, from the given seed and salt." 10263,fill_triangular,tensorflow/tensorflow/python/ops/distributions/util.py,798,function,"Creates a (batch of) triangular matrix from a vector of inputs. Created matrix can be lower- or upper-triangular. (It is more efficient to create the matrix as upper or lower, rather than transpose.) Triangular matrix elements are filled in a clockwise spiral. See example, below. If `x.get_shape()` is `[b1, b2, ..., bB, d]` then the output shape is `[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e., `n = int(np.sqrt(0.25 + 2. * d) - 0.5)`. Example: ```python fill_triangular([1, 2, 3, 4, 5, 6]) # ==> [[4, 0, 0], # [6, 5, 0], # [3, 2, 1]] fill_triangular([1, 2, 3, 4, 5, 6], upper=True) # ==> [[1, 2, 3], # [0, 5, 6], # [0, 0, 4]] ``` For comparison, a pure numpy version of this function can be found in `util_test.py`, function `_fill_triangular`. Args: x: `Tensor` representing lower (or upper) triangular elements. upper: Python `bool` representing whether output matrix should be upper triangular (`True`) or lower triangular (`False`, default). name: Python `str`. The name to give this op. Returns: tril: `Tensor` with lower (or upper) triangular elements filled from `x`. Raises: ValueError: if `x` cannot be mapped to a triangular matrix." 10264,fill_triangular_inverse,tensorflow/tensorflow/python/ops/distributions/util.py,913,function,"Creates a vector from a (batch of) triangular matrix. The vector is created from the lower-triangular or upper-triangular portion depending on the value of the parameter `upper`. If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is `[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`. Example: ```python fill_triangular_inverse( [[4, 0, 0], [6, 5, 0], [3, 2, 1]]) # ==> [1, 2, 3, 4, 5, 6] fill_triangular_inverse( [[1, 2, 3], [0, 5, 6], [0, 0, 4]], upper=True) # ==> [1, 2, 3, 4, 5, 6] ``` Args: x: `Tensor` representing lower (or upper) triangular elements. upper: Python `bool` representing whether output matrix should be upper triangular (`True`) or lower triangular (`False`, default). name: Python `str`. The name to give this op. Returns: flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower (or upper) triangular elements from `x`." 10265,tridiag,tensorflow/tensorflow/python/ops/distributions/util.py,982,function,"Creates a matrix with values set above, below, and on the diagonal.
Example: ```python tridiag(below=[1., 2., 3.], diag=[4., 5., 6., 7.], above=[8., 9., 10.]) # ==> array([[ 4., 8., 0., 0.], # [ 1., 5., 9., 0.], # [ 0., 2., 6., 10.], # [ 0., 0., 3., 7.]], dtype=float32) ``` Warning: This Op is intended for convenience, not efficiency. Args: below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below diagonal part. `None` is logically equivalent to `below = 0`. diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal part. `None` is logically equivalent to `diag = 0`. above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above diagonal part. `None` is logically equivalent to `above = 0`. name: Python `str`. The name to give this op. Returns: tridiag: `Tensor` with values set above, below and on the diagonal. Raises: ValueError: if all inputs are `None`." 10266,reduce_weighted_logsumexp,tensorflow/tensorflow/python/ops/distributions/util.py,1050,function,"Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`. If all weights `w` are known to be positive, it is more efficient to directly use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is more efficient than `du.reduce_weighted_logsumexp(logx, w)`. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. This function is more numerically stable than log(sum(w * exp(input))). It avoids overflows caused by taking the exp of large inputs and underflows caused by taking the log of small inputs. For example: ```python x = tf.constant([[0., 0, 0], [0, 0, 0]]) w = tf.constant([[-1., 1, 1], [1, 1, 1]]) du.reduce_weighted_logsumexp(x, w) # ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4) du.reduce_weighted_logsumexp(x, w, axis=0) # ==> [log(-1+1), log(1+1), log(1+1)] du.reduce_weighted_logsumexp(x, w, axis=1) # ==> [log(-1+1+1), log(1+1+1)] du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True) # ==> [[log(-1+1+1)], [log(1+1+1)]] du.reduce_weighted_logsumexp(x, w, axis=[0, 1]) # ==> log(-1+5) ``` Args: logx: The tensor to reduce. Should have numeric type. w: The weight tensor. Should have numeric type identical to `logx`. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. keep_dims: If true, retains reduced dimensions with length 1. return_sign: If `True`, returns the sign of the result. name: A name for the operation (optional). Returns: lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor. sign: (Optional) The sign of `sum(weight * exp(x))`." 10267,softplus_inverse,tensorflow/tensorflow/python/ops/distributions/util.py,1148,function,"Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)). Mathematically this op is equivalent to: ```none softplus_inverse = log(exp(x) - 1.) ``` Args: x: `Tensor`. Non-negative (not enforced), floating-point. name: A name for the operation (optional). Returns: `Tensor`. Has the same type/shape as input `x`." 10268,dimension_size,tensorflow/tensorflow/python/ops/distributions/util.py,1205,function,Returns the size of a specific dimension. 10269,process_quadrature_grid_and_probs,tensorflow/tensorflow/python/ops/distributions/util.py,1216,function,"Validates quadrature grid, probs or computes them as necessary. 
Args: quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s representing the sample points and the corresponding (possibly normalized) weight. When `None`, defaults to: `np.polynomial.hermite.hermgauss(deg=8)`. dtype: The expected `dtype` of `grid` and `probs`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. name: Python `str` name prefixed to Ops created by this class. Returns: quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s representing the sample points and the corresponding (possibly normalized) weight. Raises: ValueError: if `quadrature_grid_and_probs is not None` and `len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`" 10270,pad,tensorflow/tensorflow/python/ops/distributions/util.py,1283,function,"Pads `value` to the front and/or back of a `Tensor` dim, `count` times. Args: x: `Tensor` input. axis: Scalar `int`-like `Tensor` representing the single dimension to pad. (Negative indexing is supported.) front: Python `bool`; if `True` the beginning of the `axis` dimension is padded with `value`, `count` times. If `False` no front padding is made. back: Python `bool`; if `True` the end of the `axis` dimension is padded with `value`, `count` times. If `False` no end padding is made. value: Scalar `int`-like `Tensor` representing the actual value added to the front and/or back of the `axis` dimension of `x`. count: Scalar `int`-like `Tensor` representing number of elements added to the front and/or back of the `axis` dimension of `x`. E.g., if `front = back = True` then `2 * count` elements are added. name: Python `str` name prefixed to Ops created by this function. Returns: pad: The padded version of input `x`. Raises: ValueError: if both `front` and `back` are `False`. TypeError: if `count` is not `int`-like." 10271,parent_frame_arguments,tensorflow/tensorflow/python/ops/distributions/util.py,1354,function,"Returns parent frame arguments. When called inside a function, returns a dictionary with the caller's function arguments. These are positional arguments and keyword arguments (**kwargs), while variable arguments (*varargs) are excluded. When called at global scope, this will return an empty dictionary, since there are no arguments. WARNING: If caller function argument names are overloaded before invoking this method, then values will reflect the overloaded value. For this reason, we recommend calling `parent_frame_arguments` at the beginning of the function." 10272,AppendDocstring,tensorflow/tensorflow/python/ops/distributions/util.py,1391,class,"Helper class to promote private subclass docstring to public counterpart. Example: ```python class TransformedDistribution(Distribution): @distribution_util.AppendDocstring( additional_note=""A special note!"", kwargs_dict={""foo"": ""An extra arg.""}) def _prob(self, y, foo=None): pass ``` In this case, the `AppendDocstring` decorator appends the `additional_note` to the docstring of `prob` (not `_prob`) and adds a new `kwargs` section with each dictionary item as a bullet-point. For a more detailed example, see `TransformedDistribution`." 
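The `logdet` entry below computes the log-determinant of Hermitian positive-definite matrices. A common stable route for such matrices is the Cholesky identity `log det A = 2 * sum(log(diag(chol(A))))`; the following NumPy sketch assumes only that identity and is illustrative, not the TensorFlow implementation.

```python
# Stable log-determinant of a Hermitian positive-definite (HPD) matrix via
# Cholesky: if A = L L^H with L lower triangular, then
#   log det A = 2 * sum(log(diag(L))).
import numpy as np

def logdet_hpd(matrix):
    """log(det(matrix)) for an HPD matrix, without forming det directly."""
    chol = np.linalg.cholesky(matrix)
    return 2.0 * np.sum(np.log(np.diag(chol).real))

a = np.array([[4.0, 1.0], [1.0, 3.0]])  # symmetric positive definite
assert np.isclose(logdet_hpd(a), np.log(np.linalg.det(a)))
```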
10273,_adjoint_linear_operator,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,36,function, 10274,_adjoint_adjoint_linear_operator,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,47,function, 10275,_adjoint_identity,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,53,function, 10276,_adjoint_scaled_identity,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,59,function, 10277,_adjoint_diag,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,75,function, 10278,_adjoint_block_diag,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,90,function, 10279,_adjoint_kronecker,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,103,function, 10280,_adjoint_circulant,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,117,function, 10281,_adjoint_householder,tensorflow/tensorflow/python/ops/linalg/adjoint_registrations.py,133,function, 10282,_cholesky_linear_operator,tensorflow/tensorflow/python/ops/linalg/cholesky_registrations.py,35,function, 10283,_cholesky_diag,tensorflow/tensorflow/python/ops/linalg/cholesky_registrations.py,45,function, 10284,_cholesky_identity,tensorflow/tensorflow/python/ops/linalg/cholesky_registrations.py,56,function, 10285,_cholesky_scaled_identity,tensorflow/tensorflow/python/ops/linalg/cholesky_registrations.py,69,function, 10286,_cholesky_block_diag,tensorflow/tensorflow/python/ops/linalg/cholesky_registrations.py,81,function, 10287,_cholesky_kronecker,tensorflow/tensorflow/python/ops/linalg/cholesky_registrations.py,93,function, 10288,_inverse_linear_operator,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,39,function, 10289,_inverse_inverse_linear_operator,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,50,function, 10290,_inverse_diag,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,56,function, 10291,_inverse_identity,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,67,function, 10292,_inverse_scaled_identity,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,73,function, 10293,_inverse_block_diag,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,85,function, 10294,_inverse_block_lower_triangular,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,98,function,"Inverse of LinearOperatorBlockLowerTriangular. We recursively apply the identity: ```none [A 0; B C]' = [A' 0; -C'BA' C'] ``` where `A` is n-by-n, `B` is m-by-n, `C` is m-by-m, `'` denotes inverse, and `;` separates the rows of a block matrix. This identity can be verified through multiplication: ```none [A 0; B C] [A' 0; -C'BA' C'] = [AA' 0; BA' - CC'BA' CC'] = [I 0; 0 I] ``` Args: block_lower_triangular_operator: Instance of `LinearOperatorBlockLowerTriangular`. Returns: block_lower_triangular_operator_inverse: Instance of `LinearOperatorBlockLowerTriangular`, the inverse of `block_lower_triangular_operator`." 10295,_inverse_kronecker,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,197,function, 10296,_inverse_circulant,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,211,function, 10297,_inverse_householder,tensorflow/tensorflow/python/ops/linalg/inverse_registrations.py,224,function, 10298,logdet,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,70,function,"Computes log of the determinant of a hermitian positive definite matrix. ```python # Compute the determinant of a matrix while reducing the chance of over- or underflow: A = ...
# shape 10 x 10 det = tf.exp(tf.linalg.logdet(A)) # scalar ``` Args: matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or `complex128` with shape `[..., M, M]`. name: A name to give this `Op`. Defaults to `logdet`. Returns: The natural log of the determinant of `matrix`. @compatibility(numpy) Equivalent to numpy.linalg.slogdet, although no sign is returned since only Hermitian positive definite matrices are supported. @end_compatibility" 10299,adjoint,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,104,function,"Transposes the last two dimensions of and conjugates tensor `matrix`. For example: ```python x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]) tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j], # [2 - 2j, 5 - 5j], # [3 - 3j, 6 - 6j]] ``` Args: matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or `complex128` with shape `[..., M, M]`. name: A name to give this `Op` (optional). Returns: The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of matrix." 10300,_matrix_exp_pade3,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,133,function,3rd-order Pade approximant for matrix exponential. 10301,_matrix_exp_pade5,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,148,function,5th-order Pade approximant for matrix exponential. 10302,_matrix_exp_pade7,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,164,function,7th-order Pade approximant for matrix exponential. 10303,_matrix_exp_pade9,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,181,function,9th-order Pade approximant for matrix exponential. 10304,_matrix_exp_pade13,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,206,function,13th-order Pade approximant for matrix exponential. 10305,matrix_exponential,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,234,function,"Computes the matrix exponential of one or more square matrices. exp(A) = \sum_{n=0}^\infty A^n/n! The exponential is computed using a combination of the scaling and squaring method and the Pade approximation. Details can be found in: Nicholas J. Higham, ""The scaling and squaring method for the matrix exponential revisited,"" SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005. The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the exponential for all input submatrices `[..., :, :]`. Args: input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or `complex128` with shape `[..., M, M]`. name: A name to give this `Op` (optional). Returns: the matrix exponential of the input. Raises: ValueError: An unsupported type is provided as input. @compatibility(scipy) Equivalent to scipy.linalg.expm @end_compatibility" 10306,banded_triangular_solve,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,344,function,"Solve triangular systems of equations with a banded solver. `bands` is a tensor of shape `[..., K, M]`, where `K` represents the number of bands stored. This corresponds to a batch of `M` by `M` matrices, whose `K` subdiagonals (when `lower` is `True`) are stored. This operator broadcasts the batch dimensions of `bands` and the batch dimensions of `rhs`. Examples: Storing 2 bands of a 3x3 matrix. Note that the first element in the second row is ignored due to the 'LEFT_RIGHT' padding.
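Since `matrix_exponential` (exposed as `tf.linalg.expm`) carries no inline example, here is a small hedged sketch; it relies only on the standard identity that the exponential of a 2 x 2 skew-symmetric generator is a rotation.

```python
import numpy as np
import tensorflow as tf

# expm([[0, -t], [t, 0]]) == [[cos t, -sin t], [sin t, cos t]]
t = np.pi / 4
a = tf.constant([[0., -t], [t, 0.]])
rot = tf.linalg.expm(a)
print(rot.numpy().round(4))
# [[ 0.7071 -0.7071]
#  [ 0.7071  0.7071]]
```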
>>> x = [[2., 3., 4.], [1., 2., 3.]] >>> x2 = [[2., 3., 4.], [10000., 2., 3.]] >>> y = tf.zeros([3, 3]) >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(-1, 0)) >>> z >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([3, 1])) >>> soln >>> are_equal = soln == tf.linalg.banded_triangular_solve(x2, tf.ones([3, 1])) >>> tf.reduce_all(are_equal).numpy() True >>> are_equal = soln == tf.linalg.triangular_solve(z, tf.ones([3, 1])) >>> tf.reduce_all(are_equal).numpy() True Storing 2 superdiagonals of a 4x4 matrix. Because of the 'LEFT_RIGHT' padding the last element of the first row is ignored. >>> x = [[2., 3., 4., 5.], [-1., -2., -3., -4.]] >>> y = tf.zeros([4, 4]) >>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(0, 1)) >>> z >>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([4, 1]), lower=False) >>> soln >>> are_equal = (soln == tf.linalg.triangular_solve( ... z, tf.ones([4, 1]), lower=False)) >>> tf.reduce_all(are_equal).numpy() True Args: bands: A `Tensor` describing the bands of the left hand side, with shape `[..., K, M]`. The `K` rows correspond to the diagonal to the `K - 1`-th diagonal (the diagonal is the top row) when `lower` is `True` and otherwise the `K - 1`-th superdiagonal to the diagonal (the diagonal is the bottom row) when `lower` is `False`. The bands are stored with 'LEFT_RIGHT' alignment, where the superdiagonals are padded on the right and subdiagonals are padded on the left. This is the alignment cuSPARSE uses. See `tf.linalg.set_diag` for more details. rhs: A `Tensor` of shape [..., M] or [..., M, N] and with the same dtype as `bands`. Note that if the shape of `rhs` and/or `bands` isn't known statically, `rhs` will be treated as a matrix rather than a vector. lower: An optional `bool`. Defaults to `True`. Boolean indicating whether `bands` represents a lower or upper triangular matrix. adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether to solve with the matrix's block-wise adjoint. name: A name to give this `Op` (optional). Returns: A `Tensor` of shape [..., M] or [..., M, N] containing the solutions." 10307,tridiagonal_solve,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,441,function,"Solves tridiagonal systems of equations. The input can be supplied in various formats: `matrix`, `sequence` and `compact`, specified by the `diagonals_format` arg. In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with the two inner-most dimensions representing the square tridiagonal matrices. Elements outside of the three diagonals will be ignored. In `sequence` format, `diagonals` are supplied as a tuple or list of three tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either `M-1` or `M`; in the latter case, the last element of the superdiagonal and the first element of the subdiagonal will be ignored. In `compact` format the three diagonals are brought together into one tensor of shape `[..., 3, M]`, with the last two dimensions containing superdiagonals, diagonals, and subdiagonals, in order. Similarly to `sequence` format, elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored. The `compact` format is recommended as the one with the best performance. In case you need to cast a tensor into the compact format manually, use `tf.gather_nd`.
An example for a tensor of shape [m, m]: ```python rhs = tf.constant([...]) matrix = tf.constant([[...]]) m = matrix.shape[0] dummy_idx = [0, 0] # An arbitrary element to use as a dummy indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal [[i, i] for i in range(m)], # Diagonal [dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal diagonals = tf.gather_nd(matrix, indices) x = tf.linalg.tridiagonal_solve(diagonals, rhs) ``` Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or `[..., M, K]`. The latter allows solving `K` systems simultaneously with the same left-hand sides and `K` different right-hand sides. If `transpose_rhs` is set to `True` the expected shape is `[..., M]` or `[..., K, M]`. The batch dimensions, denoted as `...`, must be the same in `diagonals` and `rhs`. The output is a tensor of the same shape as `rhs`: either `[..., M]` or `[..., M, K]`. The op isn't guaranteed to raise an error if the input matrix is not invertible. `tf.debugging.check_numerics` can be applied to the output to detect invertibility problems. **Note**: with large batch sizes, the computation on the GPU may be slow, if either `partial_pivoting=True` or there are multiple right-hand sides (`K > 1`). If this issue arises, consider if it's possible to disable pivoting and have `K = 1`, or, alternatively, consider using CPU. On CPU, the solution is computed via Gaussian elimination with or without partial pivoting, depending on the `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv Args: diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The shape depends on `diagonals_format`, see description above. Must be `float32`, `float64`, `complex64`, or `complex128`. rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as `diagonals`. Note that if the shape of `rhs` and/or `diagonals` isn't known statically, `rhs` will be treated as a matrix rather than a vector. diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is `compact`. transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect if the shape of rhs is [..., M]). conjugate_rhs: If `True`, `rhs` is conjugated before solving. name: A name to give this `Op` (optional). partial_pivoting: whether to perform partial pivoting. `True` by default. Partial pivoting makes the procedure more stable, but slower. Partial pivoting is unnecessary in some cases, including diagonally dominant and symmetric positive definite matrices (see e.g. theorem 9.12 in [1]). Returns: A `Tensor` of shape [..., M] or [..., M, K] containing the solutions. Raises: ValueError: An unsupported type is provided as input, or when the input tensors have incorrect shapes. UnimplementedError: Whenever `partial_pivoting` is true and the backend is XLA. [1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms: Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7." 10308,_tridiagonal_solve_compact_format,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,593,function,Helper function used after the input has been cast to compact form. 10309,tridiagonal_matmul,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,643,function,"Multiplies a tridiagonal matrix by a matrix. `diagonals` is a representation of a tridiagonal `N x N` matrix, whose form depends on `diagonals_format`.
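To make the `compact` format described above concrete, here is a minimal end-to-end sketch of `tf.linalg.tridiagonal_solve`; the system is chosen so the solution can be checked by hand.

```python
import tensorflow as tf

# Solve A x = b for the tridiagonal matrix
#   A = [[ 2., -1.,  0.],
#        [-1.,  2., -1.],
#        [ 0., -1.,  2.]]
diagonals = tf.constant([[-1., -1., 0.],    # superdiagonal; [0, M-1] ignored
                         [ 2.,  2., 2.],    # main diagonal
                         [ 0., -1., -1.]])  # subdiagonal; [2, 0] ignored
rhs = tf.constant([1., 0., 1.])
x = tf.linalg.tridiagonal_solve(diagonals, rhs)  # default: compact format
print(x.numpy())  # ~[1. 1. 1.], since A @ [1, 1, 1] = [1, 0, 1]
```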
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with the two inner-most dimensions representing the square tridiagonal matrices. Elements outside of the three diagonals will be ignored. In `sequence` format, `diagonals` is a list or tuple of three tensors: `[superdiag, maindiag, subdiag]`, each having shape [..., M]. The last element of `superdiag` and the first element of `subdiag` are ignored. In `compact` format the three diagonals are brought together into one tensor of shape `[..., 3, M]`, with the last two dimensions containing superdiagonals, diagonals, and subdiagonals, in order. Similarly to `sequence` format, elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored. The `sequence` format is recommended as the one with the best performance. `rhs` is the matrix on the right-hand side of the multiplication; it has shape `[..., M, N]`. Example: ```python superdiag = tf.constant([-1, -1, 0], dtype=tf.float64) maindiag = tf.constant([2, 2, 2], dtype=tf.float64) subdiag = tf.constant([0, -1, -1], dtype=tf.float64) diagonals = [superdiag, maindiag, subdiag] rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64) x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence') ``` Args: diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The shape depends on `diagonals_format`, see description above. Must be `float32`, `float64`, `complex64`, or `complex128`. rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`. diagonals_format: one of `sequence` or `compact`. Default is `compact`. name: A name to give this `Op` (optional). Returns: A `Tensor` of shape [..., M, N] containing the result of the multiplication. Raises: ValueError: An unsupported type is provided as input, or when the input tensors have incorrect shapes." 10310,_maybe_validate_matrix,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,722,function,Checks that input is a `float` matrix. 10311,matrix_rank,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,741,function,"Compute the matrix rank of one or more matrices. Arguments: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) whose rank is to be computed. tol: Threshold below which the singular value is counted as 'zero'. Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`). validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: 'matrix_rank'. Returns: matrix_rank: (Batch of) `int32` scalars representing the number of non-zero singular values." 10312,pinv,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,780,function,"Compute the Moore-Penrose pseudo-inverse of one or more matrices. Calculate the [generalized inverse of a matrix]( https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its singular-value decomposition (SVD) and including all large singular values. The pseudo-inverse of a matrix `A` is defined as: 'the matrix that 'solves' [the least-squares problem] `A @ x = b`,' i.e., if `x_hat` is a solution, then `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then `A_pinv = V @ inv(Sigma) @ U^T`. [(Strang, 1980)][1] This function is analogous to [`numpy.linalg.pinv`]( https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html). It differs only in the default value of `rcond`.
In `numpy.linalg.pinv`, the default `rcond` is `1e-15`. Here the default is `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`. Args: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. rcond: `Tensor` of small singular value cutoffs. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Must broadcast against `tf.shape(a)[:-2]`. Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: 'pinv'. Returns: a_pinv: (Batch of) pseudo-inverse of input `a`. Has the same shape as `a` except the rightmost two dimensions are transposed. Raises: TypeError: if input `a` does not have `float`-like `dtype`. ValueError: if input `a` has fewer than 2 dimensions. #### Examples ```python import tensorflow as tf import tensorflow_probability as tfp a = tf.constant([[1., 0.4, 0.5], [0.4, 0.2, 0.25], [0.5, 0.25, 0.35]]) tf.matmul(tf.linalg.pinv(a), a) # ==> array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) a = tf.constant([[1., 0.4, 0.5, 1.], [0.4, 0.2, 0.25, 2.], [0.5, 0.25, 0.35, 3.]]) tf.matmul(tf.linalg.pinv(a), a) # ==> array([[ 0.76, 0.37, 0.21, -0.02], [ 0.37, 0.43, -0.33, 0.02], [ 0.21, -0.33, 0.81, 0.01], [-0.02, 0.02, 0.01, 1. ]], dtype=float32) ``` #### References [1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press, Inc., 1980, pp. 139-142." 10313,lu_solve,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,910,function,"Solves systems of linear eqns `A X = RHS`, given LU factorizations. Note: this function does not verify the implied matrix is actually invertible, nor is this condition checked even when `validate_args=True`. Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. rhs: Matrix-shaped float `Tensor` representing targets for which to solve; `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[..., tf.newaxis])[..., 0]`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., 'lu_solve'). Returns: x: The `X` in `A @ X = RHS`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[1., 2], [3, 4]], [[7, 8], [3, 4]]] inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ```" 10314,lu_matrix_inverse,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,1008,function,"Computes the inverse given the LU decomposition(s) of one or more matrices. This op is conceptually identical to: ```python inv_X = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(X)) tf.assert_near(tf.matrix_inverse(X), inv_X) # ==> True ``` Note: this function does not verify the implied matrix is actually invertible, nor is this condition checked even when `validate_args=True`.
Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when `validate_args=True`. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., 'lu_matrix_inverse'). Returns: inv_x: The matrix_inv, i.e., `tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[3., 4], [1, 2]], [[7., 8], [3, 4]]] inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x)) tf.assert_near(tf.matrix_inverse(x), inv_x) # ==> True ```" 10315,lu_reconstruct,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,1073,function,"Reconstructs one or more matrices from their LU decomposition(s). Args: lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `lower_upper = L + U - eye`. perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) = X` then `perm = argmax(P)`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. Default value: `False` (i.e., don't validate arguments). name: Python `str` name given to ops managed by this object. Default value: `None` (i.e., 'lu_reconstruct'). Returns: x: The original input to `tf.linalg.lu`, i.e., `x` as in `lu_reconstruct(*tf.linalg.lu(x))`. #### Examples ```python import numpy as np import tensorflow as tf import tensorflow_probability as tfp x = [[[3., 4], [1, 2]], [[7., 8], [3, 4]]] x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x)) tf.assert_near(x, x_reconstructed) # ==> True ```" 10316,lu_reconstruct_assertions,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,1145,function,Returns list of assertions related to `lu_reconstruct` assumptions. 10317,_lu_solve_assertions,tensorflow/tensorflow/python/ops/linalg/linalg_impl.py,1177,function,Returns list of assertions related to `lu_solve` assumptions. 10318,LinearOperator,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,50,class,"Base class defining a [batch of] linear operator[s]. Subclasses of `LinearOperator` provide access to common methods on a (batch) matrix, without the need to materialize the matrix. This allows: * Matrix-free computations * Operators that take advantage of special structure, while providing a consistent API to users. #### Subclassing To enable a public method, subclasses should implement the leading-underscore version of the method. The argument signature should be identical except for the omission of `name=""...""`. For example, to enable `matmul(x, adjoint=False, name=""matmul"")` a subclass should implement `_matmul(x, adjoint=False)`. #### Performance contract Subclasses should only implement the assert methods (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)` time. Class docstrings should contain an explanation of computational complexity. Since this is a high-performance library, attention should be paid to detail, and explanations can include constants as well as Big-O notation. #### Shape compatibility `LinearOperator` subclasses should operate on a [batch] matrix with compatible shape.
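To make the subclassing contract above concrete, here is a hedged sketch of a minimal subclass: a hypothetical scaled-identity operator that implements only the leading-underscore methods (`_shape`, `_shape_tensor`, `_matmul`) and inherits the public API. Exact base-class requirements can vary across TensorFlow versions; a real subclass should also document shape compatibility and performance as described.

```python
import tensorflow as tf

class LinearOperatorScaled(tf.linalg.LinearOperator):
  """Illustrative only: acts like `scale * I` on `R^n`."""

  def __init__(self, scale, n):
    self._scale = tf.convert_to_tensor(scale)
    self._n = n
    super().__init__(dtype=self._scale.dtype, is_square=True)

  def _shape(self):
    return tf.TensorShape([self._n, self._n])

  def _shape_tensor(self):
    return tf.constant([self._n, self._n], dtype=tf.int32)

  # Implementing this enables the public `matmul(x, adjoint=False, ...)`.
  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    x = tf.linalg.adjoint(x) if adjoint_arg else x
    scale = tf.math.conj(self._scale) if adjoint else self._scale
    return scale * x

op = LinearOperatorScaled(2., n=3)
print(op.matmul(tf.ones([3, 2])).numpy())  # all entries 2.0, shape [3, 2]
```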
Class docstrings should define what is meant by compatible shape. Some subclasses may not support batching. Examples: `x` is a batch matrix with compatible shape for `matmul` if ``` operator.shape = [B1,...,Bb] + [M, N], b >= 0, x.shape = [B1,...,Bb] + [N, R] ``` `rhs` is a batch matrix with compatible shape for `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], b >= 0, rhs.shape = [B1,...,Bb] + [M, R] ``` #### Example docstring for subclasses. This operator acts like a (batch) matrix `A` with shape `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `m x n` matrix. Again, this matrix `A` may not be materialized, but for purposes of identifying and working with compatible arguments the shape is relevant. Examples: ```python some_tensor = ... shape = ???? operator = MyLinOp(some_tensor) operator.shape() ==> [2, 4, 4] operator.log_abs_determinant() ==> Shape [2] Tensor x = ... Shape [2, 4, 5] Tensor operator.matmul(x) ==> Shape [2, 4, 5] Tensor ``` #### Shape compatibility This operator acts on batch matrices with compatible shape. FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE #### Performance FILL THIS IN #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10319,_adjoint,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1136,function, 10320,_cholesky,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1141,function, 10321,_diag_part,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1149,function, 10322,_det,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1160,function, 10323,_inverse,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1165,function, 10324,_logdet,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1173,function, 10325,_matmul,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1180,function, 10326,_solve,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1207,function, 10327,_trace,tensorflow/tensorflow/python/ops/linalg/linear_operator.py,1219,function, 10328,add_operators,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,38,function,"Efficiently add one or more linear operators. Given operators `[A1, A2,...]`, this `Op` returns a possibly shorter list of operators `[B1, B2,...]` such that ```sum_k Ak.matmul(x) = sum_k Bk.matmul(x).``` The operators `Bk` result by adding some of the `Ak`, as allowed by `addition_tiers`. Example of efficient adding of diagonal operators. ```python A1 = LinearOperatorDiag(diag=[1., 1.], name=""A1"") A2 = LinearOperatorDiag(diag=[2., 2.], name=""A2"") # Use two tiers, the first contains an Adder that returns Diag. Since both # A1 and A2 are Diag, they can use this Adder. The second tier will not be # used. 
used. addition_tiers = [ [_AddAndReturnDiag()], [_AddAndReturnMatrix()]] B_list = add_operators([A1, A2], addition_tiers=addition_tiers) len(B_list) ==> 1 B_list[0].__class__.__name__ ==> 'LinearOperatorDiag' B_list[0].to_dense() ==> [[3., 0.], [0., 3.]] B_list[0].name ==> 'Add/A1__A2/' ``` Args: operators: Iterable of `LinearOperator` objects with same `dtype`, domain and range dimensions, and broadcastable batch shapes. operator_name: String name for returned `LinearOperator`. Defaults to concatenation of ""Add/A__B/"" that indicates the order of addition steps. addition_tiers: List tiers, like `[tier_0, tier_1, ...]`, where `tier_i` is a list of `Adder` objects. This function attempts to do all additions in tier `i` before trying tier `i + 1`. name: A name for this `Op`. Defaults to `add_operators`. Returns: Subclass of `LinearOperator`. Class and order of addition may change as new (and better) addition strategies emerge. Raises: ValueError: If `operators` argument is empty. ValueError: If shapes are incompatible." 10329,_pop_a_match_at_tier,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,141,function, 10330,_infer_hints_allowing_override,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,152,function,"Infer hints from op1 and op2. The hints argument is an override. Args: op1: LinearOperator op2: LinearOperator hints: _Hints object holding ""is_X"" boolean hints to use for the returned operator. If some hint is None, try to set using op1 and op2. If the hint is provided, ignore op1 and op2 hints. This allows an override of previous hints, but does not allow forbidden hints (e.g. you still cannot say a real diagonal operator is not self-adjoint). Returns: _Hints object." 10331,_static_check_for_same_dimensions,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,193,function,ValueError if operators determined to have different dimensions. 10332,_static_check_for_broadcastable_batch_shape,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,215,function,ValueError if operators determined to have non-broadcastable shapes. 10333,_Hints,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,226,class,Holds 'is_X' flags that every LinearOperator is initialized with. 10334,_Adder,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,244,class,"Abstract base class to add two operators. Each `Adder` acts independently, adding everything it can, paying no attention as to whether another `Adder` could have done the addition more efficiently." 10335,_AddAndReturnScaledIdentity,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,293,class,"Handles additions resulting in an Identity family member. The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family is closed under addition. This `Adder` respects that, and returns an Identity family member." 10336,_AddAndReturnDiag,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,326,class,Handles additions resulting in a Diag operator. 10337,_AddAndReturnTriL,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,342,class,Handles additions resulting in a TriL operator. 10338,_AddAndReturnMatrix,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,363,class,Handles additions resulting in a `LinearOperatorFullMatrix`. 10339,_type,tensorflow/tensorflow/python/ops/linalg/linear_operator_addition.py,410,function,Returns the type name constant (e.g. _TRIL) for operator.
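As a usage note for `add_operators` above: with the default tiers, two `LinearOperatorDiag`s collapse into a single `LinearOperatorDiag`. This sketch assumes the private `linear_operator_addition` module remains importable from the path shown in this index.

```python
import tensorflow as tf
from tensorflow.python.ops.linalg import linear_operator_addition

a = tf.linalg.LinearOperatorDiag(diag=[1., 1.], name="A1")
b = tf.linalg.LinearOperatorDiag(diag=[2., 2.], name="A2")
(summed,) = linear_operator_addition.add_operators([a, b])
print(type(summed).__name__)      # LinearOperatorDiag
print(summed.to_dense().numpy())  # [[3. 0.]
                                  #  [0. 3.]]
```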
10340,LinearOperatorAdjoint,tensorflow/tensorflow/python/ops/linalg/linear_operator_adjoint.py,33,class,"`LinearOperator` representing the adjoint of another operator. This operator represents the adjoint of another operator. ```python # Create a 2 x 2 linear operator. operator = LinearOperatorFullMatrix([[1 - i., 3.], [0., 1. + i]]) operator_adjoint = LinearOperatorAdjoint(operator) operator_adjoint.to_dense() ==> [[1. + i, 0.] [3., 1 - i]] operator_adjoint.shape ==> [2, 2] operator_adjoint.log_abs_determinant() ==> log(2) x = ... Shape [2, 4] Tensor operator_adjoint.matmul(x) ==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True) ``` #### Performance The performance of `LinearOperatorAdjoint` depends on the performance of the underlying operator. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10341,_registered_function,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,35,function,"Given a list of classes, finds the most specific function registered." 10342,_registered_adjoint,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,51,function,Get the Adjoint function registered for class a. 10343,_registered_cholesky,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,56,function,Get the Cholesky function registered for class a. 10344,_registered_matmul,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,61,function,Get the Matmul function registered for classes a and b. 10345,_registered_solve,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,66,function,Get the Solve function registered for classes a and b. 10346,_registered_inverse,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,71,function,Get the Inverse function registered for class a. 10347,adjoint,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,76,function,"Get the adjoint associated with lin_op_a. Args: lin_op_a: The LinearOperator to take the adjoint of. name: Name to use for this operation. Returns: A LinearOperator that represents the adjoint of `lin_op_a`. Raises: NotImplementedError: If no Adjoint method is defined for the LinearOperator type of `lin_op_a`." 10348,cholesky,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,99,function,"Get the Cholesky factor associated with lin_op_a. Args: lin_op_a: The LinearOperator to decompose. name: Name to use for this operation. Returns: A LinearOperator that represents the lower Cholesky factor of `lin_op_a`. Raises: NotImplementedError: If no Cholesky method is defined for the LinearOperator type of `lin_op_a`." 10349,matmul,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,122,function,"Compute lin_op_a.matmul(lin_op_b). Args: lin_op_a: The LinearOperator on the left. lin_op_b: The LinearOperator on the right. name: Name to use for this operation. Returns: A LinearOperator that represents the matmul between `lin_op_a` and `lin_op_b`.
Raises: NotImplementedError: If no matmul method is defined between types of `lin_op_a` and `lin_op_b`." 10350,solve,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,147,function,"Compute lin_op_a.solve(lin_op_b). Args: lin_op_a: The LinearOperator on the left. lin_op_b: The LinearOperator on the right. name: Name to use for this operation. Returns: A LinearOperator that represents the solve between `lin_op_a` and `lin_op_b`. Raises: NotImplementedError: If no solve method is defined between types of `lin_op_a` and `lin_op_b`." 10351,inverse,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,172,function,"Get the Inverse associated with lin_op_a. Args: lin_op_a: The LinearOperator to invert. name: Name to use for this operation. Returns: A LinearOperator that represents the inverse of `lin_op_a`. Raises: NotImplementedError: If no Inverse method is defined for the LinearOperator type of `lin_op_a`." 10352,RegisterAdjoint,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,195,class,"Decorator to register an Adjoint implementation function. Usage: @linear_operator_algebra.RegisterAdjoint(lin_op.LinearOperatorIdentity) def _adjoint_identity(lin_op_a): # Return the identity matrix." 10353,RegisterCholesky,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,237,class,"Decorator to register a Cholesky implementation function. Usage: @linear_operator_algebra.RegisterCholesky(lin_op.LinearOperatorIdentity) def _cholesky_identity(lin_op_a): # Return the identity matrix." 10354,RegisterMatmul,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,279,class,"Decorator to register a Matmul implementation function. Usage: @linear_operator_algebra.RegisterMatmul( lin_op.LinearOperatorIdentity, lin_op.LinearOperatorIdentity) def _matmul_identity(a, b): # Return the identity matrix." 10355,RegisterSolve,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,325,class,"Decorator to register a Solve implementation function. Usage: @linear_operator_algebra.RegisterSolve( lin_op.LinearOperatorIdentity, lin_op.LinearOperatorIdentity) def _solve_identity(a, b): # Return the identity matrix." 10356,RegisterInverse,tensorflow/tensorflow/python/ops/linalg/linear_operator_algebra.py,371,class,"Decorator to register an Inverse implementation function. Usage: @linear_operator_algebra.RegisterInverse(lin_op.LinearOperatorIdentity) def _inverse_identity(lin_op_a): # Return the identity matrix." 10357,LinearOperatorBlockDiag,tensorflow/tensorflow/python/ops/linalg/linear_operator_block_diag.py,37,class,"Combines one or more `LinearOperators` into a block diagonal matrix. This operator combines one or more linear operators `[op1,...,opJ]`, building a new `LinearOperator`, whose underlying matrix representation is square and has each operator `opi` on the main diagonal, and zeros elsewhere. #### Shape compatibility If `opj` acts like a [batch] square matrix `Aj`, then `op_combined` acts like the [batch] square matrix formed by having each matrix `Aj` on the main diagonal. Each `opj` is required to represent a square matrix, and hence will have shape `batch_shape_j + [M_j, M_j]`. If `opj` has shape `batch_shape_j + [M_j, M_j]`, then the combined operator has shape `broadcast_batch_shape + [sum M_j, sum M_j]`, where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate batch shapes broadcast.
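Returning to the registry decorators catalogued just above (`RegisterAdjoint`, `RegisterCholesky`, `RegisterInverse`, ...): their practical effect is that structure-aware implementations such as `_cholesky_diag` are dispatched through the public methods. A brief hedged illustration (the `is_*` hints are required before `cholesky()` may be called):

```python
import tensorflow as tf

diag_op = tf.linalg.LinearOperatorDiag(
    diag=[4., 9.], is_self_adjoint=True, is_positive_definite=True)
chol = diag_op.cholesky()        # dispatched via the Cholesky registry
print(type(chol).__name__)       # LinearOperatorDiag, per _cholesky_diag
print(chol.diag_part().numpy())  # [2. 3.]
```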
Even if the combined shape is well defined, the combined operator's methods may fail due to lack of broadcasting ability in the defining operators' methods. Arguments to `matmul`, `matvec`, `solve`, and `solvevec` may either be single `Tensor`s or lists of `Tensor`s that are interpreted as blocks. The `j`th element of a blockwise list of `Tensor`s must have dimensions that match `opj` for the given method. If a list of blocks is input, then a list of blocks is returned as well. ```python # Create a 4 x 4 linear operator combined of two 2 x 2 operators. operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) operator = LinearOperatorBlockDiag([operator_1, operator_2]) operator.to_dense() ==> [[1., 2., 0., 0.], [3., 4., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]] operator.shape ==> [4, 4] operator.log_abs_determinant() ==> scalar Tensor x1 = ... # Shape [2, 2] Tensor x2 = ... # Shape [2, 2] Tensor x = tf.concat([x1, x2], 0) # Shape [4, 2] Tensor operator.matmul(x) ==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)], 0) # Create a [2, 3] batch of 4 x 4 linear operators. matrix_44 = tf.random.normal(shape=[2, 3, 4, 4]) operator_44 = LinearOperatorFullMatrix(matrix_44) # Create a [1, 3] batch of 5 x 5 linear operators. matrix_55 = tf.random.normal(shape=[1, 3, 5, 5]) operator_55 = LinearOperatorFullMatrix(matrix_55) # Combine to create a [2, 3] batch of 9 x 9 operators. operator_99 = LinearOperatorBlockDiag([operator_44, operator_55]) # Create a shape [2, 3, 9] vector. x = tf.random.normal(shape=[2, 3, 9]) operator_99.matvec(x) ==> Shape [2, 3, 9] Tensor # Create a blockwise list of vectors. x = [tf.random.normal(shape=[2, 3, 4]), tf.random.normal(shape=[2, 3, 5])] operator_99.matvec(x) ==> [Shape [2, 3, 4] Tensor, Shape [2, 3, 5] Tensor] ``` #### Performance The performance of `LinearOperatorBlockDiag` on any operation is equal to the sum of the individual operators' operations. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10358,LinearOperatorBlockLowerTriangular,tensorflow/tensorflow/python/ops/linalg/linear_operator_block_lower_triangular.py,39,class,"Combines `LinearOperators` into a blockwise lower-triangular matrix. This operator is initialized with a nested list of linear operators, which are combined into a new `LinearOperator` whose underlying matrix representation is square and has each operator on or below the main diagonal, and zeros elsewhere. Each element of the outer list is a list of `LinearOperators` corresponding to a row-partition of the blockwise structure. The number of `LinearOperator`s in row-partition `i` must be equal to `i + 1`. For example, a blockwise `3 x 3` `LinearOperatorBlockLowerTriangular` is initialized with the list `[[op_00], [op_10, op_11], [op_20, op_21, op_22]]`, where the `op_ij`, `i < 3, j <= i`, are `LinearOperator` instances.
The `LinearOperatorBlockLowerTriangular` behaves as the following blockwise matrix, where `0` represents appropriately-sized [batch] matrices of zeros: ```none [[op_00, 0, 0], [op_10, op_11, 0], [op_20, op_21, op_22]] ``` Each `op_jj` on the diagonal is required to represent a square matrix, and hence will have shape `batch_shape_j + [M_j, M_j]`. `LinearOperator`s in row `j` of the blockwise structure must have `range_dimension` equal to that of `op_jj`, and `LinearOperators` in column `j` must have `domain_dimension` equal to that of `op_jj`. If each `op_jj` on the diagonal has shape `batch_shape_j + [M_j, M_j]`, then the combined operator has shape `broadcast_batch_shape + [sum M_j, sum M_j]`, where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, `j = 0, 1, ..., J`, assuming the intermediate batch shapes broadcast. Even if the combined shape is well defined, the combined operator's methods may fail due to lack of broadcasting ability in the defining operators' methods. For example, to create a 4 x 4 linear operator combined of three 2 x 2 operators: >>> operator_0 = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) >>> operator_1 = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) >>> operator_2 = tf.linalg.LinearOperatorLowerTriangular([[5., 6.], [7., 8]]) >>> operator = LinearOperatorBlockLowerTriangular( ... [[operator_0], [operator_1, operator_2]]) >>> operator.to_dense() >>> operator.shape TensorShape([4, 4]) >>> operator.log_abs_determinant() >>> x0 = [[1., 6.], [-3., 4.]] >>> x1 = [[0., 2.], [4., 0.]] >>> x = tf.concat([x0, x1], 0) # Shape [2, 4] Tensor >>> operator.matmul(x) The above `matmul` is equivalent to: >>> tf.concat([operator_0.matmul(x0), ... operator_1.matmul(x0) + operator_2.matmul(x1)], axis=0) #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. ``` For example: Create a [2, 3] batch of 4 x 4 linear operators: >>> matrix_44 = tf.random.normal(shape=[2, 3, 4, 4]) >>> operator_44 = tf.linalg.LinearOperatorFullMatrix(matrix_44) Create a [1, 3] batch of 5 x 4 linear operators: >>> matrix_54 = tf.random.normal(shape=[1, 3, 5, 4]) >>> operator_54 = tf.linalg.LinearOperatorFullMatrix(matrix_54) Create a [1, 3] batch of 5 x 5 linear operators: >>> matrix_55 = tf.random.normal(shape=[1, 3, 5, 5]) >>> operator_55 = tf.linalg.LinearOperatorFullMatrix(matrix_55) Combine to create a [2, 3] batch of 9 x 9 operators: >>> operator_99 = LinearOperatorBlockLowerTriangular( ... [[operator_44], [operator_54, operator_55]]) >>> operator_99.shape TensorShape([2, 3, 9, 9]) Create a shape [2, 1, 9] batch of vectors and apply the operator to it. >>> x = tf.random.normal(shape=[2, 1, 9]) >>> y = operator_99.matvec(x) >>> y.shape TensorShape([2, 3, 9]) Create a blockwise list of vectors and apply the operator to it. A blockwise list is returned. >>> x4 = tf.random.normal(shape=[2, 1, 4]) >>> x5 = tf.random.normal(shape=[2, 3, 5]) >>> y_blockwise = operator_99.matvec([x4, x5]) >>> y_blockwise[0].shape TensorShape([2, 3, 4]) >>> y_blockwise[1].shape TensorShape([2, 3, 5]) #### Performance Suppose `operator` is a `LinearOperatorBlockLowerTriangular` consisting of `D` row-partitions and `D` column-partitions, such that the total number of operators is `N = D * (D + 1) // 2`. 
* `operator.matmul` has complexity equal to the sum of the `matmul` complexities of the individual operators. * `operator.solve` has complexity equal to the sum of the `solve` complexities of the operators on the diagonal and the `matmul` complexities of the operators off the diagonal. * `operator.determinant` has complexity equal to the sum of the `determinant` complexities of the operators on the diagonal. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10359,_BaseLinearOperatorCirculant,tensorflow/tensorflow/python/ops/linalg/linear_operator_circulant.py,52,class,"Base class for circulant operators. Not user facing. `LinearOperator` acting like a [batch] [[nested] block] circulant matrix." 10360,LinearOperatorCirculant,tensorflow/tensorflow/python/ops/linalg/linear_operator_circulant.py,516,class,"`LinearOperator` acting like a circulant matrix. This operator acts like a circulant matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of circulant matrices Circulant means the entries of `A` are generated by a single vector, the convolution kernel `h`: `A_{mn} := h_{m-n mod N}`. With `h = [w, x, y, z]`, ``` A = |w z y x| |x w z y| |y x w z| |z y x w| ``` This means that the result of matrix multiplication `v = Au` has `Lth` column given by circular convolution of `h` with the `Lth` column of `u`. #### Description in terms of the frequency spectrum There is an equivalent description in terms of the [batch] spectrum `H` and Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch dimensions. Define the discrete Fourier transform (DFT) and its inverse by ``` DFT[ h[n] ] = H[k] := sum_{n = 0}^{N - 1} h_n e^{-i 2pi k n / N} IDFT[ H[k] ] = h[n] = N^{-1} sum_{k = 0}^{N - 1} H_k e^{i 2pi k n / N} ``` From these definitions, we see that ``` H[0] = sum_{n = 0}^{N - 1} h_n H[1] = ""the first positive frequency"" H[N - 1] = ""the first negative frequency"" ``` Loosely speaking, with `*` element-wise multiplication, matrix multiplication is equal to the action of a Fourier multiplier: `A u = IDFT[ H * DFT[u] ]`. Precisely speaking, given `[N, R]` matrix `u`, let `DFT[u]` be the `[N, R]` matrix with `rth` column equal to the DFT of the `rth` column of `u`. Define the `IDFT` similarly. Matrix multiplication may be expressed columnwise: ```(A u)_r = IDFT[ H * (DFT[u])_r ]``` #### Operator properties deduced from the spectrum. Letting `u` be the `kth` Euclidean basis vector, with `U = IDFT[u]`, the above formulas show that `A U = H_k * U`. We conclude that the elements of `H` are the eigenvalues of this operator. Therefore * This operator is positive definite if and only if `Real{H} > 0`.
A general property of Fourier transforms is the correspondence between Hermitian functions and real-valued transforms. Suppose `H.shape = [B1,...,Bb, N]`. We say that `H` is a Hermitian spectrum if, with `%` meaning modulus division, ```H[..., n % N] = ComplexConjugate[ H[..., (-n) % N] ]``` * This operator corresponds to a real matrix if and only if `H` is Hermitian. * This operator is self-adjoint if and only if `H` is real. See e.g. ""Discrete-Time Signal Processing"", Oppenheim and Schafer. #### Example of a self-adjoint positive definite operator ```python # spectrum is real ==> operator is self-adjoint # spectrum is positive ==> operator is positive definite spectrum = [6., 4, 2] operator = LinearOperatorCirculant(spectrum) # IFFT[spectrum] operator.convolution_kernel() ==> [4 + 0j, 1 + 0.58j, 1 - 0.58j] operator.to_dense() ==> [[4 + 0.0j, 1 - 0.6j, 1 + 0.6j], [1 + 0.6j, 4 + 0.0j, 1 - 0.6j], [1 - 0.6j, 1 + 0.6j, 4 + 0.0j]] ``` #### Example of defining in terms of a real convolution kernel ```python # convolution_kernel is real ==> spectrum is Hermitian. convolution_kernel = [1., 2., 1.] spectrum = tf.signal.fft(tf.cast(convolution_kernel, tf.complex64)) # spectrum is Hermitian ==> operator is real. # spectrum is shape [3] ==> operator is shape [3, 3] # We force the input/output type to be real, which allows this to operate # like a real matrix. operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32) operator.to_dense() ==> [[ 1, 1, 2], [ 2, 1, 1], [ 1, 2, 1]] ``` #### Example of Hermitian spectrum ```python # spectrum is shape [3] ==> operator is shape [3, 3] # spectrum is Hermitian ==> operator is real. spectrum = [1, 1j, -1j] operator = LinearOperatorCirculant(spectrum) operator.to_dense() ==> [[ 0.33 + 0j, 0.91 + 0j, -0.24 + 0j], [-0.24 + 0j, 0.33 + 0j, 0.91 + 0j], [ 0.91 + 0j, -0.24 + 0j, 0.33 + 0j]] ``` #### Example of forcing real `dtype` when spectrum is Hermitian ```python # spectrum is shape [4] ==> operator is shape [4, 4] # spectrum is real ==> operator is self-adjoint # spectrum is Hermitian ==> operator is real # spectrum has positive real part ==> operator is positive-definite. spectrum = [6., 4, 2, 4] # Force the input dtype to be float32. # Cast the output to float32. This is fine because the operator will be # real due to Hermitian spectrum. operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32) operator.shape ==> [4, 4] operator.to_dense() ==> [[4, 1, 0, 1], [1, 4, 1, 0], [0, 1, 4, 1], [1, 0, 1, 4]] # convolution_kernel = tf.signal.ifft(spectrum) operator.convolution_kernel() ==> [4, 1, 0, 1] ``` #### Performance Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` is `O(R*N*Log[N])` * `operator.solve(x)` is `O(R*N*Log[N])` * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either way. References: Toeplitz and Circulant Matrices - A Review: [Gray, 2006](https://www.nowpublishers.com/article/Details/CIT-006) ([pdf](https://ee.stanford.edu/~gray/toeplitz.pdf))" 10361,LinearOperatorCirculant2D,tensorflow/tensorflow/python/ops/linalg/linear_operator_circulant.py,762,class,"`LinearOperator` acting like a block circulant matrix. This operator acts like a block circulant matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of block circulant matrices If `A` is block circulant, with block sizes `N0, N1` (`N0 * N1 = N`): `A` has a block circulant structure, composed of `N0 x N0` blocks, with each block an `N1 x N1` circulant matrix. For example, with `W`, `X`, `Y`, `Z` each circulant, ``` A = |W Z Y X| |X W Z Y| |Y X W Z| |Z Y X W| ``` Note that `A` itself will not in general be circulant. #### Description in terms of the frequency spectrum There is an equivalent description in terms of the [batch] spectrum `H` and Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch dimensions. If `H.shape = [N0, N1]` (`N0 * N1 = N`): Loosely speaking, matrix multiplication is equal to the action of a Fourier multiplier: `A u = IDFT2[ H DFT2[u] ]`. Precisely speaking, given `[N, R]` matrix `u`, let `DFT2[u]` be the `[N0, N1, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, R]` and taking a two dimensional DFT across the first two dimensions. Let `IDFT2` be the inverse of `DFT2`. Matrix multiplication may be expressed columnwise: ```(A u)_r = IDFT2[ H * (DFT2[u])_r ]``` #### Operator properties deduced from the spectrum. * This operator is positive definite if and only if `Real{H} > 0`. A general property of Fourier transforms is the correspondence between Hermitian functions and real-valued transforms. Suppose `H.shape = [B1,...,Bb, N0, N1]`. We say that `H` is a Hermitian spectrum if, with `%` indicating modulus division, ``` H[..., n0 % N0, n1 % N1] = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1] ]. ``` * This operator corresponds to a real matrix if and only if `H` is Hermitian. * This operator is self-adjoint if and only if `H` is real. See e.g. ""Discrete-Time Signal Processing"", Oppenheim and Schafer. #### Example of a self-adjoint positive definite operator ```python # spectrum is real ==> operator is self-adjoint # spectrum is positive ==> operator is positive definite spectrum = [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]] operator = LinearOperatorCirculant2D(spectrum) # IFFT[spectrum] operator.convolution_kernel() ==> [[5.0+0.0j, -0.5-.3j, -0.5+.3j], [-1.5-.9j, 0, 0], [-1.5+.9j, 0, 0]] operator.to_dense() ==> Complex self-adjoint 9 x 9 matrix. ``` #### Example of defining in terms of a real convolution kernel ```python # convolution_kernel is real ==> spectrum is Hermitian. convolution_kernel = [[1., 2., 1.], [5., -1., 1.]] spectrum = tf.signal.fft2d(tf.cast(convolution_kernel, tf.complex64)) # spectrum is shape [2, 3] ==> operator is shape [6, 6] # spectrum is Hermitian ==> operator is real. operator = LinearOperatorCirculant2D(spectrum, input_output_dtype=tf.float32) ``` #### Performance Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, and `x.shape = [N, R]`.
Then * `operator.matmul(x)` is `O(R*N*Log[N])` * `operator.solve(x)` is `O(R*N*Log[N])` * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10362,LinearOperatorCirculant3D,tensorflow/tensorflow/python/ops/linalg/linear_operator_circulant.py,939,class,"`LinearOperator` acting like a nested block circulant matrix. This operator acts like a block circulant matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of block circulant matrices If `A` is nested block circulant, with block sizes `N0, N1, N2` (`N0 * N1 * N2 = N`): `A` has a block structure, composed of `N0 x N0` blocks, with each block an `N1 x N1` block circulant matrix. For example, with `W`, `X`, `Y`, `Z` each block circulant, ``` A = |W Z Y X| |X W Z Y| |Y X W Z| |Z Y X W| ``` Note that `A` itself will not in general be circulant. #### Description in terms of the frequency spectrum There is an equivalent description in terms of the [batch] spectrum `H` and Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch dimensions. If `H.shape = [N0, N1, N2]` (`N0 * N1 * N2 = N`): Loosely speaking, matrix multiplication is equal to the action of a Fourier multiplier: `A u = IDFT3[ H DFT3[u] ]`. Precisely speaking, given `[N, R]` matrix `u`, let `DFT3[u]` be the `[N0, N1, N2, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, N2, R]` and taking a three dimensional DFT across the first three dimensions. Let `IDFT3` be the inverse of `DFT3`. Matrix multiplication may be expressed columnwise: ```(A u)_r = IDFT3[ H * (DFT3[u])_r ]``` #### Operator properties deduced from the spectrum. * This operator is positive definite if and only if `Real{H} > 0`. A general property of Fourier transforms is the correspondence between Hermitian functions and real-valued transforms. Suppose `H.shape = [B1,...,Bb, N0, N1, N2]`. We say that `H` is a Hermitian spectrum if, with `%` meaning modulus division, ``` H[..., n0 % N0, n1 % N1, n2 % N2] = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1, (-n2) % N2] ]. ``` * This operator corresponds to a real matrix if and only if `H` is Hermitian. * This operator is self-adjoint if and only if `H` is real. See e.g. ""Discrete-Time Signal Processing"", Oppenheim and Schafer. #### Examples See `LinearOperatorCirculant` and `LinearOperatorCirculant2D` for examples. #### Performance Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, and `x.shape = [N, R]`.
Then * `operator.matmul(x)` is `O(R*N*Log[N])` * `operator.solve(x)` is `O(R*N*Log[N])` * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10363,_to_complex,tensorflow/tensorflow/python/ops/linalg/linear_operator_circulant.py,1088,function, 10364,LinearOperatorComposition,tensorflow/tensorflow/python/ops/linalg/linear_operator_composition.py,34,class,"Composes one or more `LinearOperators`. This operator composes one or more linear operators `[op1,...,opJ]`, building a new `LinearOperator` with action defined by: ``` op_composed(x) := op1(op2(...(opJ(x))...)) ``` If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the [batch] matrix formed with the multiplication `A1 A2...AJ`. If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have `N_j = M_{j+1}`, in which case the composed operator has shape equal to `broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate batch shapes broadcast. Even if the composed shape is well defined, the composed operator's methods may fail due to lack of broadcasting ability in the defining operators' methods. ```python # Create a 2 x 2 linear operator composed of two 2 x 2 operators. operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) operator = LinearOperatorComposition([operator_1, operator_2]) operator.to_dense() ==> [[1., 2.] [3., 4.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 5 linear operators. matrix_45 = tf.random.normal(shape=[2, 3, 4, 5]) operator_45 = LinearOperatorFullMatrix(matrix_45) # Create a [2, 3] batch of 5 x 6 linear operators. matrix_56 = tf.random.normal(shape=[2, 3, 5, 6]) operator_56 = LinearOperatorFullMatrix(matrix_56) # Compose to create a [2, 3] batch of 4 x 6 operators. operator_46 = LinearOperatorComposition([operator_45, operator_56]) # Create a shape [2, 3, 6, 2] Tensor. x = tf.random.normal(shape=[2, 3, 6, 2]) operator_46.matmul(x) ==> Shape [2, 3, 4, 2] Tensor ``` #### Performance The performance of `LinearOperatorComposition` on any operation is equal to the sum of the individual operators' operations. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10365,LinearOperatorDiag,tensorflow/tensorflow/python/ops/linalg/linear_operator_diag.py,34,class,"`LinearOperator` acting like a [batch] square diagonal matrix. This operator acts like a [batch] diagonal matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorDiag` is initialized with a (batch) vector. ```python # Create a 2 x 2 diagonal linear operator. diag = [1., -1.] operator = LinearOperatorDiag(diag) operator.to_dense() ==> [[1., 0.] [0., -1.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. diag = tf.random.normal(shape=[2, 3, 4]) operator = LinearOperatorDiag(diag) # Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible # since the batch dimensions, [2, 1], are broadcast to # operator.batch_shape = [2, 3]. y = tf.random.normal(shape=[2, 1, 4, 2]) x = operator.solve(y) ==> operator.matmul(x) = y ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Performance Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` involves `N * R` multiplications. * `operator.solve(x)` involves `N` divisions and `N * R` multiplications. * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10366,LinearOperatorFullMatrix,tensorflow/tensorflow/python/ops/linalg/linear_operator_full_matrix.py,33,class,"`LinearOperator` that wraps a [batch] matrix. This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `M x N` matrix. ```python # Create a 2 x 2 linear operator. matrix = [[1., 2.], [3., 4.]] operator = LinearOperatorFullMatrix(matrix) operator.to_dense() ==> [[1., 2.] [3., 4.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. 
matrix = tf.random.normal(shape=[2, 3, 4, 4]) operator = LinearOperatorFullMatrix(matrix) ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. ``` #### Performance `LinearOperatorFullMatrix` has exactly the same performance as would be achieved by using standard `TensorFlow` matrix ops. Intelligent choices are made based on the following initialization hints. * If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a Cholesky factorization is used for the determinant and solve. In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape `[M, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` is `O(M * N * R)`. * If `M=N`, `operator.solve(x)` is `O(N^3 * R)`. * If `M=N`, `operator.determinant()` is `O(N^3)`. If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10367,LinearOperatorHouseholder,tensorflow/tensorflow/python/ops/linalg/linear_operator_householder.py,35,class,"`LinearOperator` acting like a [batch] of Householder transformations. This operator acts like a [batch] of Householder reflections with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorHouseholder` is initialized with a (batch) vector. A Householder reflection is defined via a vector `v`; it reflects points in `R^n` about the hyperplane orthogonal to `v` and through the origin. ```python # Create a 2 x 2 Householder transform. vec = [1 / np.sqrt(2), 1. / np.sqrt(2)] operator = LinearOperatorHouseholder(vec) operator.to_dense() ==> [[0., -1.] [-1., -0.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10368,BaseLinearOperatorIdentity,tensorflow/tensorflow/python/ops/linalg/linear_operator_identity.py,42,class,Base class for Identity operators. 10369,LinearOperatorIdentity,tensorflow/tensorflow/python/ops/linalg/linear_operator_identity.py,102,class,"`LinearOperator` acting like a [batch] square identity matrix. This operator acts like a [batch] identity matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorIdentity` is initialized with `num_rows`, and optionally `batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this operator efficiently passes through all arguments. If `batch_shape` is provided, broadcasting may occur, which will require making copies. ```python # Create a 2 x 2 identity matrix. operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32) operator.to_dense() ==> [[1., 0.] [0., 1.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> 0. x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor, same as x. y = tf.random.normal(shape=[3, 2, 4]) # Note that y.shape is compatible with operator.shape because operator.shape # is broadcast to [3, 2, 2]. # This broadcast does NOT require copying data, since we can infer that y # will be passed through without changing shape. We are always able to infer # this if the operator has no batch_shape. x = operator.solve(y) ==> Shape [3, 2, 4] Tensor, same as y. # Create a 2-batch of 2x2 identity matrices operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2]) operator.to_dense() ==> [[[1., 0.] [0., 1.]], [[1., 0.] [0., 1.]]] # Here, even though the operator has a batch shape, the input is the same as # the output, so x can be passed through without a copy. The operator is able # to detect that no broadcast is necessary because both x and the operator # have statically defined shape. x = ... Shape [2, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, same as x # Here the operator and x have different batch_shape, and are broadcast. # This requires a copy, since the output is different size than the input. x = ... Shape [1, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, equal to [x, x] ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` ### Performance If `batch_shape` initialization arg is `None`: * `operator.matmul(x)` is `O(1)` * `operator.solve(x)` is `O(1)` * `operator.determinant()` is `O(1)` If `batch_shape` initialization arg is provided, and static checks cannot rule out the need to broadcast: * `operator.matmul(x)` is `O(D1*...*Dd*N*R)` * `operator.solve(x)` is `O(D1*...*Dd*N*R)` * `operator.determinant()` is `O(B1*...*Bb)` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. 
This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10370,LinearOperatorScaledIdentity,tensorflow/tensorflow/python/ops/linalg/linear_operator_identity.py,475,class,"`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`. This operator acts like a scaled [batch] identity matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is a scaled version of the `N x N` identity matrix. `LinearOperatorScaledIdentity` is initialized with `num_rows`, and a `multiplier` (a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the `multiplier` determines the scale for each batch member. ```python # Create a 2 x 2 scaled identity matrix. operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=3.) operator.to_dense() ==> [[3., 0.] [0., 3.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> 2 * Log[3] x = ... Shape [2, 4] Tensor operator.matmul(x) ==> 3 * x y = tf.random.normal(shape=[3, 2, 4]) # Note that y.shape is compatible with operator.shape because operator.shape # is broadcast to [3, 2, 2]. x = operator.solve(y) ==> y / 3 # Create a 2-batch of 2 x 2 scaled identity matrices operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=[5., 5.]) operator.to_dense() ==> [[[5., 0.] [0., 5.]], [[5., 0.] [0., 5.]]] x = ... Shape [2, 2, 3] operator.matmul(x) ==> 5 * x # Here the operator and x have different batch_shape, and are broadcast. x = ... Shape [1, 2, 3] operator.matmul(x) ==> 5 * x ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` ### Performance * `operator.matmul(x)` is `O(D1*...*Dd*N*R)` * `operator.solve(x)` is `O(D1*...*Dd*N*R)` * `operator.determinant()` is `O(D1*...*Dd)` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10371,LinearOperatorInversion,tensorflow/tensorflow/python/ops/linalg/linear_operator_inversion.py,30,class,"`LinearOperator` representing the inverse of another operator. This operator represents the inverse of another operator. ```python # Create a 2 x 2 linear operator. operator = LinearOperatorFullMatrix([[1., 0.], [0., 2.]]) operator_inv = LinearOperatorInversion(operator) operator_inv.to_dense() ==> [[1., 0.] [0., 0.5]] operator_inv.shape ==> [2, 2] operator_inv.log_abs_determinant() ==> - log(2) x = ...
Shape [2, 4] Tensor operator_inv.matmul(x) ==> Shape [2, 4] Tensor, equal to operator.solve(x) ``` #### Performance The performance of `LinearOperatorInversion` depends on the underlying operator's performance: `solve` and `matmul` are swapped, and determinant is inverted. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10372,_vec,tensorflow/tensorflow/python/ops/linalg/linear_operator_kronecker.py,36,function,Stacks columns of a matrix to form a single column. 10373,_unvec_by,tensorflow/tensorflow/python/ops/linalg/linear_operator_kronecker.py,44,function,"Unstack vector to form a matrix, with a specified number of columns." 10374,_rotate_last_dim,tensorflow/tensorflow/python/ops/linalg/linear_operator_kronecker.py,53,function,Rotate the last dimension either left or right. 10375,LinearOperatorKronecker,tensorflow/tensorflow/python/ops/linalg/linear_operator_kronecker.py,66,class,"Kronecker product between two `LinearOperators`. This operator composes one or more linear operators `[op1,...,opJ]`, building a new `LinearOperator` representing the Kronecker product: `op1 x op2 x ... opJ` (we omit parentheses as the Kronecker product is associative). If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`, where the product is over all operators. ```python # Create a 4 x 4 linear operator composed of two 2 x 2 operators. operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]]) operator = LinearOperatorKronecker([operator_1, operator_2]) operator.to_dense() ==> [[1., 0., 2., 0.], [2., 1., 4., 2.], [3., 0., 4., 0.], [6., 3., 8., 4.]] operator.shape ==> [4, 4] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [4, 2] Tensor operator.matmul(x) ==> Shape [4, 2] Tensor # Create a [2, 3] batch of 4 x 5 linear operators. matrix_45 = tf.random.normal(shape=[2, 3, 4, 5]) operator_45 = LinearOperatorFullMatrix(matrix_45) # Create a [2, 3] batch of 5 x 6 linear operators. matrix_56 = tf.random.normal(shape=[2, 3, 5, 6]) operator_56 = LinearOperatorFullMatrix(matrix_56) # Compose to create a [2, 3] batch of 20 x 30 operators. operator_large = LinearOperatorKronecker([operator_45, operator_56]) # Create a shape [2, 3, 30, 2] vector. x = tf.random.normal(shape=[2, 3, 30, 2]) operator_large.matmul(x) ==> Shape [2, 3, 20, 2] Tensor ``` #### Performance The performance of `LinearOperatorKronecker` on any operation is equal to the sum of the individual operators' operations. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert.
For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10376,LinearOperatorLowRankUpdate,tensorflow/tensorflow/python/ops/linalg/linear_operator_low_rank_update.py,39,class,"Perturb a `LinearOperator` with a rank `K` update. This operator acts like a [batch] matrix `A` with shape `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `M x N` matrix. `LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where ``` L, is a LinearOperator representing [batch] M x N matrices U, is a [batch] M x K matrix. Typically K << M. D, is a [batch] K x K matrix. V, is a [batch] N x K matrix. Typically K << N. V^H is the Hermitian transpose (adjoint) of V. ``` If `M = N`, determinants and solves are done using the matrix determinant lemma and Woodbury identities, and thus require L and D to be non-singular. Solves and determinants will be attempted unless the ""is_non_singular"" property of L and D is False. In the event that L and D are positive-definite, and U = V, solves and determinants can be done using a Cholesky factorization. ```python # Create a 3 x 3 diagonal linear operator. diag_operator = LinearOperatorDiag( diag=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True, is_positive_definite=True) # Perturb with a rank 2 perturbation operator = LinearOperatorLowRankUpdate( operator=diag_operator, u=[[1., 2.], [-1., 3.], [0., 0.]], diag_update=[11., 12.], v=[[1., 2.], [-1., 3.], [10., 10.]]) operator.shape ==> [3, 3] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [3, 4] Tensor operator.matmul(x) ==> Shape [3, 4] Tensor ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. ``` ### Performance Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`, made from a rank `K` update of `base_operator` which performs `.matmul(x)` on `x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly for `solve`, `determinant`). Then, if `x.shape = [N, R]`, * `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)` and if `M = N`, * `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)` * `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)` If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular`, `self_adjoint`, `positive_definite`, `diag_update_positive` and `square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way."
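The Woodbury identity and matrix determinant lemma that make `LinearOperatorLowRankUpdate`'s solves and determinants cheap when `K << N` are easy to check numerically. Below is a minimal NumPy sketch (illustrative only, not the TensorFlow implementation); all names (`L`, `U`, `D`, `V`, `cap`) are local to the example, and `L` is taken diagonal so its inverse is trivial.

```python
import numpy as np

rng = np.random.default_rng(0)
N, K = 5, 2
# Base operator L (diagonal here) and a rank-K update A = L + U D V^T.
L = np.diag(rng.uniform(1., 2., size=N))
U = rng.normal(size=(N, K))
D = np.diag(rng.uniform(1., 2., size=K))
V = rng.normal(size=(N, K))
A = L + U @ D @ V.T

# Woodbury: A^{-1} = L^{-1} - L^{-1} U (D^{-1} + V^T L^{-1} U)^{-1} V^T L^{-1}.
# Only the K x K "capacitance" matrix is ever inverted.
L_inv = np.linalg.inv(L)
cap = np.linalg.inv(D) + V.T @ L_inv @ U
A_inv = L_inv - L_inv @ U @ np.linalg.inv(cap) @ V.T @ L_inv
assert np.allclose(A_inv, np.linalg.inv(A))

# Matrix determinant lemma: det(A) = det(cap) * det(D) * det(L).
assert np.allclose(np.linalg.det(cap) * np.linalg.det(D) * np.linalg.det(L),
                   np.linalg.det(A))
```

This is why the `O(...)` costs quoted above grow only as `K^3` in the update rank rather than `N^3`.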
10377,LinearOperatorLowerTriangular,tensorflow/tensorflow/python/ops/linalg/linear_operator_lower_triangular.py,35,class,"`LinearOperator` acting like a [batch] square lower triangular matrix. This operator acts like a [batch] lower triangular matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. `LinearOperatorLowerTriangular` is initialized with a `Tensor` having dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two dimensions is ignored. ```python # Create a 2 x 2 lower-triangular linear operator. tril = [[1., 2.], [3., 4.]] operator = LinearOperatorLowerTriangular(tril) # The upper triangle is ignored. operator.to_dense() ==> [[1., 0.] [3., 4.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. tril = tf.random.normal(shape=[2, 3, 4, 4]) operator = LinearOperatorLowerTriangular(tril) ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. ``` #### Performance Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` involves `N^2 * R` multiplications. * `operator.solve(x)` involves `N * R` size `N` back-substitutions. * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10378,LinearOperatorPermutation,tensorflow/tensorflow/python/ops/linalg/linear_operator_permutation.py,39,class,"`LinearOperator` acting like a [batch] of permutation matrices. This operator acts like a [batch] of permutations with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorPermutation` is initialized with a (batch) vector. A permutation is defined by an integer vector `v` whose values are unique and are in the range `[0, ..., N - 1]`. Applying the permutation on an input matrix has the following meaning: the value of `v` at index `i` says to move the `v[i]`-th row of the input matrix to the `i`-th row. Because all values are unique, this will result in a permutation of the rows of the input matrix. Note that the permutation vector `v` has the same semantics as `tf.transpose`. ```python # Create a 3 x 3 permutation matrix that swaps the last two columns.
vec = [0, 2, 1] operator = LinearOperatorPermutation(vec) operator.to_dense() ==> [[1., 0., 0.] [0., 0., 1.] [0., 1., 0.]] operator.shape ==> [3, 3] # This will be zero. operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [3, 4] Tensor operator.matmul(x) ==> Shape [3, 4] Tensor ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10379,OperatorShapesInfo,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,45,class,"Object encoding expected shape for a test. Encodes the expected shape of a matrix for a test. Also allows additional metadata for the test harness." 10380,CheckTapeSafeSkipOptions,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,57,class, 10381,LinearOperatorDerivedClassTest,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,67,class,"Tests for derived classes. Subclasses should implement every abstractmethod, and this will enable all test methods to work." 
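The row-movement rule described for `LinearOperatorPermutation` above amounts to a gather along the row axis. A minimal NumPy sketch of that description (illustrative only, not the library implementation; all names are local to the example):

```python
import numpy as np

def permutation_matmul(v, x):
  # Row i of the result is row v[i] of x -- the "move the v[i]-th row of the
  # input to the i-th row" semantics described above (same convention as the
  # perm argument of tf.transpose).
  return np.asarray(x)[np.asarray(v), :]

v = [0, 2, 1]                        # swap the last two rows
x = np.arange(12.).reshape(3, 4)
dense = np.eye(3)[v]                 # matches operator.to_dense() above
assert np.allclose(dense @ x, permutation_matmul(v, x))
```

Since the gather costs `O(N * R)` and the determinant is the sign of the permutation (so `log_abs_determinant()` is zero, as the example notes), none of these operations need the dense matrix.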
10382,_test_to_dense,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,275,function, 10383,_test_det,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,289,function, 10384,_test_log_abs_det,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,304,function, 10385,_test_matmul_base,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,321,function, 10386,_test_matmul,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,386,function, 10387,_test_matmul_with_broadcast,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,406,function, 10388,_test_adjoint,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,426,function, 10389,_test_cholesky,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,442,function, 10390,_test_eigvalsh,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,461,function, 10391,_test_cond,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,491,function, 10392,_test_solve_base,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,537,function, 10393,_test_solve,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,603,function, 10394,_test_solve_with_broadcast,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,618,function, 10395,_test_inverse,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,633,function, 10396,_test_trace,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,645,function, 10397,_test_add_to_tensor,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,660,function, 10398,_test_diag_part,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,677,function, 10399,add_tests,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,699,function,Add tests for LinearOperator methods. 10400,SquareLinearOperatorDerivedClassTest,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,763,class,"Base test class appropriate for square operators. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here." 10401,NonSquareLinearOperatorDerivedClassTest,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,819,class,"Base test class appropriate for generic rectangular operators. Square shapes are never tested by this class, so if you want to test your operator with a square shape, create two test classes, the other subclassing SquareLinearOperatorFullMatrixTest. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here." 10402,random_positive_definite_matrix,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,895,function,"[batch] positive definite Wishart matrix. A Wishart(N, S) matrix is the S sample covariance matrix of an N-variate (standard) Normal random variable. Args: shape: `TensorShape` or Python list. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype. oversampling_ratio: S / N in the above. If S < N, the matrix will be singular (unless `force_well_conditioned` is `True`). force_well_conditioned: Python bool. If `True`, add `1` to the diagonal of the Wishart matrix, then divide by 2, ensuring most eigenvalues are close to 1. Returns: `Tensor` with desired shape and dtype." 10403,random_tril_matrix,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,938,function,"[batch] lower triangular matrix.
Args: shape: `TensorShape` or Python `list`. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype force_well_conditioned: Python `bool`. If `True`, returned matrix will have eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit normal random variables. remove_upper: Python `bool`. If `True`, zero out the strictly upper triangle. If `False`, the lower triangle of returned matrix will have desired properties, but will not have the strictly upper triangle zeroed out. Returns: `Tensor` with desired shape and dtype." 10404,random_normal,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,974,function,"Tensor with (possibly complex) Gaussian entries. Samples are distributed like ``` N(mean, stddev^2), if dtype is real, X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype." 10405,random_uniform,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,1008,function,"Tensor with (possibly complex) Uniform entries. Samples are distributed like ``` Uniform[minval, maxval], if dtype is real, X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype." 10406,random_sign_uniform,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,1050,function,"Tensor with (possibly complex) random entries from a ""sign Uniform"". Letting `Z` be a random variable equal to `-1` and `1` with equal probability, samples from this `Op` are distributed like ``` Z * X, where X ~ Uniform[minval, maxval], if dtype is real, Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype." 10407,random_normal_correlated_columns,tensorflow/tensorflow/python/ops/linalg/linear_operator_test_util.py,1087,function,"Batch matrix with (possibly complex) Gaussian entries and correlated cols. Returns random batch matrix `A` with specified element-wise `mean`, `stddev`, living close to an embedded hyperplane. Suppose `shape[-2:] = (M, N)`. If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries. If `M >= N`, then the columns of `A` will be made almost dependent as follows: ``` L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1) B = random normal M x N-1 matrix, mean = 0, stddev = stddev. G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane E = a random normal M x N matrix, mean = 0, stddev = eps mu = a constant M x N matrix, equal to the argument ""mean"" A = G + E + mu ``` Args: shape: Python list of integers. Shape of the returned tensor. Must be at least length two. mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype eps: Distance each column is perturbed from the low-dimensional subspace. seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. Raises: ValueError: If `shape` is not at least length 2." 10408,LinearOperatorToeplitz,tensorflow/tensorflow/python/ops/linalg/linear_operator_toeplitz.py,37,class,"`LinearOperator` acting like a [batch] of Toeplitz matrices. This operator acts like a [batch] Toeplitz matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of Toeplitz matrices Toeplitz means that `A` has constant diagonals. Hence, `A` can be generated with two vectors. One represents the first column of the matrix, and the other represents the first row. Below is a 4 x 4 example: ``` A = |a b c d| |e a b c| |f e a b| |g f e a| ``` #### Example of a Toeplitz operator. ```python # Create a 3 x 3 Toeplitz operator. col = [1., 2., 3.] row = [1., 4., -9.] operator = LinearOperatorToeplitz(col, row) operator.to_dense() ==> [[1., 4., -9.], [2., 1., 4.], [3., 2., 1.]] operator.shape ==> [3, 3] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [3, 4] Tensor operator.matmul(x) ==> Shape [3, 4] Tensor ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10409,_to_complex,tensorflow/tensorflow/python/ops/linalg/linear_operator_toeplitz.py,274,function, 10410,LinearOperatorTridiag,tensorflow/tensorflow/python/ops/linalg/linear_operator_tridiag.py,42,class,"`LinearOperator` acting like a [batch] square tridiagonal matrix. This operator acts like a [batch] square tridiagonal matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. Example usage: Create a 3 x 3 tridiagonal linear operator. >>> superdiag = [3., 4., 5.] >>> diag = [1., -1., 2.] >>> subdiag = [6., 7., 8.] >>> operator = tf.linalg.LinearOperatorTridiag( ... [superdiag, diag, subdiag], ... diagonals_format='sequence') >>> operator.to_dense() >>> operator.shape TensorShape([3, 3]) Scalar Tensor output. >>> operator.log_abs_determinant() Create a [2, 3] batch of 4 x 4 linear operators. >>> diagonals = tf.random.normal(shape=[2, 3, 3, 4]) >>> operator = tf.linalg.LinearOperatorTridiag( ...
diagonals, ... diagonals_format='compact') Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible since the batch dimensions, [2, 1], are broadcast to operator.batch_shape = [2, 3]. >>> y = tf.random.normal(shape=[2, 1, 4, 2]) >>> x = operator.solve(y) >>> x #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb]. ``` #### Performance Suppose `operator` is a `LinearOperatorTridiag` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` will take O(N * R) time. * `operator.solve(x)` will take O(N * R) time. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10411,convert_nonref_to_tensor,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,40,function,"Converts the given `value` to a `Tensor` if input is nonreference type. This function converts Python objects of various types to `Tensor` objects except if the input has nonreference semantics. Reference semantics are characterized by `is_ref` and is any object which is a `tf.Variable` or instance of `tf.Module`. This function accepts any input which `tf.convert_to_tensor` would also. Note: This function diverges from default Numpy behavior for `float` and `string` types when `None` is present in a Python list or scalar. Rather than silently converting `None` values, an error will be thrown. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. dtype_hint: Optional element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. name: Optional name to use if a new `Tensor` is created. Returns: tensor: A `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value` to `dtype`. RuntimeError: If a registered conversion function returns an invalid value. ValueError: If the `value` is a tensor not of given `dtype` in graph mode. #### Examples: ```python x = tf.Variable(0.) y = convert_nonref_to_tensor(x) x is y # ==> True x = tf.constant(0.) y = convert_nonref_to_tensor(x) x is y # ==> True x = np.array(0.) 
y = convert_nonref_to_tensor(x) x is y # ==> False tf.is_tensor(y) # ==> True x = tfp.util.DeferredTensor(13.37, lambda x: x) y = convert_nonref_to_tensor(x) x is y # ==> True tf.is_tensor(y) # ==> False tf.equal(y, 13.37) # ==> True ```" 10412,base_dtype,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,121,function,Returns a non-reference `dtype` based on this `dtype`. 10413,dtype_name,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,129,function,Returns the string name for this `dtype`. 10414,check_dtype,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,139,function,Check that arg.dtype == self.dtype. 10415,is_ref,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,147,function,"Evaluates if the object has reference semantics. An object is deemed ""reference"" if it is a `tf.Variable` instance or is derived from a `tf.Module` with `dtype` and `shape` properties. Args: x: Any object. Returns: is_ref: Python `bool` indicating the input has reference semantics, i.e., is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties." 10416,assert_not_ref_type,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,168,function, 10417,assert_no_entries_with_modulus_zero,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,179,function,"Returns `Op` that asserts Tensor `x` has no entries with modulus zero. Args: x: Numeric `Tensor`, real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this `Op`. Returns: An `Op` that asserts `x` has no entries with modulus zero." 10418,assert_zero_imag_part,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,199,function,"Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts. Args: x: Numeric `Tensor`, real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this `Op`. Returns: An `Op` that asserts `x` has no entries with nonzero imaginary part." 10419,assert_compatible_matrix_dimensions,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,221,function,"Assert that an argument to solve/matmul has proper domain dimension. If `operator.shape[-2:] = [M, N]`, and `x.shape[-2:] = [Q, R]`, then `operator.matmul(x)` is defined only if `N = Q`. This `Op` returns an `Assert` that ""fires"" if this is not the case. Static checks are already done by the base class `LinearOperator`. Args: operator: `LinearOperator`. x: `Tensor`. Returns: `Assert` `Op`." 10420,assert_is_batch_matrix,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,248,function,Static assert that `tensor` has rank `2` or higher. 10421,shape_tensor,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,257,function,"Convert Tensor using default type, unless empty list or tuple." 10422,broadcast_matrix_batch_dims,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,273,function,"Broadcast leading dimensions of zero or more [batch] matrices. Example broadcasting one batch dim of two simple matrices. ```python x = [[1, 2], [3, 4]] # Shape [2, 2], no batch dims y = [[[1]]] # Shape [1, 1, 1], 1 batch dim of shape [1] x_bc, y_bc = broadcast_matrix_batch_dims([x, y]) x_bc ==> [[[1, 2], [3, 4]]] # Shape [1, 2, 2], 1 batch dim of shape [1].
y_bc ==> same as y ``` Example broadcasting many batch dims ```python x = tf.random.normal(shape=(2, 3, 1, 4, 4)) y = tf.random.normal(shape=(1, 3, 2, 5, 5)) x_bc, y_bc = broadcast_matrix_batch_dims([x, y]) x_bc.shape ==> (2, 3, 2, 4, 4) y_bc.shape ==> (2, 3, 2, 5, 5) ``` Args: batch_matrices: Iterable of `Tensor`s, each having two or more dimensions. name: A string name to prepend to created ops. Returns: bcast_matrices: List of `Tensor`s, with `bcast_matrices[i]` containing the values from `batch_matrices[i]`, with possibly broadcast batch dims. Raises: ValueError: If any input `Tensor` is statically determined to have less than two dimensions." 10423,matrix_solve_with_broadcast,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,366,function,Solve systems of linear equations. 10424,_reshape_for_efficiency,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,385,function,"Maybe reshape a, b, and return an inverse map. For matmul/solve." 10425,use_operator_or_provided_hint_unless_contradicting,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,484,function,"Get combined hint in the case where operator.hint should equal hint. Args: operator: LinearOperator that a meta-operator was initialized with. hint_attr_name: String name for the attribute. provided_hint_value: Bool or None. Value passed by user in initialization. message: Error message to print if hints contradict. Returns: True, False, or None. Raises: ValueError: If hints contradict." 10426,arg_is_blockwise,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,519,function,Detect if input should be interpreted as a list of blocks. 10427,split_arg_into_blocks,tensorflow/tensorflow/python/ops/linalg/linear_operator_util.py,566,function,"Split `arg` into blocks matching the operators' `domain_dimension`. Specifically, if we have a blockwise lower-triangular matrix, with block sizes along the diagonal `[M_j, M_j] j = 0,1,2..J`, this method splits `arg` on `axis` into `J` tensors, whose shape at `axis` is `M_j`. Args: block_dims: Iterable of `TensorShapes`. block_dims_fn: Callable returning an iterable of `Tensor`s. arg: `Tensor`. `arg` is split into `J` tensors. axis: Python `Integer` representing the axis to split `arg` on. Returns: A list of `Tensor`s." 10428,LinearOperatorZeros,tensorflow/tensorflow/python/ops/linalg/linear_operator_zeros.py,43,class,"`LinearOperator` acting like a [batch] zero matrix. This operator acts like a [batch] zero matrix `A` with shape `[B1,...,Bb, N, M]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x M` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorZeros` is initialized with `num_rows`, and optionally `num_columns`, `batch_shape`, and `dtype` arguments. If `num_columns` is `None`, then this operator will be initialized as a square matrix. If `batch_shape` is `None`, this operator efficiently passes through all arguments. If `batch_shape` is provided, broadcasting may occur, which will require making copies. ```python # Create a 2 x 2 zero matrix. operator = LinearOperatorZeros(num_rows=2, dtype=tf.float32) operator.to_dense() ==> [[0., 0.] [0., 0.]] operator.shape ==> [2, 2] operator.determinant() ==> 0. x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor, same as tf.zeros_like(x).
# Create a 2-batch of 2x2 zero matrices operator = LinearOperatorZeros(num_rows=2, batch_shape=[2]) operator.to_dense() ==> [[[0., 0.] [0., 0.]], [[0., 0.] [0., 0.]]] # Here, even though the operator has a batch shape, the output has the same # shape as the input, so no broadcasting copy is needed. The operator is able # to detect that no broadcast is necessary because both x and the operator # have statically defined shape. x = ... Shape [2, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, same as tf.zeros_like(x) # Here the operator and x have different batch_shape, and are broadcast. # This requires a copy, since the output is a different size than the input. x = ... Shape [1, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, equal to tf.zeros_like([x, x]) ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, M], with b >= 0 x.shape = [C1,...,Cc] + [M, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way." 10429,_matmul_linear_operator,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,35,function,Generic matmul of two `LinearOperator`s. 10430,_matmul_linear_operator_identity_left,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,64,function, 10431,_matmul_linear_operator_identity_right,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,72,function, 10432,_matmul_linear_operator_scaled_identity,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,80,function,Matmul of two ScaledIdentity `LinearOperators`. 10433,_matmul_linear_operator_zeros_right,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,101,function, 10434,_matmul_linear_operator_zeros_left,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,111,function, 10435,_matmul_linear_operator_diag,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,124,function, 10436,_matmul_linear_operator_diag_scaled_identity_right,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,140,function, 10437,_matmul_linear_operator_diag_scaled_identity_left,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,157,function, 10438,_matmul_linear_operator_diag_tril,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,174,function, 10439,_matmul_linear_operator_tril_diag,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,190,function, 10440,_matmul_linear_operator_circulant_circulant,tensorflow/tensorflow/python/ops/linalg/matmul_registrations.py,208,function, 10441,combined_commuting_self_adjoint_hint,tensorflow/tensorflow/python/ops/linalg/registrations_util.py,23,function,Get combined hint for self-adjoint-ness. 10442,is_square,tensorflow/tensorflow/python/ops/linalg/registrations_util.py,47,function,Return a hint to whether the composition is square.
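The specialized matmul registrations listed above exist because structured operands admit cheap shortcuts. A hedged NumPy sketch of the kind of identity a registration such as `_matmul_linear_operator_diag` can exploit (illustrative only, not the registered TensorFlow code; all names are local to the example):

```python
import numpy as np

d1 = np.array([1., -2., 3.])
d2 = np.array([4., 5., -6.])

# The product of two diagonal operators is again diagonal, with
# elementwise-multiplied diagonals: O(N) work on the diagonals instead of
# an O(N^3) dense matmul.
fast = np.diag(d1 * d2)
slow = np.diag(d1) @ np.diag(d2)
assert np.allclose(fast, slow)

# Related hint logic: if A and B are self-adjoint and commute, their product
# is self-adjoint, since (AB)^H = B^H A^H = B A = A B.
```

The same reasoning underlies the hint combiners such as `combined_commuting_self_adjoint_hint`: structural facts about the factors let the composition carry `is_X` hints without materializing anything.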
10443,combined_commuting_positive_definite_hint,tensorflow/tensorflow/python/ops/linalg/registrations_util.py,68,function,Get combined PD hint for compositions. 10444,combined_non_singular_hint,tensorflow/tensorflow/python/ops/linalg/registrations_util.py,81,function,Get combined non-singular hint for compositions. 10445,_solve_linear_operator,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,35,function,Generic solve of two `LinearOperator`s. 10446,_solve_inverse_linear_operator,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,65,function,Solve inverse of generic `LinearOperator`s. 10447,_solve_linear_operator_identity_left,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,74,function, 10448,_solve_linear_operator_identity_right,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,82,function, 10449,_solve_linear_operator_scaled_identity,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,90,function,Solve of two ScaledIdentity `LinearOperators`. 10450,_solve_linear_operator_diag,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,111,function, 10451,_solve_linear_operator_diag_scaled_identity_right,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,127,function, 10452,_solve_linear_operator_diag_scaled_identity_left,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,144,function, 10453,_solve_linear_operator_diag_tril,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,161,function, 10454,_solve_linear_operator_circulant_circulant,tensorflow/tensorflow/python/ops/linalg/solve_registrations.py,180,function, 10455,conjugate_gradient,tensorflow/tensorflow/python/ops/linalg/sparse/conjugate_gradient.py,36,function,"Conjugate gradient solver. Solves a linear system of equations `A*x = rhs` for self-adjoint, positive definite matrix `A` and right-hand side vector `rhs`, using an iterative, matrix-free algorithm where the action of the matrix A is represented by `operator`. The iteration terminates when either the number of iterations exceeds `max_iter` or when the residual norm has been reduced to `tol` times its initial value, i.e. \\(||rhs - A x_k|| <= tol ||rhs||\\). Args: operator: A `LinearOperator` that is self-adjoint and positive definite. rhs: A possibly batched vector of shape `[..., N]` containing the right-hand side vector. preconditioner: A `LinearOperator` that approximates the inverse of `A`. An efficient preconditioner could dramatically improve the rate of convergence. If `preconditioner` represents matrix `M` (`M` approximates `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate `A^{-1}x`. For this to be useful, the cost of applying `M` should be much lower than computing `A^{-1}` directly. x: A possibly batched vector of shape `[..., N]` containing the initial guess for the solution. tol: A float scalar convergence tolerance. max_iter: An integer giving the maximum number of iterations. name: A name scope for the operation. Returns: output: A namedtuple representing the final state with fields: - i: A scalar `int32` `Tensor`. Number of iterations executed. - x: A rank-1 `Tensor` of shape `[..., N]` containing the computed solution. - r: A rank-1 `Tensor` of shape `[..., N]` containing the residual vector. - p: A rank-1 `Tensor` of shape `[..., N]`. `A`-conjugate basis vector. - gamma: \\(r \cdot M \cdot r\\), equivalent to \\(||r||_2^2\\) when `preconditioner=None`."
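For reference, the iteration that `conjugate_gradient` describes can be written in a few lines. The following is a minimal textbook sketch in NumPy, not the TensorFlow implementation; it accesses `A` only through a user-supplied `matvec` (mirroring the matrix-free design above), omits the preconditioner for brevity, and all names are local to the example.

```python
import numpy as np

def conjugate_gradient(matvec, rhs, tol=1e-8, max_iter=100):
  # Unpreconditioned CG for self-adjoint, positive definite A, where
  # matvec(v) computes A @ v. Returns (iterations, solution, residual).
  x = np.zeros_like(rhs)
  r = rhs - matvec(x)                  # residual
  p = r.copy()                         # A-conjugate search direction
  gamma = r @ r                        # ||r||^2 (gamma in the docstring)
  for i in range(max_iter):
    if np.sqrt(gamma) <= tol * np.linalg.norm(rhs):
      break                            # residual reduced to tol * ||rhs||
    Ap = matvec(p)
    alpha = gamma / (p @ Ap)
    x += alpha * p
    r -= alpha * Ap
    gamma_new = r @ r
    p = r + (gamma_new / gamma) * p
    gamma = gamma_new
  return i, x, r

A = np.array([[4., 1.], [1., 3.]])     # self-adjoint, positive definite
rhs = np.array([1., 2.])
_, x, _ = conjugate_gradient(lambda v: A @ v, rhs)
assert np.allclose(A @ x, rhs)
```

For an N x N system, exact arithmetic converges in at most N iterations, which is why a good preconditioner (which clusters the eigenvalues) can dramatically reduce the iteration count.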
10456,_DenseToCSRSparseMatrixGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,28,function,Gradient for dense_to_csr_sparse_matrix op. 10457,_CSRSparseMatrixToDenseGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,38,function,Gradient for csr_sparse_matrix_to_dense op. 10458,_SparseMatrixAddGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,51,function,Gradient for sparse_matrix_add op. 10459,_SparseMatrixTransposeGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,71,function,Gradient for sparse_matrix_transpose op. 10460,_SparseMatrixSoftmaxGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,78,function,Gradient for sparse_matrix_softmax op. 10461,_SparseMatrixMatMulGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,86,function,Gradient for sparse_matrix_mat_mul op. 10462,_SparseMatrixSparseMatMulGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,173,function,Gradient for sparse_matrix_sparse_mat_mul op. 10463,_SparseMatrixMulGrad,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_grad.py,226,function,Gradient for sparse_matrix_mul op. 10464,DenseShapeAndType,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,50,class, 10465,_get_handle_data,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,55,function, 10466,_create_handle_data_proto,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,59,function,Create handle data based on shape and dtype protos. 10467,_make_handle_data,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,72,function,Create handle data based on tensor shape and dtype. 10468,get_shape_and_type,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,78,function,Return matrix's shape and type if available. 10469,dense_shape_and_type,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,90,function,"Get dense shape and dtype of the tf.Tensor containing the matrix. Args: matrix: A `tf.Tensor` of type `tf.variant` storing a sparse matrix. Returns: An instance of `ShapeAndType` with properties `shape` (a `tf.TensorShape`) and `dtype` (a `tf.DType`). Raises: TypeError: if `matrix` is not a tensor or its dtype is not variant. ValueError: if `matrix` lacks static handle data containing the dense shape and dtype." 10470,matmul_shape_inference,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,121,function,Helper function for matmul to set the result matrix's handle data. 10471,matmul,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,147,function,"Perform a sparse matrix matmul between `a` and `b`. Performs a contraction between `a` and `b` along the two innermost dimensions. If both `a` and `b` are instances of `SparseMatrix`, returns a new instance of `SparseMatrix` (same type as `a`). If one is not an instance of `SparseMatrix`, returns a dense `Tensor`: ``` c = opA(a) . opB(b) ``` where `opA` (resp. `opB`) is the transpose or hermitian transpose depending on the values of `transpose_a` (resp. `transpose_b`) and `adjoint_a` (resp. `adjoint_b`). Args: a: `Tensor` or `SparseMatrix`, having rank `2` or `3`. b: `Tensor` or `SparseMatrix`, having rank `2` or `3`. transpose_a: Python `bool`. transpose_b: Python `bool`. adjoint_a: Python `bool`. adjoint_b: Python `bool`. name: Optional name to use when creating ops. 
Returns: A `SparseMatrix` if both `a` and `b` are instances of `SparseMatrix`, otherwise a dense `Tensor`." 10472,SparseMatrix,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,248,class,Abstract class for sparse matrix types. 10473,CSRSparseMatrix,tensorflow/tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py,315,class,(Optionally batched) CSR Sparse Matrix. 10474,ReductionV2,tensorflow/tensorflow/python/ops/losses/loss_reduction.py,21,class,"Types of loss reduction. Contains the following values: * `AUTO`: Indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, we expect reduction value to be `SUM` or `NONE`. Using `AUTO` in that case will raise an error. * `NONE`: Weighted losses with one dimension reduced (axis=-1, or axis specified by loss function). When this reduction type is used with built-in Keras training loops like `fit`/`evaluate`, the unreduced vector loss is passed to the optimizer but the reported loss will be a scalar value. * `SUM`: Scalar sum of weighted losses. * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses. This reduction type is not supported when used with `tf.distribute.Strategy` outside of built-in training loops like `tf.keras` `compile`/`fit`. You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like: ``` with strategy.scope(): loss_obj = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) .... loss = tf.reduce_sum(loss_obj(labels, predictions)) * (1. / global_batch_size) ``` Please see the [custom training guide](https://www.tensorflow.org/tutorials/distribute/custom_training) for more details on this." 10475,Reduction,tensorflow/tensorflow/python/ops/losses/losses_impl.py,39,class,"Types of loss reduction. Contains the following values: * `NONE`: Un-reduced weighted losses with the same shape as input. * `SUM`: Scalar sum of weighted losses. * `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED. * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses. * `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero weights. DEPRECATED. * `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED." 10476,_safe_mean,tensorflow/tensorflow/python/ops/losses/losses_impl.py,76,function,"Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. num_present: The number of measurable elements in `losses`. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned." 10477,_num_present,tensorflow/tensorflow/python/ops/losses/losses_impl.py,91,function,"Computes the number of elements in the loss function induced by `weights`. A given weights tensor induces different numbers of usable elements in the `losses` tensor. The `weights` tensor is broadcast across `losses` for all possible dimensions. For example, if `losses` is a tensor of dimension `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is, in effect, tiled to match the shape of `losses`. Following this effective tile, the total number of present elements is the number of non-zero weights. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. weights: `Tensor` of shape `[]`, `[batch_size]` or `[batch_size, d1, ...
dK]`, where K < N. per_batch: Whether to return the number of elements per batch or as a sum total. Returns: The number of present (non-zero) elements in the losses tensor. If `per_batch` is `True`, the value is returned as a tensor of size `[batch_size]`. Otherwise, a single scalar tensor is returned." 10478,_num_elements,tensorflow/tensorflow/python/ops/losses/losses_impl.py,133,function,Computes the number of elements in `losses` tensor. 10479,compute_weighted_loss,tensorflow/tensorflow/python/ops/losses/losses_impl.py,141,function,"Computes the weighted loss. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `losses`, and must be broadcastable to `losses` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: the scope for the operations performed in computing the loss. loss_collection: the loss will be added to these collections. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `losses`. If `reduction` is `NONE`, this has the same shape as `losses`; otherwise, it is scalar. Raises: ValueError: If `weights` is `None` or the shape is not compatible with `losses`, or if the number of dimensions (rank) of either `losses` or `weights` is missing. Note: When calculating the gradient of a weighted loss, contributions from both `losses` and `weights` are considered. If your `weights` depend on some model parameters but you do not want this to affect the loss gradient, you need to apply `tf.stop_gradient` to `weights` before passing them to `compute_weighted_loss`. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10480,absolute_difference,tensorflow/tensorflow/python/ops/losses/losses_impl.py,210,function,"Adds an Absolute Difference loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a `Tensor` of shape `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid or if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility" 10481,cosine_distance,tensorflow/tensorflow/python/ops/losses/losses_impl.py,265,function,"Adds a cosine-distance loss to the training procedure. Note that the function assumes that `predictions` and `labels` are already unit-normalized. Args: labels: `Tensor` whose shape matches 'predictions' predictions: An arbitrary matrix. axis: The dimension along which the cosine distance is computed. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. dim: The old (deprecated) name for `axis`. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If `predictions` shape doesn't match `labels` shape, or `axis`, `labels`, `predictions` or `weights` is `None`. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10482,hinge_loss,tensorflow/tensorflow/python/ops/losses/losses_impl.py,321,function,"Adds a hinge loss to the training procedure. Args: labels: The ground truth output tensor. Its shape should match the shape of logits. The values of the tensor are expected to be 0.0 or 1.0. Internally the {0,1} labels are converted to {-1,1} when calculating the hinge loss. logits: The logits, a float tensor. Note that logits are assumed to be unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive (resp. negative) binary prediction. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shapes of `logits` and `labels` don't match or if `labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10483,huber_loss,tensorflow/tensorflow/python/ops/losses/losses_impl.py,372,function,"Adds a [Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) term to the training procedure. For each value x in `error=labels-predictions`, the following is calculated: ``` 0.5 * x^2 if |x| <= d 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. 
predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). delta: `float`, the point where the huber loss function changes from quadratic to linear. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10484,log_loss,tensorflow/tensorflow/python/ops/losses/losses_impl.py,449,function,"Adds a Log Loss term to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). epsilon: A small increment to add to avoid taking a log of zero. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10485,mean_pairwise_squared_error,tensorflow/tensorflow/python/ops/losses/losses_impl.py,507,function,"Adds a pairwise-errors-squared loss to the training procedure. Unlike `mean_squared_error`, which is a measure of the differences between corresponding elements of `predictions` and `labels`, `mean_pairwise_squared_error` is a measure of the differences between pairs of corresponding elements of `predictions` and `labels`. For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], three pairs of differences are summed to compute the loss: loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3 Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the corresponding pairs are computed within each batch sample but not across samples within a batch.
For example, if `predictions` represents a batch of 16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs is drawn from each image, but not across images. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. Args: labels: The ground truth output tensor, whose shape must match the shape of `predictions`. predictions: The predicted outputs, a tensor of size `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in `predictions`. weights: Coefficients for the loss: a scalar, a tensor of shape `[batch_size]` or a tensor whose shape matches `predictions`. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. Returns: A scalar `Tensor` that returns the weighted loss. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10486,mean_squared_error,tensorflow/tensorflow/python/ops/losses/losses_impl.py,604,function,"Adds a Sum-of-Squares loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10487,sigmoid_cross_entropy,tensorflow/tensorflow/python/ops/losses/losses_impl.py,658,function,"Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2: new_multiclass_labels = multiclass_labels * (1 - label_smoothing) + 0.5 * label_smoothing Args: multi_class_labels: `[batch_size, num_classes]` target integer labels in `{0, 1}`. logits: Float `[batch_size, num_classes]` logits outputs of the network. weights: Optional `Tensor` whose rank is either 0, or the same rank as `multi_class_labels`, and must be broadcastable to `multi_class_labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). label_smoothing: If greater than `0` then smooth the labels. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has the same shape as `logits`; otherwise, it is scalar. Raises: ValueError: If the shape of `logits` doesn't match that of `multi_class_labels` or if the shape of `weights` is invalid, or if `weights` is None. Also if `multi_class_labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10488,softmax_cross_entropy,tensorflow/tensorflow/python/ops/losses/losses_impl.py,724,function,"Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes: new_onehot_labels = onehot_labels * (1 - label_smoothing) + label_smoothing / num_classes Note that `onehot_labels` and `logits` must have the same shape, e.g. `[batch_size, num_classes]`. The shape of `weights` must be broadcastable to loss, whose shape is decided by the shape of `logits`. In case the shape of `logits` is `[batch_size, num_classes]`, loss is a `Tensor` of shape `[batch_size]`. Args: onehot_labels: One-hot-encoded labels. logits: Logits outputs of the network. weights: Optional `Tensor` that is broadcastable to loss. label_smoothing: If greater than 0 then smooth the labels. scope: the scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has shape `[batch_size]`; otherwise, it is scalar. Raises: ValueError: If the shape of `logits` doesn't match that of `onehot_labels` or if the shape of `weights` is invalid or if `weights` is None. Also if `onehot_labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10489,_remove_squeezable_dimensions,tensorflow/tensorflow/python/ops/losses/losses_impl.py,795,function,"Internal version of _remove_squeezable_dimensions which handles weights. Squeezes `predictions` and `labels` if their ranks differ from expected by exactly 1. Squeezes `weights` if its rank is 1 more than the new rank of `predictions`. This will use static shape if available.
Otherwise, it will add graph operations, which could result in a performance hit. Args: labels: Label values, a `Tensor` whose dimensions match `predictions`. predictions: Predicted values, a `Tensor` of arbitrary dimensions. weights: Optional weight `Tensor`. It will be squeezed if it's not scalar, and its rank is 1 more than the new rank of `labels`. expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`. Returns: Tuple of `predictions`, `labels` and `weights`, possibly with the last dimension squeezed." 10490,sparse_softmax_cross_entropy,tensorflow/tensorflow/python/ops/losses/losses_impl.py,847,function,"Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. Args: labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32` or `float64`. weights: Coefficients for the loss. This must be scalar or broadcastable to `labels` (i.e. same rank and each dimension is either 1 or the same). scope: the scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shapes of `logits`, `labels`, and `weights` are incompatible, or if any of them are None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility" 10491,squeeze_or_expand_dimensions,tensorflow/tensorflow/python/ops/losses/util.py,34,function,"Squeeze or expand last dimension if needed. 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1 (using `confusion_matrix.remove_squeezable_dimensions`). 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1 from the new rank of `y_pred`. If `sample_weight` is scalar, it is kept scalar. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: y_pred: Predicted values, a `Tensor` of arbitrary dimensions. y_true: Optional label `Tensor` whose dimensions match `y_pred`. sample_weight: Optional weight scalar or `Tensor` whose dimensions match `y_pred`. Returns: Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has the last dimension squeezed, `sample_weight` could be extended by one dimension. If `sample_weight` is None, (y_pred, y_true) is returned." 10492,scale_losses_by_sample_weight,tensorflow/tensorflow/python/ops/losses/util.py,123,function,"Scales loss values by the given sample weights. `sample_weight` dimensions are updated to match with the dimension of `losses` if possible by using squeeze/expand/broadcast. Args: losses: Loss tensor. sample_weight: Sample weights tensor. 
Returns: `losses` scaled by `sample_weight` with dtype float32." 10493,check_per_example_loss_rank,tensorflow/tensorflow/python/ops/losses/util.py,148,function,"Context manager that checks that the rank of per_example_loss is at least 1. Args: per_example_loss: Per example loss tensor. Yields: A context manager." 10494,add_loss,tensorflow/tensorflow/python/ops/losses/util.py,178,function,"Adds an externally defined loss to the collection of losses. Args: loss: A loss `Tensor`. loss_collection: Optional collection to add the loss to." 10495,get_losses,tensorflow/tensorflow/python/ops/losses/util.py,193,function,"Gets the list of losses from the loss_collection. Args: scope: An optional scope name for filtering the losses to return. loss_collection: Optional losses collection. Returns: A list of loss tensors." 10496,get_regularization_losses,tensorflow/tensorflow/python/ops/losses/util.py,207,function,"Gets the list of regularization losses. Args: scope: An optional scope name for filtering the losses to return. Returns: A list of regularization losses as Tensors." 10497,get_regularization_loss,tensorflow/tensorflow/python/ops/losses/util.py,220,function,"Gets the total regularization loss. Args: scope: An optional scope name for filtering the losses to return. name: The name of the returned tensor. Returns: A scalar regularization loss." 10498,get_total_loss,tensorflow/tensorflow/python/ops/losses/util.py,238,function,"Returns a tensor whose value represents the total loss. In particular, this adds any losses you have added with `tf.add_loss()` to any regularization losses that have been added by regularization parameters on layer constructors, e.g. `tf.layers`. Be very sure to use this if you are constructing a loss_op manually. Otherwise regularization arguments on `tf.layers` methods will not function. Args: add_regularization_losses: A boolean indicating whether or not to use the regularization losses in the sum. name: The name of the returned tensor. scope: An optional scope name for filtering the losses to return. Note that this filters the losses added with `tf.add_loss()` as well as the regularization losses to that scope. Returns: A `Tensor` whose value represents the total loss. Raises: ValueError: if `losses` is not iterable." 10499,LossesUtilTest,tensorflow/tensorflow/python/ops/losses/util_test.py,28,class, 10500,max,tensorflow/tensorflow/python/ops/numpy_ops/__init__.py,185,function, 10501,min,tensorflow/tensorflow/python/ops/numpy_ops/__init__.py,190,function, 10502,round,tensorflow/tensorflow/python/ops/numpy_ops/__init__.py,195,function, 10503,empty,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,49,function, 10504,empty_like,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,54,function, 10505,zeros,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,59,function, 10506,zeros_like,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,68,function, 10507,ones,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,83,function, 10508,ones_like,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,92,function, 10509,eye,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,103,function, 10510,identity,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,135,function, 10511,full,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,140,function, 10512,full_like,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,152,function,"order, subok and shape arguments mustn't be changed."
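The losses rows above describe a collection-based workflow: built-in losses register themselves, `add_loss` appends extra terms, and `get_total_loss` sums everything, optionally including regularization losses. A hedged sketch of that flow, assuming the `tf.compat.v1.losses` endpoints of these functions in graph mode:

```
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

labels = tf.constant([[1.0], [0.0]])
predictions = tf.constant([[0.8], [0.3]])

# Built-in losses add themselves to the GraphKeys.LOSSES collection.
mse = tf.losses.mean_squared_error(labels, predictions)
hub = tf.losses.huber_loss(labels, predictions, delta=1.0)

# A manually constructed term can join the same collection via add_loss.
tf.losses.add_loss(0.01 * tf.reduce_sum(tf.square(predictions)))

# get_total_loss sums the collected losses plus regularization losses.
total = tf.losses.get_total_loss(add_regularization_losses=True)
with tf.Session() as sess:
  print(sess.run([mse, hub, total]))
```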
10513,_array_internal,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,168,function,Main implementation of np.array(). 10514,array,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,225,function,"Since Tensors are immutable, a copy is made only if val is placed on a different device than the current one. Even if `copy` is False, a new Tensor may need to be built to satisfy `dtype` and `ndim`. This is used only if `val` is an ndarray or a Tensor." 10515,asarray,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,241,function, 10516,asanyarray,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,250,function, 10517,ascontiguousarray,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,255,function, 10518,arange,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,261,function,"Returns `step`-separated values in the range [start, stop). Args: start: Start of the interval. Included in the range. stop: End of the interval. If not specified, `start` is treated as 0 and `start` value is used as `stop`. If specified, it is not included in the range if `step` is integer. When `step` is floating point, it may or may not be included. step: The difference between 2 consecutive values in the output range. It is recommended to use `linspace` instead of using non-integer values for `step`. dtype: Optional. Type of the resulting ndarray. Could be a python type, a NumPy type or a TensorFlow `DType`. If not provided, the largest type of `start`, `stop`, `step` is used. Raises: ValueError: If step is zero." 10519,diag,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,303,function,Raises an error if input is not 1- or 2-d. 10520,diagonal,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,338,function, 10521,diagflat,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,369,function, 10522,_promote_dtype,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,374,function, 10523,_promote_dtype_binary,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,383,function, 10524,all,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,393,function, 10525,any,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,400,function, 10526,compress,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,407,function, 10527,copy,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,438,function, 10528,_maybe_promote_to_int,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,442,function, 10529,cumprod,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,454,function, 10530,cumsum,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,470,function, 10531,imag,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,486,function, 10532,_reduce,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,497,function,"A general reduction function. Args: tf_fn: the TF reduction function. a: the array to be reduced. axis: (optional) the axis along which to do the reduction. If None, all dimensions are reduced. dtype: (optional) the dtype of the result. keepdims: (optional) whether to keep the reduced dimension(s). promote_int: how to promote integer and bool inputs. There are three choices. (1) `_TO_INT_` always promotes them to np.int_ or np.uint; (2) `_TO_FLOAT` always promotes them to a float type (determined by dtypes.default_float_type); (3) None: don't promote. tf_bool_fn: (optional) the TF reduction function for bool inputs. 
It will only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype is `np.bool_` and `preserve_bool` is True. preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype is `np.bool_` (some reductions such as np.sum convert bools to integers, while others such as np.max preserve bools). Returns: An ndarray." 10533,sum,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,566,function, 10534,prod,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,577,function, 10535,mean,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,588,function, 10536,amax,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,599,function, 10537,amin,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,612,function, 10538,var,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,625,function, 10539,std,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,672,function, 10540,ravel,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,683,function, 10541,real,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,695,function, 10542,repeat,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,703,function, 10543,around,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,734,function, 10544,reshape,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,756,function,order argument can only be 'C' or 'F'. 10545,_reshape_method_wrapper,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,776,function, 10546,expand_dims,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,788,function, 10547,squeeze,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,794,function, 10548,transpose,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,800,function, 10549,swapaxes,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,808,function, 10550,moveaxis,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,824,function,"Raises ValueError if source, destination not in (-ndim(a), ndim(a))." 10551,pad,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,888,function,"Only supports modes 'constant', 'reflect' and 'symmetric' currently." 10552,take,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,905,function,"out argument is not supported, and default mode is clip." 10553,where,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,932,function,Raises ValueError if exactly one of x or y is not None. 10554,select,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,945,function, 10555,shape,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,962,function, 10556,ndim,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,968,function, 10557,isscalar,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,974,function, 10558,_boundaries_to_sizes,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,978,function,"Converting boundaries of splits to sizes of splits. Args: a: the array to be split. boundaries: the boundaries, as in np.split. axis: the axis along which to split. Returns: A list of sizes of the splits, as in tf.split."
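A small sketch of the bool-promotion rule documented for `_reduce` above (`np.sum` promotes bools to integers, `np.max` preserves them), via the public `tf.experimental.numpy` endpoints:

```
import tensorflow.experimental.numpy as tnp

x = tnp.asarray([True, False, True])
print(tnp.sum(x))                     # 2: bools promoted to an integer dtype
print(tnp.max(x))                     # True: bool dtype preserved
print(tnp.sum(x, dtype=tnp.float32))  # 2.0: an explicit dtype wins
```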
10559,split,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1009,function, 10560,_split_on_axis,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1017,function, 10561,broadcast_to,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1032,function, 10562,stack,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1037,function, 10563,hstack,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1052,function, 10564,vstack,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1066,function, 10565,dstack,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1076,function, 10566,_pad_left_to,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1085,function, 10567,_atleast_nd,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1093,function,"Reshape arrays to be at least `n`-dimensional. Args: n: The minimal rank. new_shape: a function that takes `n` and the old shape and returns the desired new shape. *arys: ndarray(s) to be reshaped. Returns: The reshaped array(s)." 10568,atleast_1d,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1123,function, 10569,atleast_2d,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1128,function, 10570,atleast_3d,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1133,function, 10571,nonzero,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1150,function, 10572,diag_indices,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1164,function, 10573,tri,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1177,function, 10574,tril,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1204,function, 10575,triu,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1226,function, 10576,flip,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1248,function, 10577,flipud,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1261,function, 10578,fliplr,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1266,function, 10579,roll,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1271,function, 10580,rot90,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1284,function, 10581,vander,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1304,function, 10582,ix_,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1343,function, 10583,broadcast_arrays,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1375,function, 10584,sign,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1388,function, 10585,take_along_axis,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1409,function, 10586,_as_index,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1472,function,"Helper function to parse idx as an index. Args: idx: index need_scalar: If idx needs to be a scalar value. Returns: A pair, (idx, bool). First one is the parsed index and can be a tensor, or scalar integer / Dimension. Second one is True if rank is known to be 0. Raises: IndexError: For incorrect indices." 10587,_slice_helper,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1512,function,Helper function for __getitem__. 10588,_as_spec_tuple,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1654,function,Convert slice_spec to tuple. 10589,_getitem,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops.py,1671,function,Implementation of ndarray.__getitem__.
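An illustrative sketch of a few of the manipulation routines indexed above (`reshape`, `vstack`, `moveaxis`, `split`, and the `_getitem`-backed indexing), through the public `tf.experimental.numpy` module:

```
import tensorflow.experimental.numpy as tnp

a = tnp.reshape(tnp.arange(6), (2, 3))
print(tnp.vstack([a, a]).shape)      # (4, 3)
print(tnp.moveaxis(a, 0, -1).shape)  # (3, 2)
print(tnp.split(tnp.arange(9), 3))   # three length-3 pieces
print(a[..., 1])                     # NumPy-style indexing via __getitem__
```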
10590,ArrayCreationTest,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops_test.py,38,class, 10591,ArrayMethodsTest,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops_test.py,525,class, 10592,ArrayManipulationTest,tensorflow/tensorflow/python/ops/numpy_ops/np_array_ops_test.py,1058,class, 10593,convert_to_tensor,tensorflow/tensorflow/python/ops/numpy_ops/np_arrays.py,37,function,"Wrapper over `tf.convert_to_tensor`. Args: value: value to convert dtype: (optional) the type we would like it to be converted to. dtype_hint: (optional) soft preference for the type we would like it to be converted to. `tf.convert_to_tensor` will attempt to convert value to this type first, but will not fail if conversion is not possible, falling back to inferring the type instead. Returns: Value converted to tf.Tensor." 10594,NdarraySpec,tensorflow/tensorflow/python/ops/numpy_ops/np_arrays.py,61,class,Type specification for a `tf.experimental.numpy.ndarray`. 10595,ndarray,tensorflow/tensorflow/python/ops/numpy_ops/np_arrays.py,99,class,"Equivalent of numpy.ndarray backed by TensorFlow tensors. This does not support all features of NumPy ndarrays, e.g. strides and memory order, since, unlike NumPy, the backing storage is not a raw memory buffer. TODO(srbs): Clearly specify which attributes and methods are not supported or if there are any differences in behavior." 10596,tensor_to_ndarray,tensorflow/tensorflow/python/ops/numpy_ops/np_arrays.py,326,function, 10597,ndarray_to_tensor,tensorflow/tensorflow/python/ops/numpy_ops/np_arrays.py,330,function, 10598,ArrayTest,tensorflow/tensorflow/python/ops/numpy_ops/np_arrays_test.py,38,class, 10599,is_allow_float64,tensorflow/tensorflow/python/ops/numpy_ops/np_dtypes.py,69,function, 10600,set_allow_float64,tensorflow/tensorflow/python/ops/numpy_ops/np_dtypes.py,73,function, 10601,canonicalize_dtype,tensorflow/tensorflow/python/ops/numpy_ops/np_dtypes.py,78,function, 10602,_result_type,tensorflow/tensorflow/python/ops/numpy_ops/np_dtypes.py,87,function, 10603,_get_cached_dtype,tensorflow/tensorflow/python/ops/numpy_ops/np_dtypes.py,92,function,Returns an np.dtype for the TensorFlow DType. 10604,default_float_type,tensorflow/tensorflow/python/ops/numpy_ops/np_dtypes.py,104,function,"Gets the default float type. Returns: If `is_allow_float64()` is true, returns float64; otherwise returns float32."
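A sketch of the tensor/ndarray interop described for `convert_to_tensor` and `ndarray` above; dtypes are pinned to float32 here only so the two operands are compatible, since the default tf-numpy float type is float64 while `is_allow_float64()` is true:

```
import tensorflow as tf
import tensorflow.experimental.numpy as tnp

x = tnp.ones([2, 2], dtype=tnp.float32)    # ndarray backed by a tf.Tensor
y = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # plain float32 tensor
z = tf.matmul(x, y)   # TF ops accept tf-numpy arrays...
w = tnp.sum(z)        # ...and tf-numpy functions accept tf.Tensors
print(z.shape, w)
```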
10605,public_name,tensorflow/tensorflow/python/ops/numpy_ops/np_export.py,24,function, 10606,np_export,tensorflow/tensorflow/python/ops/numpy_ops/np_export.py,28,function, 10607,np_export_constant,tensorflow/tensorflow/python/ops/numpy_ops/np_export.py,32,function, 10608,ReadmeTest,tensorflow/tensorflow/python/ops/numpy_ops/np_interop_test.py,28,class, 10609,InteropTest,tensorflow/tensorflow/python/ops/numpy_ops/np_interop_test.py,71,class, 10610,FunctionTest,tensorflow/tensorflow/python/ops/numpy_ops/np_interop_test.py,319,class, 10611,VariableTest,tensorflow/tensorflow/python/ops/numpy_ops/np_interop_test.py,366,class, 10612,LogicTest,tensorflow/tensorflow/python/ops/numpy_ops/np_logic_test.py,31,class, 10613,make_numpy_compatible,tensorflow/tensorflow/python/ops/numpy_ops/np_logic_test.py,104,function, 10614,dot,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,54,function, 10615,_bin_op,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,71,function, 10616,add,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,81,function, 10617,subtract,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,93,function, 10618,multiply,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,98,function, 10619,true_divide,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,110,function, 10620,divide,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,134,function, 10621,floor_divide,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,139,function, 10622,mod,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,152,function, 10623,remainder,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,165,function, 10624,divmod,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,170,function, 10625,maximum,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,175,function, 10626,minimum,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,194,function, 10627,clip,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,206,function, 10628,matmul,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,221,function, 10629,tensordot,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,242,function, 10630,inner,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,247,function, 10631,cross,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,260,function, 10632,vdot,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,336,function, 10633,power,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,346,function, 10634,float_power,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,351,function, 10635,arctan2,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,356,function, 10636,nextafter,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,361,function, 10637,heaviside,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,366,function, 10638,hypot,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,380,function, 10639,kron,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,385,function, 10640,outer,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,416,function, 10641,logaddexp,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,426,function, 10642,logaddexp2,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,436,function, 10643,polyval,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,446,function, 10644,isclose,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,464,function, 
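A short sketch exercising a few of the binary `np_math_ops` listed above through their public `tf.experimental.numpy` endpoints:

```
import tensorflow.experimental.numpy as tnp

a = tnp.asarray([1.0, 2.0, 3.0])
b = tnp.asarray([3.0, 2.0, 1.0])
print(tnp.add(a, b))                # [4. 4. 4.]
print(tnp.clip(a, 1.5, 2.5))        # [1.5 2.  2.5]
print(tnp.isclose(a, b, atol=1.1))  # [False  True False]
```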
10645,allclose,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,482,function, 10646,_tf_gcd,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,487,function, 10647,gcd,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,517,function, 10648,lcm,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,523,function, 10649,_bitwise_binary_op,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,537,function, 10650,bitwise_and,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,554,function, 10651,bitwise_or,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,559,function, 10652,bitwise_xor,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,564,function, 10653,bitwise_not,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,569,function, 10654,_scalar,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,579,function,"Computes the tf_fn(x) for each element in `x`. Args: tf_fn: function that takes a single Tensor argument. x: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `ops.convert_to_tensor`. promote_to_float: whether to cast the argument to a float dtype (`np_dtypes.default_float_type`) if it is not already. Returns: An ndarray with the same shape as `x`. The default output dtype is determined by `np_dtypes.default_float_type`, unless x is an ndarray with a floating point type, in which case the output type is the same as x.dtype." 10655,log,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,601,function, 10656,exp,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,606,function, 10657,sqrt,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,611,function, 10658,abs,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,616,function, 10659,absolute,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,621,function, 10660,fabs,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,626,function, 10661,ceil,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,631,function, 10662,floor,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,636,function, 10663,conj,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,641,function, 10664,negative,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,646,function, 10665,reciprocal,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,651,function, 10666,signbit,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,656,function, 10667,sin,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,667,function, 10668,cos,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,672,function, 10669,tan,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,677,function, 10670,sinh,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,682,function, 10671,cosh,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,687,function, 10672,tanh,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,692,function, 10673,arcsin,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,697,function, 10674,arccos,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,702,function, 10675,arctan,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,707,function, 10676,arcsinh,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,712,function, 10677,arccosh,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,717,function, 10678,arctanh,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,722,function,
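A sketch of the `_scalar` promotion rule quoted above: integer inputs are promoted to the default float type, while floating-point inputs keep their dtype:

```
import tensorflow.experimental.numpy as tnp

print(tnp.sqrt(tnp.asarray([1, 4, 9])))  # [1. 2. 3.]: ints promoted to float
x64 = tnp.asarray([1.0, 4.0], dtype=tnp.float64)
print(tnp.sqrt(x64).dtype)               # float64: float dtype preserved
```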
10679,deg2rad,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,727,function, 10680,rad2deg,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,736,function, 10681,angle,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,746,function, 10682,cbrt,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,762,function, 10683,conjugate,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,773,function, 10684,exp2,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,778,function, 10685,expm1,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,787,function, 10686,fix,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,792,function, 10687,iscomplex,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,801,function, 10688,isreal,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,806,function, 10689,iscomplexobj,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,811,function, 10690,isrealobj,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,817,function, 10691,isnan,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,822,function, 10692,_make_nan_reduction,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,826,function,Helper to generate nan* functions. 10693,nanmean,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,847,function, 10694,isfinite,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,860,function, 10695,isinf,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,865,function, 10696,isneginf,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,870,function, 10697,isposinf,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,875,function, 10698,log2,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,880,function, 10699,log10,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,885,function, 10700,log1p,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,890,function, 10701,positive,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,895,function, 10702,sinc,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,900,function, 10703,square,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,911,function, 10704,diff,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,916,function, 10705,_wrap,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,943,function,Wraps binary ops so they can be added as operator overloads on ndarray. 10706,_comparison,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,978,function,Helper function for comparison.
10707,equal,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,993,function, 10708,not_equal,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,998,function, 10709,greater,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1003,function, 10710,greater_equal,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1008,function, 10711,less,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1013,function, 10712,less_equal,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1018,function, 10713,array_equal,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1023,function, 10714,_logical_binary_op,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1039,function, 10715,logical_and,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1046,function, 10716,logical_or,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1051,function, 10717,logical_xor,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1056,function, 10718,logical_not,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1061,function, 10719,linspace,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1076,function, 10720,logspace,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1115,function, 10721,geomspace,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1126,function, 10722,ptp,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1150,function, 10723,concatenate,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1156,function, 10724,tile,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1167,function, 10725,count_nonzero,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1184,function, 10726,argsort,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1190,function, 10727,sort,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1215,function, 10728,_argminmax,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1230,function, 10729,argmax,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1241,function, 10730,argmin,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1246,function, 10731,append,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1251,function, 10732,average,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1259,function, 10733,trace,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1322,function, 10734,meshgrid,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1340,function,This currently requires copy=True and sparse=False. 10735,einsum,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops.py,1364,function, 10736,MathTest,tensorflow/tensorflow/python/ops/numpy_ops/np_math_ops_test.py,34,class, 10737,seed,tensorflow/tensorflow/python/ops/numpy_ops/np_random.py,37,function,"Sets the seed for the random number generator. Uses `tf.set_random_seed`. Args: s: an integer." 10738,randn,tensorflow/tensorflow/python/ops/numpy_ops/np_random.py,54,function,"Returns samples from a normal distribution. Uses `tf.random_normal`. Args: *args: The shape of the output array. Returns: An ndarray with shape `args` and dtype `float64`." 
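A sketch of the `np_random` routines above, assuming their `tf.experimental.numpy.random` endpoints:

```
import tensorflow.experimental.numpy as tnp

tnp.random.seed(0)                   # forwards to TF's random seeding
print(tnp.random.randn(2, 3).shape)  # (2, 3), dtype float64 per the docs
print(tnp.random.randint(0, 10, size=(4,)))
print(tnp.random.uniform(low=0.0, high=1.0, size=(2,)))
```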
10739,uniform,tensorflow/tensorflow/python/ops/numpy_ops/np_random.py,73,function, 10740,random,tensorflow/tensorflow/python/ops/numpy_ops/np_random.py,85,function, 10741,rand,tensorflow/tensorflow/python/ops/numpy_ops/np_random.py,90,function, 10742,randint,tensorflow/tensorflow/python/ops/numpy_ops/np_random.py,95,function, 10743,SeedTest,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,35,class, 10744,RandomTestBase,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,44,class, 10745,RandNTest,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,65,class, 10746,UniformTest,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,81,class, 10747,RandomTest,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,109,class, 10748,RandTest,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,121,class, 10749,RandIntTest,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,133,class, 10750,RandNDistriutionTest,tensorflow/tensorflow/python/ops/numpy_ops/np_random_test.py,147,class, 10751,_canonicalize_axis,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,44,function, 10752,_canonicalize_axes,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,48,function, 10753,_supports_signature,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,60,function, 10754,_to_tf_type,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,64,function,"Converts a native python or numpy type to TF DType. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A tensorflow `DType`." 10755,_to_numpy_type,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,76,function,"Converts a native python or TF DType to numpy type. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A NumPy `dtype`." 10756,isscalar,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,90,function,Returns whether `val` is a scalar value or scalar Tensor. 10757,_has_docstring,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,104,function, 10758,_add_blank_line,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,109,function, 10759,_np_signature,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,116,function,An enhanced inspect.signature that can handle numpy.ufunc. 10760,_is_compatible_param_kind,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,170,function, 10761,_prepare_np_fun_name_and_fun,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,180,function,"Mutually propagates information between `np_fun_name` and `np_fun`. If one is None and the other is not, we'll try to make the former not None on a best-effort basis. Args: np_fun_name: name for the np_fun symbol. At least one of np_fun or np_fun_name should be set. np_fun: the numpy function whose docstring will be used. Returns: Processed `np_fun_name` and `np_fun`." 10762,_np_doc_helper,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,210,function,Helper to get docs. 10763,get_np_doc_form,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,231,function,"Gets the form of the original numpy docstrings. Returns: See `set_np_doc_form` for the list of valid values." 10764,set_np_doc_form,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,240,function,"Selects the form of the original numpy docstrings. This function sets a global variable that controls how a tf-numpy symbol's docstring should refer to the original numpy docstring. If `value` is `'inlined'`, the numpy docstring will be verbatim copied into the tf-numpy docstring.
Otherwise, a link to the original numpy docstring will be added. Which numpy version the link points to depends on `value`: * `'stable'`: the current stable version; * `'dev'`: the current development version; * pattern `\d+(\.\d+(\.\d+)?)?`: `value` will be treated as a version number, e.g. '1.16'. Args: value: the value to set the global variable to." 10765,_add_np_doc,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,260,function,"Appends the numpy docstring to `doc`, according to `set_np_doc_form`. See `set_np_doc_form` for how it controls the form of the numpy docstring. Args: doc: the docstring to be appended to. np_fun_name: the name of the numpy function. np_f: (optional) the numpy function. Returns: `doc` with numpy docstring appended." 10766,is_sig_mismatch_an_error,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,305,function, 10767,set_is_sig_mismatch_an_error,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,309,function, 10768,np_doc,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,314,function,"Attaches numpy docstring to a function. Args: np_fun_name: name for the np_fun symbol. At least one of np_fun or np_fun_name should be set. np_fun: (optional) the numpy function whose docstring will be used. export: whether to export this symbol under module `tf.experimental.numpy`. Note that if `export` is `True`, `np_fun` must be a function directly under the `numpy` module, not under any submodule of `numpy` (e.g. `numpy.random`). Returns: A function decorator that attaches the docstring from `np_fun` to the decorated function." 10769,np_doc_only,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,378,function,"Attaches numpy docstring to a function. This differs from np_doc in that it doesn't check for a match in signature. Args: np_fun_name: name for the np_fun symbol. At least one of np_fun or np_fun_name should be set. np_fun: (optional) the numpy function whose docstring will be used. export: whether to export this symbol under module `tf.experimental.numpy`. Note that if `export` is `True`, `np_f` must be a function directly under the `numpy` module, not under any submodule of `numpy` (e.g. `numpy.random`). Returns: A function decorator that attaches the docstring from `np_fun` to the decorated function." 10770,finfo,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,410,function,"Note that currently it just forwards to the numpy namesake, while tensorflow and numpy dtypes may have different properties." 10771,_maybe_get_dtype,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,417,function,Returns a numpy type if available from x. Skips if x is numpy.ndarray. 10772,result_type,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,436,function, 10773,_result_type_binary,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,446,function,A specialization of result_type for 2 arguments for performance reasons. 10774,promote_types,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,456,function, 10775,tf_broadcast,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,462,function,"Broadcast tensors. Args: *args: a list of tensors whose shapes are broadcastable against each other. Returns: Tensors broadcasted to the common shape." 10776,get_static_value,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,483,function,"A version of tf.get_static_value that returns None on float dtypes. It returns None on float dtypes in order to avoid breaking gradients. Args: x: a tensor.
Returns: Same as `tf.get_static_value`, except that it returns None when `x` has a float dtype." 10777,_maybe_static,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,500,function, 10778,cond,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,512,function,A version of tf.cond that tries to evaluate the condition. 10779,add,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,523,function,A version of tf.add that eagerly evaluates if possible. 10780,subtract,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,528,function,A version of tf.subtract that eagerly evaluates if possible. 10781,greater,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,533,function,A version of tf.greater that eagerly evaluates if possible. 10782,greater_equal,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,538,function,A version of tf.greater_equal that eagerly evaluates if possible. 10783,less_equal,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,543,function,A version of tf.less_equal that eagerly evaluates if possible. 10784,logical_and,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,548,function,A version of tf.logical_and that eagerly evaluates if possible. 10785,logical_or,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,563,function,A version of tf.logical_or that eagerly evaluates if possible. 10786,getitem,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,578,function,A version of __getitem__ that eagerly evaluates if possible. 10787,reduce_all,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,583,function,A version of tf.reduce_all that eagerly evaluates if possible. 10788,reduce_any,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,592,function,A version of tf.reduce_any that eagerly evaluates if possible. 10789,tf_rank,tensorflow/tensorflow/python/ops/numpy_ops/np_utils.py,601,function, 10790,UtilsTest,tensorflow/tensorflow/python/ops/numpy_ops/np_utils_test.py,27,class, 10791,PublicSymbolTest,tensorflow/tensorflow/python/ops/numpy_ops/integration_test/public_symbol_test.py,28,class, 10792,MicroBenchmarks,tensorflow/tensorflow/python/ops/numpy_ops/integration_test/benchmarks/micro_benchmarks.py,46,class,Main micro benchmark class. 10793,MLP,tensorflow/tensorflow/python/ops/numpy_ops/integration_test/benchmarks/numpy_mlp.py,27,class,"MLP model. T = Relu(Add(MatMul(A, B), C)) R = Relu(Add(MatMul(T, D), E))" 10794,MLP,tensorflow/tensorflow/python/ops/numpy_ops/integration_test/benchmarks/tf_numpy_mlp.py,29,class,"MLP model. T = Relu(Add(MatMul(A, B), C)) R = Relu(Add(MatMul(T, D), E))" 10795,ArrayTest,tensorflow/tensorflow/python/ops/parallel_for/array_test.py,36,class, 10796,for_loop,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops.py,44,function,"Runs `loop_fn` `iters` times and stacks the outputs. Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and stacks corresponding outputs of the different runs. Args: loop_fn: A function that takes an int32 scalar tf.Tensor object representing the iteration number, and returns a possibly nested structure of tensor objects. The shape of these outputs should not depend on the input. loop_fn_dtypes: dtypes for the outputs of `loop_fn`. iters: Number of iterations for which to run `loop_fn`. parallel_iterations: The number of iterations that can be dispatched in parallel. This knob can be used to control the total memory usage. Returns: Returns a nested structure of stacked output tensor objects with the same nested structure as the output of `loop_fn`." 
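The np_utils helpers indexed above (`cond`, `add`, `subtract`, `greater`, `reduce_all`, ...) all follow the same pattern: consult `tf.get_static_value` and compute with NumPy when both operands are statically known, otherwise emit the regular TF op. A minimal sketch of that pattern using only public APIs (`maybe_static_add` is an illustrative name, not the library's; the real helper also returns None for float dtypes so gradients are preserved):

```python
import numpy as np
import tensorflow as tf

def maybe_static_add(a, b):
  # Evaluate eagerly when TF can infer both values at trace time;
  # otherwise fall back to the symbolic op.
  a_static = tf.get_static_value(a)
  b_static = tf.get_static_value(b)
  if a_static is not None and b_static is not None:
    return np.add(a_static, b_static)
  return tf.add(a, b)

print(maybe_static_add(tf.constant(2), tf.constant(3)))  # 5, resolved statically
```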
10797,_flatten_first_two_dims,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops.py,110,function,Flattens the first two dimensions of x into a single dimension. 10798,_is_under_xla_context,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops.py,121,function,Check if we are currently inside an XLA compile context. 10799,pfor,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops.py,136,function,"Equivalent to running `loop_fn` `iters` times and stacking the outputs. `pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters` times, with input from 0 to `iters - 1`, and stacking corresponding output of each iteration. However the implementation does not use a `tf.while_loop`. Instead it adds new operations to the graph that collectively compute the same value as what running `loop_fn` in a loop would compute. This is an experimental feature and currently has a lot of limitations: - There should be no data dependency between the different iterations. For example, a future iteration should not depend on a value or side-effect of a previous iteration. - Stateful kernels are mostly unsupported since these often imply a data dependency or ordering of the iterations. We do support a limited set of such stateful kernels though (like RandomFoo, Variable operations like reads, etc). - Conversion works only on a limited set of kernels for which a converter has been registered. - `loop_fn` has limited support for control flow operations. `tf.cond` in particular is not supported. - `loop_fn` should return nested structure of Tensors or Operations. However if an Operation is returned, it should have zero outputs. - The shape and dtype of `loop_fn` outputs should not depend on the input to loop_fn. Args: loop_fn: A function that takes an int32 scalar tf.Tensor object representing the iteration number, and optionally a keyword argument `pfor_config` set to a PForConfig object. It returns a possibly nested structure of Tensor or Operation objects. Note that if setting `parallel_iterations` argument to something other than None, `loop_fn` may be called more than once during graph construction. So it may need to avoid mutating global state. iters: Number of iterations for which to run `loop_fn`. fallback_to_while_loop: If true, on failing to vectorize an operation, pfor falls back to using a `tf.while_loop` to dispatch the iterations. parallel_iterations: A knob to control how many iterations are vectorized and dispatched in parallel. The default value of None corresponds to vectorizing all the iterations. If `parallel_iterations` is smaller than `iters`, then chunks of at most that many iterations are dispatched in sequence. This knob can be used to control the total memory usage. Returns: Returns a nested structure of stacked tensor objects with the same nested structure as the output of `loop_fn`. Raises: ValueError: If parallel_iterations is not None and not an integer > 1." 10800,_loop_fn_has_config,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops.py,210,function,Test if `loop_fn` has a `pfor_config` argument. 10801,_pfor_impl,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops.py,228,function,Implementation of pfor. 10802,vectorized_map,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops.py,353,function,"Parallel map on the list of tensors unpacked from `elems` on dimension 0. This method works similarly to `tf.map_fn` but is optimized to run much faster, possibly with a much larger memory footprint.
The speedups are obtained by vectorization (see [Auto-Vectorizing TensorFlow Graphs: Jacobians, Auto-Batching and Beyond](https://arxiv.org/pdf/1903.04243.pdf)). The idea behind vectorization is to semantically launch all the invocations of `fn` in parallel and fuse corresponding operations across all these invocations. This fusion is done statically at graph generation time and the generated code is often similar in performance to a manually fused version. Because `tf.vectorized_map` fully parallelizes the batch, this method will generally be significantly faster than using `tf.map_fn`, especially in eager mode. However this is an experimental feature and currently has a lot of limitations: - There should be no data dependency between the different semantic invocations of `fn`, i.e. it should be safe to map the elements of the inputs in any order. - Stateful kernels are mostly unsupported since these often imply a data dependency. We do support a limited set of such stateful kernels though (like RandomFoo, Variable operations like reads, etc). - `fn` has limited support for control flow operations. - `fn` should return nested structure of Tensors or Operations. However if an Operation is returned, it should have zero outputs. - The shape and dtype of any intermediate or output tensors in the computation of `fn` should not depend on the input to `fn`. Examples: ```python def outer_product(a): return tf.tensordot(a, a, 0) batch_size = 100 a = tf.ones((batch_size, 32, 32)) c = tf.vectorized_map(outer_product, a) assert c.shape == (batch_size, 32, 32, 32, 32) ``` ```python # Computing per-example gradients batch_size = 10 num_features = 32 layer = tf.keras.layers.Dense(1) def model_fn(arg): with tf.GradientTape() as g: inp, label = arg inp = tf.expand_dims(inp, 0) label = tf.expand_dims(label, 0) prediction = layer(inp) loss = tf.nn.l2_loss(label - prediction) return g.gradient(loss, (layer.kernel, layer.bias)) inputs = tf.random.uniform([batch_size, num_features]) labels = tf.random.uniform([batch_size, 1]) per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels)) assert per_example_gradients[0].shape == (batch_size, num_features, 1) assert per_example_gradients[1].shape == (batch_size, 1) ``` Args: fn: The callable to be performed. It accepts one argument, which will have the same (possibly nested) structure as `elems`, and returns a possibly nested structure of Tensors and Operations, which may be different than the structure of `elems`. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be mapped over by `fn`. fallback_to_while_loop: If true, on failing to vectorize an operation, the unsupported op is wrapped in a tf.while_loop to execute the map iterations. Note that this fallback only happens for unsupported ops and other parts of `fn` are still vectorized. If false, on encountering an unsupported op, a ValueError is thrown. Note that the fallbacks can result in slowdowns since vectorization often yields speedup of one to two orders of magnitude. Returns: A tensor or (possibly nested) sequence of tensors. Each tensor packs the results of applying fn to tensors unpacked from elems along the first dimension, from first to last. Raises: ValueError: If vectorization fails and fallback_to_while_loop is False."
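The doctest examples above cover the common cases; the `fallback_to_while_loop` flag can also be exercised through the public `tf.vectorized_map` wrapper. A small usage sketch (shapes and the mapped function are arbitrary choices here):

```python
import tensorflow as tf

x = tf.random.uniform([8, 4])

# Each semantic invocation of the lambda sees one row of `x`. With the
# fallback enabled, any op lacking a pfor converter is dispatched under a
# tf.while_loop instead of raising, while the rest stays vectorized.
y = tf.vectorized_map(lambda row: tf.reduce_sum(row * row), x,
                      fallback_to_while_loop=True)
print(y.shape)  # (8,)
```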
10803,PForTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,73,class, 10804,IndexedSlicesTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,156,class, 10805,ReductionTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,179,class, 10806,BitwiseTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,282,class, 10807,ImageTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,325,class, 10808,NNTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,356,class, 10809,RandomTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,610,class, 10810,StatelessRandomTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,705,class, 10811,LoggingTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,740,class, 10812,TensorArrayTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,763,class, 10813,TensorListTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,892,class, 10814,StackTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1022,class, 10815,WhileV1Test,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1093,class, 10816,dynamic_lstm_input_fn,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1288,function, 10817,create_dynamic_lstm,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1298,function, 10818,WhileV2Test,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1353,class, 10819,NestedControlFlowTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1492,class, 10820,StatelessIfTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1551,class, 10821,IfTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1599,class, 10822,RNNTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1616,class, 10823,Benchmarks,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1634,class, 10824,SparseTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1764,class, 10825,ParsingTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1872,class, 10826,PartitionedCallTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,1923,class, 10827,SpectralTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,2009,class, 10828,VariableTest,tensorflow/tensorflow/python/ops/parallel_for/control_flow_ops_test.py,2071,class, 10829,jacobian,tensorflow/tensorflow/python/ops/parallel_for/gradients.py,28,function,"Computes jacobian of `output` w.r.t. `inputs`. Args: output: A tensor. inputs: A tensor or a nested structure of tensor objects. use_pfor: If true, uses pfor for computing the jacobian. Else uses tf.while_loop. parallel_iterations: A knob to control how many iterations are dispatched in parallel. This knob can be used to control the total memory usage. Returns: A tensor or a nested structure of tensors with the same structure as `inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has shape [x_1, ..., x_m], the corresponding jacobian has shape [y_1, ..., y_n, x_1, ..., x_m].
Note that in cases where the gradient is sparse (IndexedSlices), the jacobian function currently makes it dense and returns a Tensor instead. This may change in the future." 10830,batch_jacobian,tensorflow/tensorflow/python/ops/parallel_for/gradients.py,83,function,"Computes and stacks jacobians of `output[i,...]` w.r.t. `inp[i,...]`. e.g. x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) y = x * x jacobian = batch_jacobian(y, x) # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]] Args: output: A tensor with shape [b, y_1, ..., y_n]. `output[i,...]` should only depend on `inp[i,...]`. inp: A tensor with shape [b, x_1, ..., x_m]. use_pfor: If true, uses pfor for computing the Jacobian. Else uses a tf.while_loop. parallel_iterations: A knob to control how many iterations are vectorized and dispatched in parallel. The default value of None, when use_pfor is true, corresponds to vectorizing all the iterations. When use_pfor is false, the default value of None corresponds to parallel_iterations=10. This knob can be used to control the total memory usage. Returns: A tensor `t` with shape [b, y_1, ..., y_n, x_1, ..., x_m] where `t[i, ...]` is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked per-example jacobians. Raises: ValueError: if first dimension of `output` and `inp` do not match." 10831,FullyConnectedModel,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,51,class, 10832,fully_connected_model_fn,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,66,function, 10833,lstm_model_fn,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,72,function, 10834,dynamic_lstm_model_fn,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,85,function, 10835,create_fc_batch_jacobian,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,103,function, 10836,create_lstm_batch_jacobian,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,111,function, 10837,create_dynamic_lstm_batch_jacobian,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,119,function, 10838,create_lstm_batch_hessian,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,133,function, 10839,create_lstm_hessian,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,145,function, 10840,create_fc_per_eg_grad,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,161,function, 10841,create_lstm_per_eg_grad,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,185,function, 10842,Mnist,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,217,class, 10843,create_mnist_autobatch,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,264,function, 10844,create_mnist_per_eg_grad,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,280,function, 10845,create_mnist_batch_jacobian,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,303,function, 10846,create_mnist_per_eg_jacobian,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,313,function, 10847,create_fc_per_eg_jacobians,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,332,function, 10848,GradientsTest,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,355,class, 10849,GradientsBenchmarks,tensorflow/tensorflow/python/ops/parallel_for/gradients_test.py,571,class, 10850,MathTest,tensorflow/tensorflow/python/ops/parallel_for/math_test.py,41,class, 10851,LinalgTest,tensorflow/tensorflow/python/ops/parallel_for/math_test.py,655,class,
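The pfor-based `jacobian`/`batch_jacobian` helpers indexed above are internal; the same machinery is reachable through the public `tf.GradientTape.jacobian` and `tf.GradientTape.batch_jacobian` methods. A short sketch reproducing the `y = x * x` example from the docstring (eager mode assumed):

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
with tf.GradientTape() as g:
  g.watch(x)
  y = x * x  # y[i, :] depends only on x[i, :]

# Stacked per-example Jacobians: [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
j = g.batch_jacobian(y, x)
print(j.shape)  # (2, 2, 2)
```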
10852,_stack,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,77,function,Stacks `t` `length` times. 10853,_is_stateful_pfor_op,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,129,function, 10854,WhileOp,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,144,class,Object for storing state for converting the outputs of a while_loop. 10855,ConversionNotImplementedError,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,715,class, 10856,_PforInput,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,719,class,Input object passed to registered pfor converters. 10857,RegisterPFor,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,846,class,"Utility to register converters for pfor. Usage: @RegisterPFor(foo_op_type) def _foo_converter(pfor_input): ... The above will register conversion function `_foo_converter` for handling conversion of `foo_op_type`. These converters are called during vectorization of a `pfor` loop body. For each operation node in this loop body, the vectorization process will call the converter corresponding to the operation type of the node. During conversion, the registered function will be called with a single argument `pfor_input`, of type `PForInput`, which will contain state needed for the conversion. When the converter is called for a node, all its inputs should already have been converted and these converted values are stored in `pfor_input.inputs`. This registered function should output a list of WrappedTensor objects with the same length as the number of outputs of the node being converted. If the node had zero outputs, then it should return an ops.Operation object. These new sets of nodes should implement the functionality of running that operation for the number of iterations specified by `pfor_input.pfor.loop_len_vector[0]` where the inputs of the node for each iteration are picked from `pfor_input.inputs`. One tricky aspect of the conversion process is keeping track of, and leveraging, loop invariance of computation. Each converted input is a WrappedTensor which indicates whether the input was loop invariant or not. If the converted value is loop invariant, its rank should match the rank of the corresponding tensor in the loop body, else its rank is larger by 1. The converter should look at the loop invariance of the inputs and generate new nodes based on that. Note that the converter will not be called if all inputs are loop invariant and the operation is not stateful. The converter should determine if its own output is loop invariant and `wrap` its output accordingly. Example: Here, the converter is trying to convert a Reshape node in the loop body. This node will have two inputs: the tensor to reshape, and the new shape. The example here only handles the case where the shape is loop invariant. @RegisterPFor(""Reshape"") def _convert_reshape(pfor_input): # We assume that input is not loop invariant. Call to `stacked_input` # asserts that and returns the converted value. This value will have a rank # larger by 1 compared to the rank of the input in the loop body. t = pfor_input.stacked_input(0) # We assume that shape input is loop invariant. Call to `unstacked_input` # asserts that and returns the converted value. shape = pfor_input.unstacked_input(1) # We compute `new_shape` by prepending the number of iterations to the # original shape. new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0) # The vectorized output involves reshaping the converted input `t` using # `new_shape`.
new_output = array_ops.reshape(t, new_shape) # The converted output is marked as not loop invariant using the call to # wrap. return wrap(new_output, True)" 10858,RegisterPForWithArgs,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,925,class,"Utility to register converters for pfor. Usage: @RegisterPForWithArgs(foo_op_type, foo=value, ...) def _foo_converter(pfor_input, foo=None, ...): ... See RegisterPFor for details on the conversion function. `RegisterPForWithArgs` allows binding extra arguments to the conversion function at registration time." 10859,_create_op,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,953,function,Utility to create an op. 10860,wrap,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,984,function,Helper to create a WrappedTensor object. 10861,_fallback_converter,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,996,function, 10862,PForConfig,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1028,class,A configuration object used to communicate with loop body function. 10863,PFor,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1152,class,"Implementation of rewrite of parallel-for loops. This class takes a DAG or a set of DAGs representing the body of a parallel-for loop, and adds new operations to the graph that implement functionality equivalent to running that loop body for a specified number of iterations. This new set of nodes may or may not use a tensorflow loop construct. The process of conversion does not delete or change any existing operations. It only adds operations that efficiently implement the equivalent functionality. We refer to the added ops as ""converted ops"". The conversion process uses a simple greedy heuristic. It walks the loop body and tries to express the functionality of running each node in a loop with a new set of nodes. When converting an op, several cases are possible: - The op is not inside the loop body. Hence it can be used as is. - The op does not depend on the iteration number and is stateless. In this case, it can be used as is. - The op is not stateful, and depends on iteration number only through control dependencies. In this case, we can create a single op with the same inputs and attributes, but with ""converted"" control dependencies. - The op is not stateful, and all its inputs are loop invariant. In this case, similar to above, we can create a single op with the same inputs and attributes, but with ""converted"" control dependencies. - The op is stateful or at least one of the inputs is not loop invariant. In this case, we run the registered converter for that op to create a set of converted ops. All nodes in the set will have converted control dependencies corresponding to control dependencies of the original op. If the op returned multiple outputs, ""converted outputs"" could be produced by different ops in this set." 10864,_convert_adjust_contrastv2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1619,function, 10865,_convert_adjust_hue,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1626,function, 10866,_convert_adjust_saturation,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1633,function, 10867,_flatten_first_two_dims,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1642,function,Merges first two dimensions. 10868,_unflatten_first_dim,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1649,function,"Splits first dimension into [first_dim, -1]."
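For readers following the `RegisterPFor`/`RegisterPForWithArgs` docstrings above: the registration mechanics amount to a decorator that stores each converter in a table keyed by op type, with the `WithArgs` variant binding extra arguments at registration time. A self-contained sketch of that pattern (all names here are illustrative, not the TensorFlow internals):

```python
# Illustrative converter registry mirroring the pattern described above.
_CONVERTERS = {}

class register_converter_with_args:
  """Registers the decorated converter for `op_type`, binding extra args."""

  def __init__(self, op_type, **bound_args):
    self._op_type = op_type
    self._bound_args = bound_args

  def __call__(self, converter):
    def wrapped(pfor_input):
      return converter(pfor_input, **self._bound_args)
    _CONVERTERS[self._op_type] = wrapped
    return converter  # returned unchanged so decorators can stack

# One converter body shared by two op types, as in the real registry:
@register_converter_with_args("Cumsum", reverse=False)
@register_converter_with_args("Cumprod", reverse=False)
def _convert_cum_op(pfor_input, reverse):
  ...  # would build the vectorized op from pfor_input here
```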
10869,_inputs_with_flattening,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1656,function,Stacks and flattens first dim of inputs at indices `input_indices`. 10870,_convert_flatten_batch,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1688,function, 10871,_convert_batch_to_space_nd,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1704,function, 10872,_convert_space_to_batch_nd,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1730,function, 10873,_channel_flatten_input,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1750,function,"Merge the stack dimension with the channel dimension. If S is pfor's stacking dimension, then, - for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose should be cheap. - for SNHWC, we transpose to NHWCS. We then merge the S and C dimension. Args: x: ops.Tensor to transform. data_format: ""NCHW"" or ""NHWC"". Returns: A 3-element tuple with the transformed value, along with the shape for reshape and order for transpose required to transform back." 10874,_convert_fused_batch_norm,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1803,function, 10875,_convert_fused_batch_norm_grad,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1855,function, 10876,_convert_flatten_batch_shape_input,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1882,function, 10877,_convert_conv2d_backprop_filter,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1900,function, 10878,_convert_softmax,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1960,function, 10879,_convert_identity,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1972,function, 10880,_convert_identity_n,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1978,function, 10881,_convert_reshape,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1986,function, 10882,_convert_fill,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,1994,function, 10883,_convert_broadcast_to,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2009,function, 10884,_convert_expanddims,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2029,function, 10885,_convert_searchsorted,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2038,function, 10886,_convert_matrix_band_part,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2049,function, 10887,_convert_matrix_set_diag,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2059,function, 10888,_convert_matrix_diag_v2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2072,function, 10889,_convert_diag,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2087,function, 10890,_convert_matrix_diag_part_v2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2101,function, 10891,_convert_matrix_set_diag_v2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2116,function, 10892,_convert_diag_part,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2130,function, 10893,_convert_one_hot,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2142,function, 10894,_convert_slice,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2155,function, 10895,_convert_tile,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2165,function, 10896,_convert_pack,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2173,function, 10897,_convert_unpack,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2183,function, 10898,_convert_pad,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2193,function, 10899,_convert_split,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2201,function, 
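The `_channel_flatten_input` docstring above describes the layout shuffle in prose; the SNHWC case can be sanity-checked with plain `tf.transpose` and `tf.reshape` (a toy sketch with made-up sizes, not the converter itself):

```python
import tensorflow as tf

s, n, h, w, c = 5, 2, 4, 4, 3         # S is pfor's stacking dimension
x = tf.zeros([s, n, h, w, c])         # SNHWC input

y = tf.transpose(x, [1, 2, 3, 4, 0])  # SNHWC -> NHWCS
y = tf.reshape(y, [n, h, w, c * s])   # merge S into the channel dimension
print(y.shape)                        # (2, 4, 4, 15)
```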
10900,_convert_split_v,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2210,function, 10901,_convert_squeeze,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2219,function, 10902,_convert_reverse,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2227,function, 10903,_convert_transpose,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2236,function, 10904,_convert_zeroslike,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2244,function, 10905,_convert_gather,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2252,function, 10906,_convert_gather_nd,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2318,function, 10907,_convert_concatv2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2328,function, 10908,_convert_strided_slice,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2339,function, 10909,_convert_strided_slice_grad,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2372,function, 10910,_convert_check_numerics,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2408,function, 10911,_convert_matmul,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2418,function, 10912,_convert_batch_mat_mul,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2474,function, 10913,_convert_batch_mat_mul_v2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2491,function, 10914,_convert_reduction,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2509,function, 10915,_convert_argmax_argmin,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2520,function, 10916,_convert_bucketize,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2529,function, 10917,_convert_clip_by_value,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2536,function, 10918,_convert_cumfoo,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2546,function, 10919,_convert_biasadd,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2557,function, 10920,_convert_unsortedsegmentsum,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2588,function, 10921,_flatten_array_with_offset,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2614,function,"Flattens a rank 2 tensor, adding an offset to each row." 
10922,_convert_sparse_segment,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2635,function, 10923,_convert_sparse_segment_grad,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2673,function, 10924,_convert_cast,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2691,function, 10925,_convert_cwise,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2804,function, 10926,_convert_leaky_relu,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2815,function, 10927,_convert_equal,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2822,function, 10928,_convert_not_equal,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2832,function, 10929,_convert_approximate_equal,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2842,function, 10930,_convert_shape,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2851,function, 10931,_convert_shape_n,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2859,function, 10932,_convert_size,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2869,function, 10933,_convert_rank,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2878,function, 10934,_convert_addn,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2883,function, 10935,_convert_cross,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2890,function, 10936,_convert_biasaddgrad,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2898,function, 10937,_convert_grads,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2926,function, 10938,_convert_select,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2940,function, 10939,_convert_selectv2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2960,function, 10940,_transpose_dim_to_front,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2972,function, 10941,_convert_random,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,2986,function, 10942,_convert_random_with_param,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3005,function, 10943,_convert_multinomial,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3033,function, 10944,_convert_stateless_multinomial,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3075,function, 10945,_convert_einsum,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3090,function, 10946,_convert_cholesky,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3129,function, 10947,_convert_log_matrix_determinant,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3135,function, 10948,_convert_matrix_inverse,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3141,function, 10949,_convert_matrix_solve,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3148,function, 10950,_convert_matrix_triangular_solve,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3159,function, 10951,_convert_self_adjoint_eig,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3171,function, 10952,_convert_assert,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3183,function, 10953,_convert_print,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3194,function, 10954,_convert_tensor_array_v3,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3246,function, 10955,_convert_tensor_array_size_v3,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3269,function, 10956,_handle_inside_pfor,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3278,function,Returns True if handle was created inside the pfor loop. 
10957,_unstack_flow,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3295,function, 10958,_convert_tensor_array_read_v3,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3302,function, 10959,_convert_tensor_array_write_v3,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3349,function, 10960,_transpose_first_two_dims,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3395,function, 10961,_convert_tensor_array_gather_v3,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3407,function, 10962,_convert_tensor_array_scatter_v3,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3449,function, 10963,_convert_tensor_array_grad_v3,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3492,function, 10964,_stack_tensor_list_shape,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3514,function, 10965,_tile_variant,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3533,function,Stacks `t` `length` times. 10966,_untile_variant,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3541,function, 10967,_convert_tensor_list_reserve,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3546,function, 10968,_convert_tensor_list_element_shape,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3560,function, 10969,_convert_tensor_list_length,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3570,function, 10970,_stack_tensor_list,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3575,function, 10971,_convert_tensor_list_get_item,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3592,function, 10972,_convert_tensor_array_set_item,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3632,function, 10973,_convert_tensor_list_stack,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3663,function, 10974,_convert_tensor_list_gather,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3681,function, 10975,_convert_tensor_list_scatter,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3723,function, 10976,_convert_tensor_list_from_tensor,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3740,function, 10977,_stack_cache_key,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3786,function,Create cache key corresponding to a stack handle. 10978,_stack_handle_inside_pfor,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3797,function, 10979,_convert_stack_push_v2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3806,function, 10980,_convert_stack_pop_v2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3834,function, 10981,_convert_decode_csv,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3853,function, 10982,_convert_parse_single_example,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3874,function, 10983,_convert_parse_example_v2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3895,function, 10984,_convert_function_call,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3929,function, 10985,_convert_partitioned_call,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3958,function, 10986,_partition_inputs_for_indices,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3975,function, 10987,_outputs_for_branch,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,3985,function, 10988,_convert_if,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,4015,function, 10989,WhileV2,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,4067,class,Object for vectorizing V2 while_loop op.
10990,_convert_while,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,4389,function, 10991,_convert_fft,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,4403,function, 10992,_convert_rfft,tensorflow/tensorflow/python/ops/parallel_for/pfor.py,4413,function, 10993,PForTestCase,tensorflow/tensorflow/python/ops/parallel_for/test_util.py,29,class,Base class for test cases. 10994,PForTest,tensorflow/tensorflow/python/ops/parallel_for/xla_control_flow_ops_test.py,40,class, 10995,_make_unstacked,tensorflow/tensorflow/python/ops/parallel_for/xla_control_flow_ops_test.py,125,function, 10996,WhileV2Test,tensorflow/tensorflow/python/ops/parallel_for/xla_control_flow_ops_test.py,142,class, 10997,RaggedConvertToTensorOrRaggedTensorTest,tensorflow/tensorflow/python/ops/ragged/convert_to_tensor_or_ragged_tensor_op_test.py,33,class, 10998,boolean_mask,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,45,function,"Applies a boolean mask to `data` without flattening the mask dimensions. Returns a potentially ragged tensor that is formed by retaining the elements in `data` where the corresponding value in `mask` is `True`. * `output[a1...aA, i, b1...bB] = data[a1...aA, j, b1...bB]` Where `j` is the `i`th `True` entry of `mask[a1...aA]`. Note that `output` preserves the mask dimensions `a1...aA`; this differs from `tf.boolean_mask`, which flattens those dimensions. Args: data: A potentially ragged tensor. mask: A potentially ragged boolean tensor. `mask`'s shape must be a prefix of `data`'s shape. `rank(mask)` must be known statically. name: A name prefix for the returned tensor (optional). Returns: A potentially ragged tensor that is formed by retaining the elements in `data` where the corresponding value in `mask` is `True`. * `rank(output) = rank(data)`. * `output.ragged_rank = max(data.ragged_rank, rank(mask) - 1)`. Raises: ValueError: if `rank(mask)` is not known statically; or if `mask.shape` is not a prefix of `data.shape`. #### Examples: >>> # Aliases for True & False so data and mask line up. >>> T, F = (True, False) >>> tf.ragged.boolean_mask( # Mask a 2D Tensor. ... data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], ... mask=[[T, F, T], [F, F, F], [T, F, F]]).to_list() [[1, 3], [], [7]] >>> tf.ragged.boolean_mask( # Mask a 2D RaggedTensor. ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), ... tf.ragged.constant([[F, F, T], [F], [T, T]])).to_list() [[3], [], [5, 6]] >>> tf.ragged.boolean_mask( # Mask rows of a 2D RaggedTensor. ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), ... tf.ragged.constant([True, False, True])).to_list() [[1, 2, 3], [5, 6]]" 10999,tile,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,211,function,"Constructs a `RaggedTensor` by tiling a given `RaggedTensor`. The values of `input` are replicated `multiples[i]` times along the `i`th dimension (for each dimension `i`). For every dimension `axis` in `input`, the length of each output element in that dimension is the length of corresponding input element multiplied by `multiples[axis]`. Args: input: A `RaggedTensor`. multiples: A 1-D integer `Tensor`. Length must be the same as the number of dimensions in `input`. name: A name for the operation (optional). Returns: A `RaggedTensor` with the same type, rank, and ragged_rank as `input`. 
#### Example: >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> tf.tile(rt, [3, 2]).to_list() [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]]" 11000,_tile_ragged_values,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,253,function,"Builds flat_values tensor for a tiled `RaggedTensor`. Returns a tensor that repeats the values in `rt_input.flat_values` in the appropriate pattern to construct a `RaggedTensor` that tiles `rt_input` as specified by `multiples`. Args: rt_input: The `RaggedTensor` whose values should be repeated. multiples: A 1-D integer `tensor`, indicating how many times each dimension should be repeated. const_multiples: Optional constant value for multiples. Used to skip tiling dimensions where `multiples=1`. Returns: A `Tensor` with the same type and rank as `rt_input.flat_values`. #### Example: >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> _tile_ragged_values(rt, tf.constant([3, 2])).numpy() array([1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3], dtype=int32)" 11001,_tile_ragged_splits,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,312,function,"Builds nested_split tensors for a tiled `RaggedTensor`. Returns a list of split tensors that can be used to construct the `RaggedTensor` that tiles `rt_input` as specified by `multiples`. Args: rt_input: The `RaggedTensor` that is being tiled. multiples: A 1-D integer `tensor`, indicating how many times each dimension should be repeated. const_multiples: Optional constant value for multiples. Used to skip tiling dimensions where `multiples=1`. Returns: A list of 1-D integer `Tensor`s (one for each ragged dimension in `rt_input`). #### Example: >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> _tile_ragged_splits(rt, [3, 2]) []" 11002,expand_dims,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,384,function,"Inserts a dimension with shape 1 into a potentially ragged tensor's shape. Given a potentially ragged tensor `input`, this operation inserts a dimension with size 1 at the dimension `axis` of `input`'s shape. The following table gives some examples showing how `ragged.expand_dims` impacts the shapes of different input tensors. Ragged dimensions are indicated by enclosing them in parentheses. input.shape | axis | result.shape ----------------------- | ---- | ----------------------------- `[D1, D2]` | `0` | `[1, D1, D2]` `[D1, D2]` | `1` | `[D1, 1, D2]` `[D1, D2]` | `2` | `[D1, D2, 1]` `[D1, (D2), (D3), D4]` | `0` | `[1, D1, (D2), (D3), D4]` `[D1, (D2), (D3), D4]` | `1` | `[D1, 1, (D2), (D3), D4]` `[D1, (D2), (D3), D4]` | `2` | `[D1, (D2), 1, (D3), D4]` `[D1, (D2), (D3), D4]` | `3` | `[D1, (D2), (D3), 1, D4]` `[D1, (D2), (D3), D4]` | `4` | `[D1, (D2), (D3), D4, 1]` Args: input: The potentially ragged tensor that should be expanded with a new dimension. axis: An integer constant indicating where the new dimension should be inserted. name: A name for the operation (optional). Returns: A tensor with the same values as `input`, with an added dimension of size 1 at `axis`. #### Examples: >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> print(rt.shape) (2, None) >>> expanded = tf.expand_dims(rt, axis=0) >>> print(expanded.shape, expanded) (1, 2, None) >>> expanded = tf.expand_dims(rt, axis=1) >>> print(expanded.shape, expanded) (2, 1, None) >>> expanded = tf.expand_dims(rt, axis=2) >>> print(expanded.shape, expanded) (2, None, 1) " 11003,size,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,458,function,"Returns the size of a potentially ragged tensor.
The size of a ragged tensor is the size of its inner values. #### Example: >>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy() 3 Args: input: A potentially ragged `Tensor`. out_type: The numeric output type for the operation. name: A name for the operation (optional). Returns: A Tensor of type `out_type`." 11004,rank,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,485,function,"Returns the rank of a RaggedTensor. Returns a 0-D `int32` `Tensor` representing the rank of `input`. #### Example: >>> # shape of tensor 't' is [2, None, None] >>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]]) >>> tf.rank(t).numpy() 3 Args: input: A `RaggedTensor` name: A name for the operation (optional). Returns: A `Tensor` of type `int32`." 11005,ragged_one_hot,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,514,function,Applies tf.one_hot along the values of a RaggedTensor. 11006,stack_dynamic_partitions,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,544,function,"Stacks dynamic partitions of a Tensor or RaggedTensor. Returns a RaggedTensor `output` with `num_partitions` rows, where the row `output[i]` is formed by stacking all slices `data[j1...jN]` such that `partitions[j1...jN] = i`. Slices of `data` are stacked in row-major order. If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`. #### Example: >>> data = ['a', 'b', 'c', 'd', 'e'] >>> partitions = [ 3, 0, 2, 2, 3] >>> num_partitions = 5 >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions) Args: data: A `Tensor` or `RaggedTensor` containing the values to stack. partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the partition that each slice of `data` should be added to. `partitions.shape` must be a prefix of `data.shape`. Values must be greater than or equal to zero, and less than `num_partitions`. `partitions` is not required to be sorted. num_partitions: An `int32` or `int64` scalar specifying the number of partitions to output. This determines the number of rows in `output`. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` containing the stacked partitions. The returned tensor has the same dtype as `data`, and its shape is `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a ragged dimension whose length is the number of data slices stacked for each `partition`." 11007,reverse,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,648,function,"Reverses a RaggedTensor along the specified axes. #### Example: >>> data = tf.ragged.constant([ ... [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10], [11, 12]]]) >>> tf.reverse(data, axis=[0, 2]) Args: tensor: A 'RaggedTensor' to reverse. axis: A list or tuple of 'int' or a constant 1D 'tf.Tensor'. The indices of the axes to reverse. name: A name prefix for the returned tensor (optional). Returns: A 'RaggedTensor'." 11008,cross,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,706,function,"Generates feature cross from a list of tensors. The input tensors must have `rank=2`, and must all have the same number of rows. The result is a `RaggedTensor` with the same number of rows as the inputs, where `result[row]` contains a list of all combinations of values formed by taking a single value from each input's corresponding row (`inputs[i][row]`). Values are combined by joining their strings with '_X_'. 
E.g.: >>> tf.ragged.cross([tf.ragged.constant([['a'], ['b', 'c']]), ... tf.ragged.constant([['d'], ['e']]), ... tf.ragged.constant([['f'], ['g']])]) Args: inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`. name: Optional name for the op. Returns: A 2D `RaggedTensor` of type `string`." 11009,cross_hashed,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,733,function,"Generates hashed feature cross from a list of tensors. The input tensors must have `rank=2`, and must all have the same number of rows. The result is a `RaggedTensor` with the same number of rows as the inputs, where `result[row]` contains a list of all combinations of values formed by taking a single value from each input's corresponding row (`inputs[i][row]`). Values are combined by hashing together their fingerprints. E.g.: >>> tf.ragged.cross_hashed([tf.ragged.constant([['a'], ['b', 'c']]), ... tf.ragged.constant([['d'], ['e']]), ... tf.ragged.constant([['f'], ['g']])], ... num_buckets=100) Args: inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`. num_buckets: A non-negative `int` that is used to bucket the hashed values. If `num_buckets != 0`, then `output = hashed_value % num_buckets`. hash_key: Integer hash_key that will be used by the `FingerprintCat64` function. If not given, a default key is used. name: Optional name for the op. Returns: A 2D `RaggedTensor` of type `int64`." 11010,_cross_internal,tensorflow/tensorflow/python/ops/ragged/ragged_array_ops.py,771,function,Generates feature cross from a list of ragged and dense tensors. 11011,RaggedBatchGatherOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py,37,class, 11012,batch_gather,tensorflow/tensorflow/python/ops/ragged/ragged_batch_gather_ops.py,27,function,"Gathers slices from `params` according to `indices` with batch dims. This operation is similar to `gather`, but it assumes that the leading `N` dimensions of `indices` and `params` are batch dimensions, and performs a gather within each batch. In particular, when using this operation with `N` batch dimensions `B1...BN`: * `indices` has shape `[B1...BN, I]` * `params` has shape `[B1...BN, P1...PM]`. * `result` has shape `[B1...BN, I, P2...PM]`. * `result[b1...bN, i, p2...pM] = params[b1...bN, indices[b1...bN, i], p2...pM]` Args: params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`, `M>0`). indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`). name: A name for the operation (optional). Returns: A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`. `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`. #### Example: >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) >>> indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]]) >>> tf.compat.v1.batch_gather(params, indices) " 11013,batch_gather_with_default,tensorflow/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py,38,function,"Same as `batch_gather` but inserts `default_value` for invalid indices. This operation is similar to `batch_gather` except that it will substitute `default_value` for the values at invalid indices. See `batch_gather` for more details. Args: params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`, `M>0`). indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`). default_value: A value to be inserted in places where `indices` are out of bounds. Must be the same dtype as params and either a scalar or rank 1.
name: A name for the operation (optional). Returns: A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`. `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`. #### Example: >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]]) >>> batch_gather_with_default(params, indices, 'FOO') " 11014,_get_pad_shape,tensorflow/tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py,144,function,Gets the RaggedTensorDynamicShape for the pad tensor. 11015,RaggedBooleanMaskOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py,34,class, 11016,RaggedConcatOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_concat_op_test.py,35,class, 11017,concat,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,34,function,"Concatenates potentially ragged tensors along one dimension. Given a list of tensors with the same rank `K` (`K >= axis`), returns a rank-`K` `RaggedTensor` `result` such that `result[i0...iaxis]` is the concatenation of `[rt[i0...iaxis] for rt in values]`. Args: values: A list of potentially ragged tensors. May not be empty. All `values` must have the same rank and the same dtype; but unlike `tf.concat`, they can have arbitrary shapes. axis: A python integer, indicating the dimension along which to concatenate. (Note: Unlike `tf.concat`, the `axis` parameter must be statically known.) Negative values are supported only if the rank of at least one `values` value is statically known. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` with rank `K`. `result.ragged_rank=max(axis, max(rt.ragged_rank for rt in values]))`. Raises: ValueError: If `values` is empty, if `axis` is out of bounds or if the input tensors have different ranks. #### Example: >>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]]) >>> t2 = tf.ragged.constant([[6], [7, 8, 9]]) >>> tf.concat([t1, t2], axis=0) >>> tf.concat([t1, t2], axis=1) " 11018,stack,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,76,function,"Stacks a list of rank-`R` tensors into one rank-`(R+1)` `RaggedTensor`. Given a list of tensors or ragged tensors with the same rank `R` (`R >= axis`), returns a rank-`R+1` `RaggedTensor` `result` such that `result[i0...iaxis]` is `[value[i0...iaxis] for value in values]`. #### Examples: >>> # Stacking two ragged tensors. >>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]]) >>> t2 = tf.ragged.constant([[6], [7, 8, 9]]) >>> tf.ragged.stack([t1, t2], axis=0) >>> tf.ragged.stack([t1, t2], axis=1) >>> # Stacking two dense tensors with different sizes. >>> t3 = tf.constant([[1, 2, 3], [4, 5, 6]]) >>> t4 = tf.constant([[5], [6], [7]]) >>> tf.ragged.stack([t3, t4], axis=0) Args: values: A list of `tf.Tensor` or `tf.RaggedTensor`. May not be empty. All `values` must have the same rank and the same dtype; but unlike `tf.stack`, they can have arbitrary dimension sizes. axis: A python integer, indicating the dimension along which to stack. (Note: Unlike `tf.stack`, the `axis` parameter must be statically known.) Negative values are supported only if the rank of at least one `values` value is statically known. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` with rank `R+1`. `result.ragged_rank=1+max(axis, max(rt.ragged_rank for rt in values]))`. Raises: ValueError: If `values` is empty, if `axis` is out of bounds or if the input tensors have different ranks." 
11019,_ragged_stack_concat_helper,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,123,function,"Helper function to concatenate or stack ragged tensors. Args: rt_inputs: A list of RaggedTensors or Tensors to combine. axis: The axis along which to concatenate or stack. stack_values: A boolean -- if true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor. Raises: ValueError: If rt_inputs is empty, or if axis is out of range." 11020,_ragged_stack_concat_axis_0,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,209,function,"Helper function to concatenate or stack ragged tensors along axis 0. Args: rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. stack_values: Boolean. If true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor." 11021,_ragged_stack_concat_axis_1,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,244,function,"Helper function to concatenate or stack ragged tensors along axis 1. Args: rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. stack_values: Boolean. If true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor." 11022,_copy_row_shape,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,295,function,Sets splits.shape to [rt.shape[0]+1] for each rt in rt_inputs. 11023,_increase_ragged_rank_to,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,302,function,Adds ragged dimensions to `rt_input` so it has the desired ragged rank. 11024,_concat_ragged_splits,tensorflow/tensorflow/python/ops/ragged/ragged_concat_ops.py,315,function,Concatenates a list of RaggedTensor splits to form a single splits. 11025,auto_cast_partition_dtype,tensorflow/tensorflow/python/ops/ragged/ragged_config.py,22,function,"Whether incompatible row-partitioning dtypes should be auto-converted. If true, then operations that combine RaggedTensors but have different row-partitioning tensor dtypes will be automatically cast to a compatible dtype (`tf.int64`). If false, then such operations will result in an error. Returns: `bool`" 11026,RaggedConstOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_const_op_test.py,33,class, 11027,_normalize_pylist,tensorflow/tensorflow/python/ops/ragged/ragged_const_op_test.py,404,function,Convert all (possibly nested) np.arrays contained in item to list. 11028,RaggedConstantValueOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_constant_value_op_test.py,32,class, 11029,_normalize_pylist,tensorflow/tensorflow/python/ops/ragged/ragged_constant_value_op_test.py,319,function,Convert all (possibly nested) np.arrays contained in item to list. 11030,from_tensor,tensorflow/tensorflow/python/ops/ragged/ragged_conversion_ops.py,30,function, 11031,to_tensor,tensorflow/tensorflow/python/ops/ragged/ragged_conversion_ops.py,48,function, 11032,ragged_to_dense,tensorflow/tensorflow/python/ops/ragged/ragged_conversion_ops.py,55,function,Create a dense tensor from a ragged tensor. 11033,_ragged_tensor_to_tensor_grad,tensorflow/tensorflow/python/ops/ragged/ragged_conversion_ops.py,61,function,Gradient for RaggedToTensor op. 11034,_rank_ignoring_leading_dims_with_size_1,tensorflow/tensorflow/python/ops/ragged/ragged_conversion_ops.py,110,function,"Returns `rank(value)`, ignoring any leading dimensions with size 1."
11035,to_sparse,tensorflow/tensorflow/python/ops/ragged/ragged_conversion_ops.py,140,function, 11036,from_sparse,tensorflow/tensorflow/python/ops/ragged/ragged_conversion_ops.py,144,function, 11037,sparse_const,tensorflow/tensorflow/python/ops/ragged/ragged_cross_op_test.py,40,function, 11038,RaggedCrossOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_cross_op_test.py,55,class, 11039,_get_arg_infos,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,64,function,"Returns an `_ArgInfo` for each argument of `func` specified by `arg_names`. Args: func: The function whose arguments should be described. arg_names: The names of the arguments to get info for. Returns: A tuple of `_ArgInfo`s." 11040,_is_convertible_to_tensor,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,90,function,Returns true if `value` is convertible to a `Tensor`. 11041,UnaryRaggedElementwiseDispatcher,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,107,class,OpDispatcher for unary ops that map a base op across ragged values. 11042,BinaryRaggedElementwiseDispatcher,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,163,class,"OpDispatcher for binary ops that map a base op across ragged values. Supports broadcasting." 11043,RaggedDispatcher,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,234,class,"OpDispatcher for ragged ops. Dispatches to a wrapped op-handler if at least one of the `tensor_args` arguments is a RaggedTensor or a RaggedTensorValue; and all of the `tensor_args` arguments are convertible to Tensor or RaggedTensor." 11044,_ragged_gather_v1,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,415,function, 11045,_ragged_gather_nd_v1,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,426,function, 11046,_ragged_expand_dims_v1,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,434,function, 11047,_ragged_size_v1,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,440,function, 11048,_ragged_squeeze_v1,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,444,function, 11049,_ragged_dynamic_partition,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,450,function,RaggedTensor Dispatch override for tf.dynamic_partition. 11050,_ragged_nn_dropout_v1,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,459,function, 11051,_ragged_nn_dropout_v2,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,469,function, 11052,register_dispatchers,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,528,function,Constructs & registers OpDispatchers for ragged ops. 11053,_ragged_op_signature,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,554,function,"Returns a signature for the given op, marking ragged args in bold." 11054,_op_is_in_tf_version,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,581,function, 11055,ragged_op_list,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch.py,591,function,Returns a string listing operators that have dispatchers registered.
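A minimal sketch (assuming TF 2.x, where these dispatchers are registered on import) of what the dispatch machinery above provides: ordinary TF ops accept `RaggedTensor` arguments directly:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [], [3]])
# Elementwise ops route through the ragged elementwise dispatchers.
print(tf.add(rt, 10).to_list())  # [[11, 12], [], [13]]
# tf.size has a ragged override that counts the flat values.
print(tf.size(rt).numpy())       # 3
```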
11056,RaggedDispatchTest,tensorflow/tensorflow/python/ops/ragged/ragged_dispatch_test.py,145,class, 11057,RaggedSegmentStackOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py,36,class, 11058,RaggedTensorTest,tensorflow/tensorflow/python/ops/ragged/ragged_eager_test.py,29,class, 11059,RaggedExpandDimsOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_expand_dims_op_test.py,30,class, 11060,constant,tensorflow/tensorflow/python/ops/ragged/ragged_factory_ops.py,39,function,"Constructs a constant RaggedTensor from a nested Python list. Example: >>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) All scalar values in `pylist` must have the same nesting depth `K`, and the returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar values, then `K` is one greater than the maximum depth of empty lists in `pylist`. All scalar values in `pylist` must be compatible with `dtype`. Args: pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that is not a `list`, `tuple` or `np.ndarray` must be a scalar value compatible with `dtype`. dtype: The type of elements for the returned `RaggedTensor`. If not specified, then a default is chosen based on the scalar values in `pylist`. ragged_rank: An integer specifying the ragged rank of the returned `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to `max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified. inner_shape: A tuple of integers specifying the shape for individual inner values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank` is not specified. If `ragged_rank` is specified, then a default is chosen based on the contents of `pylist`. name: A name prefix for the returned tensor (optional). row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits. One of `tf.int32` or `tf.int64`. Returns: A potentially ragged tensor with rank `K` and the specified `ragged_rank`, containing the values from `pylist`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`." 11061,constant_value,tensorflow/tensorflow/python/ops/ragged/ragged_factory_ops.py,92,function,"Constructs a RaggedTensorValue from a nested Python list. Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`. If you wish to construct a constant `RaggedTensor`, use [`ragged.constant(...)`](constant.md) instead. Example: >>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]]) tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]), row_splits=array([0, 2, 3, 6])) All scalar values in `pylist` must have the same nesting depth `K`, and the returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no scalar values, then `K` is one greater than the maximum depth of empty lists in `pylist`. All scalar values in `pylist` must be compatible with `dtype`. Args: pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that is not a `list` or `tuple` must be a scalar value compatible with `dtype`. dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`. If not specified, then a default is chosen based on the scalar values in `pylist`. ragged_rank: An integer specifying the ragged rank of the returned `RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to `max(0, K - 1)` if `inner_shape` is not specified. 
Defaults to `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified. inner_shape: A tuple of integers specifying the shape for individual inner values in the returned `RaggedTensorValue`. Defaults to `()` if `ragged_rank` is not specified. If `ragged_rank` is specified, then a default is chosen based on the contents of `pylist`. row_splits_dtype: data type for the constructed `RaggedTensorValue`'s row_splits. One of `numpy.int32` or `numpy.int64`. Returns: A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified `ragged_rank`, containing the values from `pylist`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`." 11062,_constant_value,tensorflow/tensorflow/python/ops/ragged/ragged_factory_ops.py,150,function,"Constructs a constant RaggedTensor or RaggedTensorValue. Args: ragged_factory: A factory function with the signature: `ragged_factory(values, row_splits)` inner_factory: A factory function with the signature: `inner_factory(pylist, dtype, shape, name)` pylist: A nested `list`, `tuple` or `np.ndarray`. dtype: Data type for returned value. ragged_rank: Ragged rank for returned value. inner_shape: Inner value shape for returned value. Returns: A value returned by `ragged_factory` or `inner_factory`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`." 11063,_find_scalar_and_max_depth,tensorflow/tensorflow/python/ops/ragged/ragged_factory_ops.py,246,function,"Finds nesting depth of scalar values in pylist. Args: pylist: A nested python `list` or `tuple`. Returns: A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting depth of scalar values in `pylist`, or `None` if `pylist` contains no scalars. `max_depth` is the maximum depth of `pylist` (including empty lists). Raises: ValueError: If pylist has inconsistent nesting depths for scalars." 11064,_default_inner_shape_for_pylist,tensorflow/tensorflow/python/ops/ragged/ragged_factory_ops.py,277,function,Computes a default inner shape for the given python list. 11065,placeholder,tensorflow/tensorflow/python/ops/ragged/ragged_factory_ops.py,318,function,"Creates a placeholder for a `tf.RaggedTensor` that will always be fed. **Important**: This ragged tensor will produce an error if evaluated. Its value must be fed using the `feed_dict` optional argument to `Session.run()`, `Tensor.eval()`, or `Operation.run()`. @compatibility{eager} Placeholders are not compatible with eager execution. Args: dtype: The data type for the `RaggedTensor`. ragged_rank: The ragged rank for the `RaggedTensor` value_shape: The shape for individual flat values in the `RaggedTensor`. name: A name for the operation (optional). Returns: A `RaggedTensor` that may be used as a handle for feeding a value, but not evaluated directly. Raises: RuntimeError: if eager execution is enabled" 11066,RaggedTensorToSparseOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py,32,class, 11067,RaggedTensorFromTensorOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py,35,class, 11068,map_flat_values,tensorflow/tensorflow/python/ops/ragged/ragged_functional_ops.py,33,function,"Applies `op` to the values of one or more RaggedTensors. Replaces any `RaggedTensor` in `args` or `kwargs` with its `flat_values` tensor, and then calls `op`. 
Returns a `RaggedTensor` that is constructed from the input `RaggedTensor`s' `nested_row_splits` and the value returned by the `op`. If the input arguments contain multiple `RaggedTensor`s, then they must have identical `nested_row_splits`. Examples: >>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]]) >>> map_flat_values(tf.ones_like, rt).to_list() [[1, 1, 1], [], [1, 1], [1]] >>> map_flat_values(tf.multiply, rt, rt).to_list() [[1, 4, 9], [], [16, 25], [36]] >>> map_flat_values(tf.add, rt, 5).to_list() [[6, 7, 8], [], [9, 10], [11]] Args: op: The operation that should be applied to the RaggedTensor `flat_values`. `op` is typically an element-wise operation (such as math_ops.add), but any operation that preserves the size of the outermost dimension can be used. I.e., `shape[0]` of the value returned by `op` must match `shape[0]` of the `RaggedTensor`s' `flat_values` tensors. *args: Arguments for `op`. **kwargs: Keyword arguments for `op`. Returns: A `RaggedTensor` whose `ragged_rank` matches the `ragged_rank` of all input `RaggedTensor`s. Raises: ValueError: If args contains no `RaggedTensors`, or if the `nested_splits` of the input `RaggedTensor`s are not identical." 11069,_replace_ragged_with_flat_values,tensorflow/tensorflow/python/ops/ragged/ragged_functional_ops.py,97,function,"Replace RaggedTensors with their flat_values, and record their splits. Returns a copy of `value`, with any nested `RaggedTensor`s replaced by their `flat_values` tensor. Looks inside lists, tuples, and dicts. Appends each `RaggedTensor`'s `nested_splits` to `nested_splits_lists`. Args: value: The value that should be transformed by replacing `RaggedTensors`. nested_splits_lists: An output parameter used to record the `nested_splits` for any `RaggedTensors` that were replaced. Returns: A copy of `value` with nested `RaggedTensors` replaced by their `values`." 11070,RaggedGatherNdOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py,35,class, 11071,RaggedGatherOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_gather_op_test.py,41,class, 11072,gather,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,36,function,"Gathers ragged slices from `params` axis `0` according to `indices`. See `tf.gather` for full documentation. (This version has the same API as `tf.gather`, but supports ragged `params` and `indices`.) Examples: >>> params = tf.constant(['a', 'b', 'c', 'd', 'e']) >>> indices = tf.constant([3, 1, 2, 1, 0]) >>> ragged_params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) >>> ragged_indices = tf.ragged.constant([[3, 1, 2], [1], [], [0]]) >>> tf.gather(params, ragged_indices) >>> tf.gather(ragged_params, indices) >>> tf.gather(ragged_params, ragged_indices) Args: params: The potentially ragged tensor from which to gather values. Must be at least rank 1. indices: The potentially ragged tensor indicating which values to gather. Must have dtype `int32` or `int64`. Values must be in the range `[0, params.shape[0])`. validate_indices: Ignored. axis: The axis in `params` to gather `indices` from. batch_dims: The number of batch dimensions. name: A name for the operation (optional). Returns: A `RaggedTensor`, where `output.dtype=params.dtype` and `output.shape=indices.shape + params.shape[1:]` and `output.ragged_rank=indices.shape.ndims + params.ragged_rank`. Raises: ValueError: If indices.shape.ndims is not known statically. " 11073,_gather,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,114,function,"Helper that implements the body for ragged gather().
Assumes that `params` and `indices` have been converted to tensors or ragged tensors, and that `axis` and `batch_dims` have been normalized to be positive. (So these conversions & normalizations can be skipped in recursive calls to _gather). Args: params: The tensor from which to gather values. indices: The indices of values to gather. axis: The axis in `params` to gather `indices` from. batch_dims: The number of batch dimensions. Returns: A potentially ragged tensor." 11074,_batch_gather,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,176,function,"Helper that implements the body for ragged gather() when batch_dims>0. Args: params: The tensor from which to gather values. indices: The indices of values to gather. axis: The axis in `params` to gather `indices` from. batch_dims: The number of batch dimensions. Returns: A potentially ragged tensor." 11075,_axis_gather,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,258,function,"Helper that implements ragged gather when axis>0 and batch_dims==0. Args: params: The tensor from which to gather values. indices: The indices of values to gather. axis: The axis in `params` to gather `indices` from. Returns: A potentially ragged tensor." 11076,_flatten_dims_0_and_1,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,298,function,Returns a copy of `t` with the outer two dimensions merged. 11077,_row_starts,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,307,function,Returns the start indices for the rows in `t`. 11078,_increase_rank_to,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,316,function,Adds *trailing* size-1 dimensions to `t` until it has the given rank. 11079,gather_nd,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,330,function,"Gather slices from `params` using `n`-dimensional indices. This operation is similar to `gather`, but it uses the innermost dimension of `indices` to define a slice into `params`. In particular, if: * `indices` has shape `[A1...AN, I]` * `params` has shape `[B1...BM]` Then: * `result` has shape `[A1...AN, B_{I+1}...BM]`. * `result[a1...aN] = params[indices[a1...aN, :]]` Args: params: A potentially ragged tensor with shape `[B1...BM]`. indices: A potentially ragged tensor with shape `[A1...AN, I]`. batch_dims: Must be zero. name: A name for the operation (optional). Returns: A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`. #### Examples: >>> params = tf.ragged.constant( ... [ [ ['000', '001'], ['010' ] ], ... [ ['100' ], ['110', '111', '112'], ['120'] ], ... [ [ ], ['210' ] ] ]) >>> # Gather 2D slices from a 3D tensor >>> tf.gather_nd(params, [[2], [0]]) >>> # Gather 1D slices from a 3D tensor >>> tf.gather_nd(params, [[2, 1], [0, 0]]) >>> # Gather scalars from a 3D tensor >>> tf.gather_nd(params, [[0, 0, 1], [1, 1, 2]]).numpy() array([b'001', b'112'], dtype=object)" 11080,_ragged_gather_grad,tensorflow/tensorflow/python/ops/ragged/ragged_gather_ops.py,473,function,Gradient for RaggedGather op. 11081,ragged_tensor_getitem,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,35,function,"Returns the specified piece of this RaggedTensor. Supports multidimensional indexing and slicing, with one restriction: indexing into a ragged inner dimension is not allowed. This case is problematic because the indicated value may exist in some rows but not others.
In such cases, it's not obvious whether we should (1) report an IndexError; (2) use a default value; or (3) skip that value and return a tensor with fewer rows than we started with. Following the guiding principles of Python (""In the face of ambiguity, refuse the temptation to guess""), we simply disallow this operation. Args: self: The RaggedTensor to slice. key: Indicates which piece of the RaggedTensor to return, using standard Python semantics (e.g., negative values index from the end). `key` may have any of the following types: * `int` constant * Scalar integer `Tensor` * `slice` containing integer constants and/or scalar integer `Tensor`s * `Ellipsis` * `tf.newaxis` * `tuple` containing any of the above (for multidimensional indexing) Returns: A `Tensor` or `RaggedTensor` object. Values that include at least one ragged dimension are returned as `RaggedTensor`. Values that include no ragged dimensions are returned as `Tensor`. See above for examples of expressions that return `Tensor`s vs `RaggedTensor`s. Raises: ValueError: If `key` is out of bounds. ValueError: If `key` is not supported. TypeError: If the indices in `key` have an unsupported type. Examples: >>> # A 2-D ragged tensor with 1 ragged dimension. >>> rt = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g']]) >>> rt[0].numpy() # First row (1-D `Tensor`) array([b'a', b'b', b'c'], dtype=object) >>> rt[:3].to_list() # First three rows (2-D RaggedTensor) [[b'a', b'b', b'c'], [b'd', b'e'], [b'f']] >>> rt[3, 0].numpy() # 1st element of 4th row (scalar) b'g' >>> # A 3-D ragged tensor with 2 ragged dimensions. >>> rt = tf.ragged.constant([[[1, 2, 3], [4]], ... [[5], [], [6]], ... [[7]], ... [[8, 9], [10]]]) >>> rt[1].to_list() # Second row (2-D RaggedTensor) [[5], [], [6]] >>> rt[3, 0].numpy() # First element of fourth row (1-D Tensor) array([8, 9], dtype=int32) >>> rt[:, 1:3].to_list() # Items 1-3 of each row (3-D RaggedTensor) [[[4]], [[], [6]], [], [[10]]] >>> rt[:, -1:].to_list() # Last item of each row (3-D RaggedTensor) [[[4]], [[6]], [[7]], [[10]]]" 11082,_ragged_getitem,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,106,function,"Helper for indexing and slicing ragged tensors with __getitem__(). Extracts the specified piece of the `rt_input`. See `RaggedTensor.__getitem__` for examples and restrictions. Args: rt_input: The `RaggedTensor` from which a piece should be returned. key_list: The list of keys specifying which piece to return. Each key corresponds with a separate dimension. Returns: The indicated piece of rt_input. Raises: ValueError: If `key_list` is not supported. TypeError: If any keys in `key_list` have an unsupported type." 11083,_slice_ragged_row_dimension,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,190,function,"Slice the outer dimension of `rt_input` according to the given `slice`. Args: rt_input: The `RaggedTensor` to slice. row_key: The `slice` object that should be used to slice `rt_input`. Returns: A `RaggedTensor` containing the indicated slice of `rt_input`." 11084,_ragged_getitem_inner_dimensions,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,230,function,"Retrieve inner dimensions, keeping outermost dimension unchanged. Args: rt_input: The `RaggedTensor` or `Tensor` from which a piece should be extracted. key_list: The __getitem__ keys for slicing the inner dimensions. Returns: A `RaggedTensor`. Raises: ValueError: If key_list is not supported." 
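A minimal sketch (assuming TF 2.x; the values are illustrative) of the `__getitem__` behavior that the entries above implement:

```python
import tensorflow as tf

rt = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g']])
print(rt[0].numpy())        # [b'a' b'b' b'c'] -- a single row is a dense Tensor
print(rt[:2].to_list())     # [[b'a', b'b', b'c'], [b'd', b'e']]
print(rt[:, 1:].to_list())  # [[b'b', b'c'], [b'e'], [], []] -- inner slices stay ragged
```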
11085,_slice_length,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,343,function,"Computes the number of elements in a slice of a value with a given length. Returns the equivalent of: `len(range(value_length)[slice_key])` Args: value_length: Scalar int `Tensor`: the length of the value being sliced. slice_key: A `slice` object used to slice elements from the value. Returns: The number of elements in the sliced value." 11086,_expand_ellipsis,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,364,function,"Expands the ellipsis at the start of `key_list`. Assumes that the first element of `key_list` is Ellipsis. This will either remove the Ellipsis (if it corresponds to zero indices) or prepend a new `slice(None, None, None)` (if it corresponds to more than zero indices). Args: key_list: The arguments to `__getitem__()`. num_remaining_dims: The number of dimensions remaining. Returns: A copy of `key_list` with the ellipsis expanded. Raises: ValueError: If ragged_rank.shape.ndims is None IndexError: If there are too many elements in `key_list`." 11087,_tensors_in_key_list,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,392,function,Generates all Tensors in the given slice spec. 11088,_build_ragged_tensor_from_value_ranges,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,409,function,"Returns a `RaggedTensor` containing the specified sequences of values. Returns a RaggedTensor `output` where: ```python output.shape[0] = starts.shape[0] output[i] = values[starts[i]:limits[i]:step] ``` Requires that `starts.shape == limits.shape` and `0 <= starts[i] <= limits[i] <= values.shape[0]`. Args: starts: 1D integer Tensor specifying the start indices for the sequences of values to include. limits: 1D integer Tensor specifying the limit indices for the sequences of values to include. step: Integer value specifying the step size for strided slices. values: The set of values to select from. Returns: A `RaggedTensor`. Raises: ValueError: Until the prerequisite ops are checked in." 11089,_if_ge_zero,tensorflow/tensorflow/python/ops/ragged/ragged_getitem.py,459,function,Returns `true_fn() if value >= 0 else false_fn()`. 11090,_SliceBuilder,tensorflow/tensorflow/python/ops/ragged/ragged_getitem_test.py,37,class,"Helper to construct arguments for __getitem__. Usage: `_SliceBuilder()[<expr>]` returns the slice_spec that Python generates for `<expr>`." 11091,_make_tensor_slice_spec,tensorflow/tensorflow/python/ops/ragged/ragged_getitem_test.py,50,function,"Wraps all integers in an extended slice spec w/ a tensor. This function is used to help test slicing when the slice spec contains tensors, rather than integers. Args: slice_spec: The extended slice spec. use_constant: If true, then wrap each integer with a tf.constant. If false, then wrap each integer with a tf.placeholder. Returns: A copy of slice_spec, but with each integer i replaced with tf.constant(i)." 11092,RaggedGetItemTest,tensorflow/tensorflow/python/ops/ragged/ragged_getitem_test.py,118,class, 11093,RaggedMapInnerValuesOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py,34,class, 11094,RaggedMapOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py,38,class, 11095,map_fn,tensorflow/tensorflow/python/ops/ragged/ragged_map_ops.py,30,function,"map on the list of tensors unpacked from `elems` on dimension 0. The simplest version of `map_fn` repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems`.
`dtype` is the data type of the return value of `fn`. Users must provide `dtype` if it is different from the data type of `elems`. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `[values.shape[0]] + fn(values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Furthermore, `fn` may emit a different structure than its input. For example, `fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. In this case, the `dtype` parameter is not optional: `dtype` must be a type or (possibly nested) tuple of types matching the output of `fn`. To apply a functional operation to the nonzero elements of a SparseTensor one of the following methods is recommended. First, if the function is expressible as TensorFlow ops, use ```python result = SparseTensor(input.indices, fn(input.values), input.dense_shape) ``` If, however, the function is not expressible as a TensorFlow op, then use ```python result = SparseTensor( input.indices, map_fn(fn, input.values), input.dense_shape) ``` instead. When executing eagerly, map_fn does not execute in parallel even if `parallel_iterations` is set to a value > 1. You can still get the performance benefits of running a function in parallel by using the `tf.contrib.eager.defun` decorator, ```python # Assume the function being used in map_fn is fn. # To ensure map_fn calls fn in parallel, use the defun decorator. @tf.contrib.eager.defun def func(tensor): return tf.map_fn(fn, tensor) ``` Note that if you use the defun decorator, any non-TensorFlow Python code that you may have written in your function won't get executed. See `tf.contrib.eager.defun` for more details. The recommendation would be to debug without defun but switch to defun to get performance benefits of running map_fn in parallel. Args: fn: The callable to be performed. It accepts one argument, which will have the same (possibly nested) structure as `elems`. Its output must have the same structure as `dtype` if one is provided, otherwise it must have the same structure as `elems`. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be applied to `fn`. dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure of Tensors differing from the structure of `elems`, then `dtype` is not optional and must have the same structure as the output of `fn`. Use `RaggedTensorType` to declare an output of type `RaggedTensor`. parallel_iterations: (optional) The number of iterations allowed to run in parallel. When graph building, the default value is 10. While executing eagerly, the default value is set to 1. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. infer_shape: (optional) False disables tests for consistent output shapes. name: (optional) Name prefix for the returned tensors. Returns: A possibly nested sequence of potentially ragged tensors. Each tensor packs the results of applying `fn` to tensors unpacked from `elems` along the first dimension, from first to last. 
Raises: TypeError: if `fn` is not callable or the structure of the output of `fn` and `dtype` do not match, or if elems is a SparseTensor. ValueError: if the lengths of the output of `fn` and `dtype` do not match. #### Examples: ```python elems = np.array([1, 2, 3, 4, 5, 6]) squares = map_fn(lambda x: x * x, elems) # squares == [1, 4, 9, 16, 25, 36] ``` ```python elems = (np.array([1, 2, 3]), np.array([-1, 1, -1])) alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64) # alternate == [-1, 2, -3] ``` ```python elems = np.array([1, 2, 3]) alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64)) # alternates[0] == [1, 2, 3] # alternates[1] == [-1, -2, -3] ``` ```python elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]]) mean = map_fn(tf.reduce_mean, elems) # mean == [2, 4, 6] ``` ```python elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]], dtype=tf.int64) out = map_fn(fn=lambda x: x+1, elems, dtype=ragged.RaggedTensorType(type=tf.int64, ragged_rank=0)) # out = tf.ragged.constant([[2, 3, 4], [5, 6], [7, 8]]) ```" 11096,_ragged_type_to_spec,tensorflow/tensorflow/python/ops/ragged/ragged_map_ops.py,174,function, 11097,range,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,43,function,"Returns a `RaggedTensor` containing the specified sequences of numbers. Each row of the returned `RaggedTensor` contains a single sequence: ```python ragged.range(starts, limits, deltas)[i] == tf.range(starts[i], limits[i], deltas[i]) ``` If `starts[i] >= limits[i] and deltas[i] > 0`, then `output[i]` will be an empty list. Similarly, if `starts[i] <= limits[i] and deltas[i] < 0`, then `output[i]` will be an empty list. This behavior is consistent with the Python `range` function, but differs from the `tf.range` op, which returns an error for these cases. Examples: >>> tf.ragged.range([3, 5, 2]).to_list() [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]] >>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list() [[0, 1, 2], [], [8, 9, 10, 11]] >>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list() [[0, 2], [], [8, 10]] The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. The vector inputs must all have the same size. Scalar inputs are broadcast to match the size of the vector inputs. Args: starts: Vector or scalar `Tensor`. Specifies the first entry for each range if `limits` is not `None`; otherwise, specifies the range limits, and the first entries default to `0`. limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for each range. deltas: Vector or scalar `Tensor`. Specifies the increment for each range. Defaults to `1`. dtype: The type of the elements of the resulting tensor. If not specified, then a value is chosen based on the other args. name: A name for the operation. row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` tensor. One of `tf.int32` or `tf.int64`. Returns: A `RaggedTensor` of type `dtype` with `ragged_rank=1`." 11098,_infer_matching_dtype,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,112,function,"Infers a matching dtype for tensors, and casts them to that dtype." 11099,_ragged_segment_aggregate,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,157,function,"Aggregates along segments of a RaggedTensor using `unsorted_segment_op`. Returns a RaggedTensor `output` with `num_segments` rows, where the row `output[i]` is formed by combining all rows of `data` whose corresponding `segment_id` is `i`. The values in each row are combined using `unsorted_segment_op`.
The length of the row `output[i]` will be the maximum of the lengths of all rows of `data` whose corresponding `segment_id` is `i`. If no `data` rows correspond to a given segment ID, then the output row for that segment ID will be empty. Args: unsorted_segment_op: The tensorflow `op` that should be used to combine values in each row. Must have the same signature and basic behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc. data: A `RaggedTensor` containing the values to be combined. segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or `int32`. `segment_ids.shape` must be a prefix of `data.shape`. `segment_ids` is not required to be sorted. num_segments: An `int32` or `int64` scalar. separator: An optional string. Defaults to None. The separator to use when joining. Only used for string types. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` containing the aggregated values. The returned tensor has the same dtype as `data`, and its shape is `[num_segments] + data.shape[segment_ids.rank:]`. Raises: ValueError: If segment_ids.shape is not a prefix of data.shape." 11100,segment_sum,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,262,function, 11101,segment_prod,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,271,function, 11102,segment_min,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,280,function, 11103,segment_max,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,289,function, 11104,segment_mean,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,298,function,"For docs, see: _RAGGED_SEGMENT_DOCSTRING." 11105,segment_sqrt_n,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,313,function,"For docs, see: _RAGGED_SEGMENT_DOCSTRING." 11106,_set_ragged_segment_docstring,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,329,function, 11107,ragged_reduce_aggregate,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,426,function,"Aggregates across axes of a RaggedTensor using the given `Tensor` ops. Reduces `rt_input` along the dimensions given in `axis`. The rank of the tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified, then all dimensions are reduced, and a scalar value is returned. This op assumes that `reduce_op` and `unsorted_segment_op` are associative; if not, then reducing multiple axes will return incorrect results. (In particular, reducing multiple axes is currently implemented by reducing the axes one at a time.) Args: reduce_op: The tensorflow `op` that should be used to reduce values in uniform dimensions. Must have the same signature and basic behavior as `reduce_sum`, `reduce_max`, etc. unsorted_segment_op: The tensorflow `op` that should be used to combine values in ragged dimensions. Must have the same signature and basic behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc. rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced. axis: The axis or axes to reduce. May be `None` (to reduce all axes), an `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a given set of axes), or a `Tensor` with a constant value. Must be in the range `[0, rt_input.rank)`. keepdims: If true, retains reduced dimensions with length 1. separator: An optional string. Defaults to None. The separator to use when joining. The separator must not be set for non-string data types. (i.e. if separator is not None then it uses string ops) name: A name prefix for the returned tensor (optional). 
Returns: A `RaggedTensor` containing the reduced values. The returned tensor has the same dtype as `data`, and its shape is given by removing the dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank` of the returned tensor is given by subtracting any ragged dimensions specified in `axis` from `rt_input.ragged_rank`. Raises: ValueError: If `axis` contains a `Tensor` whose value is not constant." 11108,reduce_sum,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,550,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 11109,reduce_prod,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,561,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 11110,reduce_min,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,572,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 11111,reduce_max,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,583,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 11112,reduce_mean,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,594,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 11113,_cast,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,613,function, 11114,reduce_all,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,618,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 11115,reduce_any,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,626,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 11116,_set_ragged_reduce_docstring,tensorflow/tensorflow/python/ops/ragged/ragged_math_ops.py,634,function, 11117,RaggedMergeDimsOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_merge_dims_op_test.py,32,class, 11118,RaggedOneHotTest,tensorflow/tensorflow/python/ops/ragged/ragged_one_hot_op_test.py,37,class, 11119,_right,tensorflow/tensorflow/python/ops/ragged/ragged_operators.py,27,function,Right-handed version of an operator: swap args x and y. 11120,_dummy_bool,tensorflow/tensorflow/python/ops/ragged/ragged_operators.py,72,function,Dummy method to prevent a RaggedTensor from being used as a Python bool.
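A minimal sketch (assuming TF 2.x; the values are illustrative) of the ragged reductions documented above, showing how the choice of axis interacts with the ragged dimension:

```python
import tensorflow as tf

rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], []])
# axis=1 reduces each row; an empty row reduces to the identity (0 for sum).
print(tf.reduce_sum(rt, axis=1).numpy())  # [8 6 9 0]
# axis=0 aligns columns across rows of unequal length.
print(tf.reduce_max(rt, axis=0).numpy())  # [9 5 4]
```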
11121,RaggedElementwiseOpsTest,tensorflow/tensorflow/python/ops/ragged/ragged_operators_test.py,27,class, 11122,RaggedPlaceholderOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_placeholder_op_test.py,30,class, 11123,RaggedPrintV2Test,tensorflow/tensorflow/python/ops/ragged/ragged_print_op_test.py,40,class, 11124,RaggedToStringTest,tensorflow/tensorflow/python/ops/ragged/ragged_print_op_test.py,130,class, 11125,RaggedRangeOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_range_op_test.py,28,class, 11126,RaggedRankOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_rank_op_test.py,29,class, 11127,mean,tensorflow/tensorflow/python/ops/ragged/ragged_reduce_op_test.py,38,function, 11128,RaggedReduceOpsTest,tensorflow/tensorflow/python/ops/ragged/ragged_reduce_op_test.py,43,class, 11129,RaggedReverseOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_reverse_op_test.py,30,class, 11130,RaggedRowLengthsOp,tensorflow/tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py,31,class, 11131,RaggedSplitsToSegmentIdsOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py,28,class, 11132,RaggedSplitsToSegmentIdsOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_segment_ids_to_row_splits_op_test.py,28,class, 11133,prod,tensorflow/tensorflow/python/ops/ragged/ragged_segment_op_test.py,35,function, 11134,mean,tensorflow/tensorflow/python/ops/ragged/ragged_segment_op_test.py,43,function, 11135,sqrt_n,tensorflow/tensorflow/python/ops/ragged/ragged_segment_op_test.py,47,function, 11136,RaggedSegmentOpsTest,tensorflow/tensorflow/python/ops/ragged/ragged_segment_op_test.py,52,class, 11137,RaggedSizeOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_size_op_test.py,30,class, 11138,squeeze,tensorflow/tensorflow/python/ops/ragged/ragged_squeeze_op.py,31,function,"Ragged compatible squeeze. If `input` is a `tf.Tensor`, then this calls `tf.squeeze`. If `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time, where `N` is the number of elements in the squeezed dimensions. Args: input: A potentially ragged tensor. The input to squeeze. axis: An optional list of ints. Defaults to `None`. If the `input` is ragged, it only squeezes the dimensions listed. It fails if `input` is ragged and axis is []. If `input` is not ragged it calls tf.squeeze. Note that it is an error to squeeze a dimension that is not 1. It must be in the range of [-rank(input), rank(input)). name: A name for the operation (optional). Returns: A potentially ragged tensor. Contains the same data as input, but has one or more dimensions of size 1 removed." 11139,RaggedSqueezeTest,tensorflow/tensorflow/python/ops/ragged/ragged_squeeze_op_test.py,34,class, 11140,RaggedStackOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_stack_op_test.py,31,class, 11141,string_bytes_split,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,46,function,"Split string elements of `input` into bytes. Examples: >>> tf.strings.bytes_split('hello').numpy() array([b'h', b'e', b'l', b'l', b'o'], dtype=object) >>> tf.strings.bytes_split(['hello', '123']) Note that this op splits strings into bytes, not unicode characters. To split strings into unicode characters, use `tf.strings.unicode_split`. See also: `tf.io.decode_raw`, `tf.strings.split`, `tf.strings.unicode_split`. Args: input: A string `Tensor` or `RaggedTensor`: the strings to split. Must have a statically known rank (`N`). name: A name for the operation (optional). 
Returns: A `RaggedTensor` of rank `N+1`: the bytes that make up the source strings." 11142,unicode_encode,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,94,function,"Encodes each sequence of Unicode code points in `input` into a string. `result[i1...iN]` is the string formed by concatenating the Unicode codepoints `input[i1...iN, :]`, encoded using `output_encoding`. Args: input: An `N+1` dimensional potentially ragged integer tensor with shape `[D1...DN, num_chars]`. output_encoding: Unicode encoding that should be used to encode each codepoint sequence. Can be `""UTF-8""`, `""UTF-16-BE""`, or `""UTF-32-BE""`. errors: Specifies the response when an invalid codepoint is encountered (optional). One of: * `'replace'`: Replace invalid codepoint with the `replacement_char`. (default) * `'ignore'`: Skip invalid codepoints. * `'strict'`: Raise an exception for any invalid codepoint. replacement_char: The replacement character codepoint to be used in place of any invalid input when `errors='replace'`. Any valid unicode codepoint may be used. The default value is the default unicode replacement character, which is 0xFFFD (U+FFFD). name: A name for the operation (optional). Returns: A `N` dimensional `string` tensor with shape `[D1...DN]`. #### Example: >>> input = tf.ragged.constant( ... [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]) >>> print(unicode_encode(input, 'UTF-8')) tf.Tensor([b'G\xc3\xb6\xc3\xb6dnight' b'\xf0\x9f\x98\x8a'], shape=(2,), dtype=string)" 11143,unicode_decode,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,192,function,"Decodes each string in `input` into a sequence of Unicode code points. `result[i1...iN, j]` is the Unicode codepoint for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`; and in place of C0 control characters in `input` when `replace_control_characters=True`. replace_control_characters: Whether to replace the C0 control characters `(U+0000 - U+001F)` with the `replacement_char`. name: A name for the operation (optional). Returns: A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`. The returned tensor is a `tf.Tensor` if `input` is a scalar, or a `tf.RaggedTensor` otherwise. #### Example: >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> tf.strings.unicode_decode(input, 'UTF-8').to_list() [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]" 11144,unicode_decode_with_offsets,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,238,function,"Decodes each string into a sequence of code points with start offsets. This op is similar to `tf.strings.unicode_decode(...)`, but it also returns the start offset for each character in its respective string. This information can be used to align the characters with the original byte sequence.
Returns a tuple `(codepoints, start_offsets)` where: * `codepoints[i1...iN, j]` is the Unicode codepoint for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`; and in place of C0 control characters in `input` when `replace_control_characters=True`. replace_control_characters: Whether to replace the C0 control characters `(U+0000 - U+001F)` with the `replacement_char`. name: A name for the operation (optional). Returns: A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`. * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`. * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`. The returned tensors are `tf.Tensor`s if `input` is a scalar, or `tf.RaggedTensor`s otherwise. #### Example: >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8') >>> result[0].to_list() # codepoints [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]] >>> result[1].to_list() # offsets [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]" 11145,unicode_split,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,300,function,"Splits each string in `input` into a sequence of Unicode code points. `result[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its `j`th character, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`. name: A name for the operation (optional). Returns: A `N+1` dimensional `string` tensor with shape `[D1...DN, (num_chars)]`. The returned tensor is a `tf.Tensor` if `input` is a scalar, or a `tf.RaggedTensor` otherwise. #### Example: >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> tf.strings.unicode_split(input, 'UTF-8').to_list() [[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'], [b'\xf0\x9f\x98\x8a']]" 11146,unicode_split_with_offsets,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,348,function,"Splits each string into a sequence of code points with start offsets. This op is similar to `tf.strings.unicode_split(...)`, but it also returns the start offset for each character in its respective string.
This information can be used to align the characters with the original byte sequence. Returns a tuple `(chars, start_offsets)` where: * `chars[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its `j`th character, when decoded using `input_encoding`. * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`. name: A name for the operation (optional). Returns: A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`. * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`. * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`. The returned tensors are `tf.Tensor`s if `input` is a scalar, or `tf.RaggedTensor`s otherwise. #### Example: >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8') >>> result[0].to_list() # character substrings [[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'], [b'\xf0\x9f\x98\x8a']] >>> result[1].to_list() # offsets [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]" 11147,_unicode_decode,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,412,function,Decodes each string into a sequence of codepoints. 11148,string_split_v2,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,472,function,"Split elements of `input` based on `sep` into a `RaggedTensor`. Let N be the size of `input` (typically N will be the batch size). Split each element of `input` based on `sep` and return a `RaggedTensor` containing the split tokens. Empty tokens are ignored. Example: >>> tf.strings.split('hello world').numpy() array([b'hello', b'world'], dtype=object) >>> tf.strings.split(['hello world', 'a b c']) If `sep` is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings. For example, `input` of `""1<>2<><>3""` and `sep` of `""<>""` returns `[""1"", ""2"", """", ""3""]`. If `sep` is None or an empty string, consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Note that the above mentioned behavior matches python's str.split. Args: input: A string `Tensor` of rank `N`, the strings to split. If `rank(input)` is not known statically, then it is assumed to be `1`. sep: `0-D` string `Tensor`, the delimiter string. maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result. name: A name for the operation (optional). Raises: ValueError: If sep is not a string. Returns: A `RaggedTensor` of rank `N+1`, the strings split according to the delimiter." 11149,string_split,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,537,function,"Split elements of `source` based on `delimiter`. Let N be the size of `source` (typically N will be the batch size). 
Split each element of `source` based on `delimiter` and return a `SparseTensor` or `RaggedTensor` containing the split tokens. Empty tokens are ignored. If `sep` is an empty string, each element of the `source` is split into individual strings, each containing one byte. (This includes splitting multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is treated as a set of delimiters with each considered a potential split point. Examples: >>> print(tf.compat.v1.string_split(['hello world', 'a b c'])) SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...), values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...), dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64)) >>> print(tf.compat.v1.string_split(['hello world', 'a b c'], ... result_type=""RaggedTensor"")) Args: source: `1-D` string `Tensor`, the strings to split. sep: `0-D` string `Tensor`, the delimiter character, the string should be length 0 or 1. Default is ' '. skip_empty: A `bool`. If `True`, skip the empty strings from the result. delimiter: deprecated alias for `sep`. result_type: The tensor type for the result: one of `""RaggedTensor""` or `""SparseTensor""`. name: A name for the operation (optional). Raises: ValueError: If delimiter is not a string. Returns: A `SparseTensor` or `RaggedTensor` of rank `2`, the strings split according to the delimiter. The first column of the indices corresponds to the row in `source` and the second column corresponds to the index of the split component in this row." 11150,strings_split_v1,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,599,function,"Split elements of `input` based on `sep`. Let N be the size of `input` (typically N will be the batch size). Split each element of `input` based on `sep` and return a `SparseTensor` or `RaggedTensor` containing the split tokens. Empty tokens are ignored. Examples: >>> print(tf.compat.v1.strings.split(['hello world', 'a b c'])) SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...), values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...), dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64)) >>> print(tf.compat.v1.strings.split(['hello world', 'a b c'], ... result_type=""RaggedTensor"")) If `sep` is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings. For example, `input` of `""1<>2<><>3""` and `sep` of `""<>""` returns `[""1"", ""2"", """", ""3""]`. If `sep` is None or an empty string, consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Note that the above mentioned behavior matches python's str.split. Args: input: A string `Tensor` of rank `N`, the strings to split. If `rank(input)` is not known statically, then it is assumed to be `1`. sep: `0-D` string `Tensor`, the delimiter character. maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result. result_type: The tensor type for the result: one of `""RaggedTensor""` or `""SparseTensor""`. source: alias for ""input"" argument. name: A name for the operation (optional). Raises: ValueError: If sep is not a string. Returns: A `SparseTensor` or `RaggedTensor` of rank `N+1`, the strings split according to the delimiter." 11151,reduce_join,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,664,function,"For docs, see: _RAGGED_REDUCE_DOCSTRING." 
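A minimal sketch (assuming TF 2.x; the values are illustrative) tying the split entries above to the ragged `reduce_join` override:

```python
import tensorflow as tf

# tf.strings.split returns a RaggedTensor: rows yield different token counts.
tokens = tf.strings.split(['hello world', 'a b c'])
print(tokens.to_list())  # [[b'hello', b'world'], [b'a', b'b', b'c']]
# The ragged reduce_join override collapses the ragged dimension back down.
print(tf.strings.reduce_join(tokens, axis=-1, separator='_').numpy())
# [b'hello_world' b'a_b_c']
```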
11152,ngrams,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,673,function,"Create a tensor of n-grams based on `data`. Creates a tensor of n-grams based on `data`. The n-grams are created by joining windows of `ngram_width` adjacent strings from the inner axis of `data` using `separator`. The input data can be padded on both the start and end of the sequence, if desired, using the `pad_values` argument. If set, `pad_values` should contain either a tuple of strings or a single string; the 0th element of the tuple will be used to pad the left side of the sequence and the 1st element of the tuple will be used to pad the right side of the sequence. The `padding_width` arg controls how many padding values are added to each side; it defaults to `ngram_width-1`. If this op is configured to not have padding, or if it is configured to add padding with `padding_width` set to less than ngram_width-1, it is possible that a sequence, or a sequence plus padding, is smaller than the ngram width. In that case, no ngrams will be generated for that sequence. This can be prevented by setting `preserve_short_sequences`, which will cause the op to always generate at least one ngram per non-empty sequence. Examples: >>> tf.strings.ngrams([""A"", ""B"", ""C"", ""D""], 2).numpy() array([b'A B', b'B C', b'C D'], dtype=object) >>> tf.strings.ngrams([""TF"", ""and"", ""keras""], 1).numpy() array([b'TF', b'and', b'keras'], dtype=object) Args: data: A Tensor or RaggedTensor containing the source data for the ngrams. ngram_width: The width(s) of the ngrams to create. If this is a list or tuple, the op will return ngrams of all specified arities in list order. Values must be non-Tensor integers greater than 0. separator: The separator string used between ngram elements. Must be a string constant, not a Tensor. pad_values: A tuple of (left_pad_value, right_pad_value), a single string, or None. If None, no padding will be added; if a single string, then that string will be used for both left and right padding. Values must be Python strings. padding_width: If set, `padding_width` pad values will be added to both sides of each sequence. Defaults to `ngram_width`-1. Must be greater than 0. (Note that 1-grams are never padded, regardless of this value.) preserve_short_sequences: If true, then ensure that at least one ngram is generated for each input sequence. In particular, if an input sequence is shorter than `min(ngram_width) + 2*padding_width`, then generate a single ngram containing the entire sequence. If false, then no ngrams are generated for these short input sequences. name: The op name. Returns: A RaggedTensor of ngrams. If `data.shape=[D1...DN, S]`, then `output.shape=[D1...DN, NUM_NGRAMS]`, where `NUM_NGRAMS=S-ngram_width+1+2*padding_width`. Raises: TypeError: if `pad_values` is set to an invalid type. ValueError: if `pad_values`, `padding_width`, or `ngram_width` is set to an invalid value." 11153,string_format,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,826,function,Version of tf.strings.format that handles RaggedTensors. 11154,ragged_tensor_to_string,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,851,function,"Returns a scalar string tensor with the contents of a RaggedTensor. Requires that `rt.shape.rank` is not `None`. Note: this converts the entire `RaggedTensor` into a single string scalar. If you want to convert individual elements, use `tf.strings.as_string(rt)`.
>>> rt1 = tf.ragged.constant([[1, 2, 3], [4, 5]]) >>> ragged_tensor_to_string(rt1).numpy() b'[[1, 2, 3], [4, 5]]' >>> rt2 = tf.ragged.constant([[['a'], ['b', 'c']], [['d', 'e', 'f'], []]]) >>> ragged_tensor_to_string(rt2).numpy() b""[[['a'], ['b', 'c']], [['d', 'e', 'f'], []]]"" >>> rt3 = tf.ragged.constant([[1], [2, 3, 4, 5, 6], [], [], [7], [8, 9]]) >>> ragged_tensor_to_string(rt3, summarize=2).numpy() b'[[1], [2, 3, ..., 5, 6], ..., [7], [8, 9]]' Args: rt: The RaggedTensor that should be converted to a string. summarize: If specified, then only the first and last `summarize` elements within each dimension are included in the string. If `-1` or `None`, then all elements are included." 11155,_ragged_tensor_to_string,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,896,function,"Returns a scalar string tensor with the contents of `string_tensor`. Args: string_tensor: A potentially ragged tensor with dtype=string. summarize: Include only the first and last `summarize` elements of each dimension. If `-1` or `None`, then include all elements. Returns: A scalar string Tensor." 11156,_nrows,tensorflow/tensorflow/python/ops/ragged/ragged_string_ops.py,924,function, 11157,RaggedTensor,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,59,class,"Represents a ragged tensor. A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are dimensions whose slices may have different lengths. For example, the inner (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths. Dimensions whose slices all have the same length are called *uniform dimensions*. The outermost dimension of a `RaggedTensor` is always uniform, since it consists of a single slice (and so there is no possibility for differing slice lengths). The total number of dimensions in a `RaggedTensor` is called its *rank*, and the number of ragged dimensions in a `RaggedTensor` is called its *ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation time: it can't depend on the runtime values of `Tensor`s, and can't vary dynamically for different session runs. ### Potentially Ragged Tensors Many ops support both `Tensor`s and `RaggedTensor`s. The term ""potentially ragged tensor"" may be used to refer to a tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero. ### Documenting RaggedTensor Shapes When documenting the shape of a RaggedTensor, ragged dimensions can be indicated by enclosing them in parentheses. For example, the shape of a 3-D `RaggedTensor` that stores the fixed-size word embedding for each word in a sentence, for each sentence in a batch, could be written as `[num_sentences, (num_words), embedding_size]`. The parentheses around `(num_words)` indicate that dimension is ragged, and that the length of each element list in that dimension may vary for each item. ### Component Tensors Internally, a `RaggedTensor` consists of a concatenated list of values that are partitioned into variable-length rows. In particular, each `RaggedTensor` consists of: * A `values` tensor, which concatenates the variable-length rows into a flattened list. For example, the `values` tensor for `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`. * A `row_splits` vector, which indicates how those flattened values are divided into rows. In particular, the values for row `rt[i]` are stored in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. 
Example: >>> print(tf.RaggedTensor.from_row_splits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_splits=[0, 4, 4, 7, 8, 8])) ### Alternative Row-Partitioning Schemes In addition to `row_splits`, ragged tensors provide support for five other row-partitioning schemes: * `row_lengths`: a vector with shape `[nrows]`, which specifies the length of each row. * `value_rowids` and `nrows`: `value_rowids` is a vector with shape `[nvals]`, corresponding one-to-one with `values`, which specifies each value's row index. In particular, the row `rt[row]` consists of the values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an integer scalar that specifies the number of rows in the `RaggedTensor`. (`nrows` is used to indicate trailing empty rows.) * `row_starts`: a vector with shape `[nrows]`, which specifies the start offset of each row. Equivalent to `row_splits[:-1]`. * `row_limits`: a vector with shape `[nrows]`, which specifies the stop offset of each row. Equivalent to `row_splits[1:]`. * `uniform_row_length`: A scalar tensor, specifying the length of every row. This row-partitioning scheme may only be used if all rows have the same length. Example: The following ragged tensors are equivalent, and all represent the nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`. >>> values = [3, 1, 4, 1, 5, 9, 2, 6] >>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]) >>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]) >>> rt3 = RaggedTensor.from_value_rowids( ... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5) >>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]) >>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]) ### Multiple Ragged Dimensions `RaggedTensor`s with multiple ragged dimensions can be defined by using a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor` adds a single ragged dimension. >>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above ... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) >>> outer_rt = RaggedTensor.from_row_splits( ... values=inner_rt, row_splits=[0, 3, 3, 5]) >>> print(outer_rt.to_list()) [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] >>> print(outer_rt.ragged_rank) 2 The factory function `RaggedTensor.from_nested_row_splits` may be used to construct a `RaggedTensor` with multiple ragged dimensions directly, by providing a list of `row_splits` tensors: >>> RaggedTensor.from_nested_row_splits( ... flat_values=[3, 1, 4, 1, 5, 9, 2, 6], ... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list() [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] ### Uniform Inner Dimensions `RaggedTensor`s with uniform inner dimensions can be defined by using a multidimensional `Tensor` for `values`. >>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32), ... row_splits=[0, 2, 5]) >>> print(rt.to_list()) [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]] >>> print(rt.shape) (2, None, 3) ### Uniform Outer Dimensions `RaggedTensor`s with uniform outer dimensions can be defined by using one or more `RaggedTensor` with a `uniform_row_length` row-partitioning tensor. 
For example, a `RaggedTensor` with shape `[2, 2, None]` can be constructed with this method from a `RaggedTensor` values with shape `[4, None]`: >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) >>> print(values.shape) (4, None) >>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2) >>> print(rt6) >>> print(rt6.shape) (2, 2, None) Note that `rt6` only contains one ragged dimension (the innermost dimension). In contrast, if `from_row_splits` is used to construct a similar `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions: >>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4]) >>> print(rt7.shape) (2, None, None) Uniform and ragged outer dimensions may be interleaved, meaning that a tensor with any combination of ragged and uniform dimensions may be created. For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could be constructed as follows: ```python t0 = tf.zeros([1000, 2]) # Shape: [1000, 2] t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2] t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2] t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2] t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2] ```" 11158,is_ragged,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2092,function,Returns true if `value` is a ragged tensor or ragged tensor value. 11159,match_row_splits_dtypes,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2098,function,"Return a copy of `tensors` with row_splits all having the same dtype. Args: *tensors: A list of Tensors or RaggedTensors. **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors), where `dtype` is the data type used by row-splits, and `tensors` is the converted list of `Tensors` and `RaggedTensors`. Returns: The converted list of `Tensors` and `RaggedTensors`." 11160,RaggedTensorSpec,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2149,class,Type specification for a `tf.RaggedTensor`. 11161,convert_to_tensor_or_ragged_tensor,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2412,function,"Converts value to a `RaggedTensor` or `Tensor`. * If `value` is a `RaggedTensor`, then return it as-is. * If `value` is a `RaggedTensorValue`, return a corresponding constant `RaggedTensor`. * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`. Args: value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing the type is inferred from the type of `value`. preferred_dtype: Optional element type for the returned tensor, used when dtype is None. This argument has no effect if `value` is already a tensor, or when conversion is not possible. name: Optional name to use if a new `Tensor` is created. Returns: A `Tensor` or `RaggedTensor`." 11162,_ragged_tensor_value_from_components,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2459,function, 11163,_ragged_tensor_session_fetch,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2467,function, 11164,_ragged_tensor_session_feed,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2472,function, 11165,_ragged_tensor_session_feed_for_partial_run,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2478,function, 11166,RaggedTensorType,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2490,class,"Encoding of a static type for a `RaggedTensor`. 
Use this type to express/declare that an output must have the type of `RaggedTensor`." 11167,_assert_sparse_indices_are_ragged_right,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2523,function,"Checks that the given SparseTensor.indices tensor is ragged-right. Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right because the entry `[3, 1]` skips a cell. Args: indices: The SparseTensor indices to check. Returns: A list of control dependency op tensors." 11168,_ragged_tensor_to_sparse_gradient,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2565,function,Gradient for RaggedTensorToSparse. 11169,_assert_monotonic_increasing,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2584,function, 11170,_assert_zero,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2589,function, 11171,_nrows,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2594,function, 11172,merge_dims,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2601,function,"Merges value[outer_axis...inner_axis] into a single dimension. See `RaggedTensor.merge_dims()` for more details. This helper differs from `RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor. Args: value: A `RaggedTensor` or `Tensor` outer_axis: `int` inner_axis: `int` Returns: A flattened `RaggedTensor` or `Tensor`." 11173,_prod,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2668,function,Returns the product of the numbers in a list. 11174,_get_row_partition_type_tensor_pairs_tail,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2673,function,"Gets a row partition type tensor pair for the tail. If value_rowid is defined, then it is used. Otherwise, row_splits are used. Args: partition: a RowPartition. Returns: A list of (row_partition_type, row_partition_tensor) pairs." 11175,_get_row_partition_type_tensor_pairs,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2691,function,"Gets a list of the row partitions for rt_input. If value_rowids are defined, then they are used. Otherwise, row_splits are used. If the outermost level has value_rowids defined, then nrows is also added. Args: rt_input: a ragged tensor. Returns: A list of (row_partition_type, row_partition_tensor) pairs." 11176,_shape_as_tensor,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2714,function,"Takes shape and coerces it to a shape as a tensor. If the object is already a tensor, simply passes it on (result is guaranteed to be int64 or int32, but not necessarily dtype). If not, creates a tensor of type dtype. The result is a scalar equal to -1 if the shape is unknown_rank; otherwise, it is a vector, where unknown dimensions are represented with a value of -1. In C++, see TensorShapeFromTensor for parsing shapes in kernels, and InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for use in the shape inference function. Args: shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]], Tuple[Optional[Int]]. dtype: tf.int64 or tf.int32 Returns: a scalar or vector tensor of dtype tf.int32 or tf.int64." 11177,_nvals_uniform_row_length,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2753,function,Get the number of values for uniform row length constructor. 11178,_get_optional_partition_dtype,tensorflow/tensorflow/python/ops/ragged/ragged_tensor.py,2765,function,"Returns the partition dtype, or None if None exists."
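The `RaggedTensor` entry above (11157) lists several equivalent row-partitioning schemes. A short sketch using only the public factory methods named in that docstring, showing that the encodings are interchangeable:

```python
import tensorflow as tf

values = [3, 1, 4, 1, 5, 9, 2, 6]

# Three of the equivalent encodings of [[3, 1, 4, 1], [], [5, 9, 2], [6], []].
rt1 = tf.RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
rt2 = tf.RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
rt3 = tf.RaggedTensor.from_value_rowids(
    values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
assert rt1.to_list() == rt2.to_list() == rt3.to_list()

# Any encoding can produce the others on demand.
print(rt2.row_splits.numpy())      # [0 4 4 7 8 8]
print(rt1.value_rowids().numpy())  # [0 0 0 0 2 2 2 3]
print(rt1.row_lengths().numpy())   # [4 0 3 1 0]
```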
11179,RaggedTensorBoundingShapeOp,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_bounding_shape_op_test.py,31,class, 11180,RaggedTensorDynamicShape,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape.py,35,class,"A collection of tensors encoding the shape of a potentially ragged tensor. Each `RaggedTensorDynamicShape` consists of an ordered list of dimension sizes. There are two dimension types: * ""Uniform dimensions"" are dimensions where all slices have the same length. `RaggedTensorDynamicShape` records the size of each uniform dimension using a single scalar integer. * ""Ragged dimensions"" are dimensions whose slices may have different lengths. `RaggedTensorDynamicShape` records the size of each ragged dimension using an integer vector containing the slice lengths for all the slices across that dimension. Furthermore, there are two ways a dimension might be encoded: * ""Partitioned dimensions"" are dimensions that are encoded using a `RaggedTensor`'s `nested_row_splits`. The outermost partitioned dimension must be uniform, and the innermost partitioned dimension must be ragged. * ""Inner dimensions"" are dimensions that are encoded using a `RaggedTensor`'s `flat_values`. Inner dimensions are always uniform. The sizes of partitioned dimensions are recorded using `partitioned_dim_sizes` and `inner_dim_sizes`: * `partitioned_dim_sizes` is a list of tensors (one for each partitioned dimension). * For uniform dimensions, the tensor is an integer scalar specifying the size of all slices across that dimension. * For ragged dimensions, the tensor is an integer vector specifying the size of each slice across that dimension. * `inner_dim_sizes` is a single integer vector, where each element specifies the size of a single inner dimension. Examples:

Tensor                         | Ragged Rank | Partitioned Dim Sizes  | Inner Dim Sizes
------------------------------ | ----------- | ---------------------- | ---------------
`[[1, 2, 3], [4, 5, 6]]`       | 0           |                        | `2, 3`
`[[1, 2], [], [3, 4, 5]]`      | 1           | `3, (2, 0, 3)`         |
`[[[1, 2], [3, 4]], [[5, 6]]]` | 1           | `2, (2, 1)`            | `2`
`[[[1, 2], [3]], [[4, 5]]]`    | 2           | `2, (2, 1), (2, 1, 2)` |
" 11181,broadcast_dynamic_shape,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape.py,444,function,"Returns the shape formed by broadcasting two shapes to be compatible. Args: shape_x: A `RaggedTensorDynamicShape` shape_y: A `RaggedTensorDynamicShape` Returns: A `RaggedTensorDynamicShape`. Raises: ValueError: If `shape_x` and `shape_y` are not broadcast-compatible." 11182,broadcast_to,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape.py,476,function,"Broadcasts a potentially ragged tensor to a ragged shape. Tiles `rt_input` as necessary to match the given shape. Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`. Args: rt_input: The potentially ragged tensor to broadcast. shape: A `RaggedTensorDynamicShape` broadcast_inner_dimensions: If false, then inner dimensions will not be tiled. Returns: A potentially ragged tensor whose values are taken from `rt_input`, and whose shape matches `shape`." 11183,_broadcast_to_uniform_shape,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape.py,506,function,Broadcasts rt_input to the uniform shape `shape`. 11184,_broadcast_to_ragged_shape,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape.py,516,function,Broadcasts rt_input to the ragged shape `dst_shape`.
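`RaggedTensorDynamicShape`, `broadcast_dynamic_shape`, and `broadcast_to` (11180-11182) are internal machinery; user code normally reaches them through elementwise-op broadcasting. A sketch of that public-facing behavior; the commented outputs are what the broadcasting rules above predict:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [], [3, 4, 5]])  # shape [3, None]

# A scalar broadcasts across every ragged row.
print((rt + 10).to_list())       # [[11, 12], [], [13, 14, 15]]

# A dense [3, 1] operand broadcasts one value per row, including empty rows.
per_row = tf.constant([[10], [20], [30]])
print((rt + per_row).to_list())  # [[11, 12], [], [33, 34, 35]]
```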
11185,_ragged_tile_axis,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape.py,601,function,Tile a dimension of a RaggedTensor to match a ragged shape. 11186,RaggedTensorShapeTest,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_shape_test.py,34,class, 11187,int32array,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_test.py,47,function, 11188,RaggedTensorTest,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_test.py,52,class, 11189,RaggedTensorSpecTest,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_test.py,1560,class, 11190,RaggedTensorValue,tensorflow/tensorflow/python/ops/ragged/ragged_tensor_value.py,27,class,"Represents the value of a `RaggedTensor`. Warning: `RaggedTensorValue` should only be used in graph mode; in eager mode, the `tf.RaggedTensor` class contains its value directly. See `tf.RaggedTensor` for a description of ragged tensors." 11191,RaggedTileOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_tile_op_test.py,33,class, 11192,RaggedTensorToSparseOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py,35,class, 11193,make_placeholder,tensorflow/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py,46,function, 11194,rebuild_ragged_tensor_with_value_rowids,tensorflow/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py,50,function,"Returns a copy of `rt`, built using `from_value_rowids`. This ensures that RaggedTensor._cached_value_rowids is populated, which triggers a different code-path for converting ragged tensors to tensors. If `feed_dict` and `sess` are specified, then build the new `RaggedTensor` using placeholder tensors, and populate a feed dictionary that can be used to feed the placeholders. Args: rt: The RaggedTensor to copy. feed_dict: If specified, then build the new `RaggedTensor` using placeholders, and populate this dict with entries to feed those placeholders. sess: A session used to evaluate tensors; required if feed_dict is specified. Returns: A copy of `rt`, built using `from_value_rowids`." 11195,RaggedTensorToTensorOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py,91,class, 11196,RaggedToDenseBenchmark,tensorflow/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py,725,class, 11197,assert_splits_match,tensorflow/tensorflow/python/ops/ragged/ragged_util.py,31,function,"Checks that the given splits lists are identical. Performs static tests to ensure that the given splits lists are identical, and returns a list of control dependency op tensors that check that they are fully identical. Args: nested_splits_lists: A list of nested_splits_lists, where each split_list is a list of `splits` tensors from a `RaggedTensor`, ordered from outermost ragged dimension to innermost ragged dimension. Returns: A list of control dependency op tensors. Raises: ValueError: If the splits are not identical." 11198,lengths_to_splits,tensorflow/tensorflow/python/ops/ragged/ragged_util.py,65,function,Returns splits corresponding to the given lengths. 11199,repeat_ranges,tensorflow/tensorflow/python/ops/ragged/ragged_util.py,70,function,"Repeats each range of `params` (as specified by `splits`) `repeats` times. Let the `i`th range of `params` be defined as `params[splits[i]:splits[i + 1]]`. Then this function returns a tensor containing range 0 repeated `repeats[0]` times, followed by range 1 repeated `repeats[1]`, ..., followed by the last range repeated `repeats[-1]` times. Args: params: The `Tensor` whose values should be repeated. 
splits: A splits tensor indicating the ranges of `params` that should be repeated. repeats: The number of times each range should be repeated. Supports broadcasting from a scalar value. Returns: A `Tensor` with the same rank and type as `params`. #### Example: >>> print(repeat_ranges( ... params=tf.constant(['a', 'b', 'c']), ... splits=tf.constant([0, 2, 3]), ... repeats=tf.constant(3))) tf.Tensor([b'a' b'b' b'a' b'b' b'a' b'b' b'c' b'c' b'c'], shape=(9,), dtype=string)" 11200,RaggedUtilTest,tensorflow/tensorflow/python/ops/ragged/ragged_util_test.py,45,class, 11201,where,tensorflow/tensorflow/python/ops/ragged/ragged_where_op.py,30,function,"Return the elements, either from `x` or `y`, depending on the `condition`. : If both `x` and `y` are `None`: Returns the coordinates of true elements of `condition`. The coordinates are returned in a 2-D tensor with shape `[num_true_values, dim_size(condition)]`, where `result[i]` is the coordinates of the `i`th true value (in row-major order). : If both `x` and `y` are non-`None`: Returns a tensor formed by selecting values from `x` where condition is true, and from `y` when condition is false. In particular: : If `condition`, `x`, and `y` all have the same shape: * `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true. * `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false. : Otherwise: * `condition` must be a vector. * `x` and `y` must have the same number of dimensions. * The outermost dimensions of `condition`, `x`, and `y` must all have the same size. * `result[i] = x[i]` if `condition[i]` is true. * `result[i] = y[i]` if `condition[i]` is false. Args: condition: A potentially ragged tensor of type `bool` x: A potentially ragged tensor (optional). y: A potentially ragged tensor (optional). Must be specified if `x` is specified. Must have the same rank and type as `x`. name: A name for the operation (optional) Returns: : If both `x` and `y` are `None`: A `Tensor` with shape `(num_true, dim_size(condition))`. : Otherwise: A potentially ragged tensor with the same type, rank, and outermost dimension size as `x` and `y`. `result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`. Raises: ValueError: When exactly one of `x` or `y` is non-`None`; or when `condition`, `x`, and `y` have incompatible shapes. #### Examples: >>> # Coordinates where condition is true. >>> condition = tf.ragged.constant([[True, False, True], [False, True]]) >>> print(where(condition)) tf.Tensor( [[0 0] [0 2] [1 1]], shape=(3, 2), dtype=int64) >>> # Elementwise selection between x and y, based on condition. >>> condition = tf.ragged.constant([[True, False, True], [False, True]]) >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']]) >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']]) >>> print(where(condition, x, y)) <tf.RaggedTensor [[b'A', b'b', b'C'], [b'd', b'E']]> >>> # Row selection between x and y, based on condition. >>> condition = [True, False] >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']]) >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']]) >>> print(where(condition, x, y)) <tf.RaggedTensor [[b'A', b'B', b'C'], [b'd', b'e']]> " 11202,_elementwise_where,tensorflow/tensorflow/python/ops/ragged/ragged_where_op.py,111,function,"Ragged version of tf.where(condition, x, y)." 11203,_coordinate_where,tensorflow/tensorflow/python/ops/ragged/ragged_where_op.py,137,function,Ragged version of tf.where(condition).
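The `where` entry (11201) documents both a coordinate mode and an elementwise mode. A sketch assuming the ragged dispatch this module registers for `tf.where`, so the public op accepts `RaggedTensor` inputs directly:

```python
import tensorflow as tf

condition = tf.ragged.constant([[True, False, True], [False, True]])
x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])
y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])

# Coordinate mode: with no x/y, returns the indices of the true elements.
print(tf.where(condition).numpy().tolist())  # [[0, 0], [0, 2], [1, 1]]

# Elementwise mode: take from x where true, from y where false.
print(tf.where(condition, x, y))
# <tf.RaggedTensor [[b'A', b'b', b'C'], [b'd', b'E']]>
```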
11204,_nrows,tensorflow/tensorflow/python/ops/ragged/ragged_where_op.py,160,function, 11205,RaggedWhereOpTest,tensorflow/tensorflow/python/ops/ragged/ragged_where_op_test.py,29,class, 11206,RowPartition,tensorflow/tensorflow/python/ops/ragged/row_partition.py,51,class,"Partitioning of a sequence of values into contiguous subsequences (""rows""). A `RowPartition` describes how a sequence with `nvals` items should be divided into `nrows` contiguous subsequences (""rows""). For example, a `RowPartition` could be used to partition the vector `[1, 2, 3, 4, 5]` into subsequences `[[1, 2], [3], [], [4, 5]]`. Note that `RowPartition` stores information about how values are partitioned, but does not include the partitioned values themselves. `tf.RaggedTensor` is used to pair a `values` tensor with one or more `RowPartition`s, providing a complete encoding for a ragged tensor (i.e. a tensor with variable-length dimensions). `RowPartition`s may be defined using several different schemes: * `row_lengths`: an integer vector with shape `[nrows]`, which specifies the length of each row. * `row_splits`: an integer vector with shape `[nrows+1]`, specifying the ""split points"" between each row. * `row_starts`: an integer vector with shape `[nrows]`, which specifies the start offset for each row. Equivalent to `row_splits[:-1]`. * `row_limits`: an integer vector with shape `[nrows]`, which specifies the stop offset for each row. Equivalent to `row_splits[1:]`. * `value_rowids` is an integer vector with shape `[nvals]`, corresponding one-to-one with sequence values, which specifies the row that each value belongs to. If the partition has empty trailing rows, then `nrows` must also be specified. * `uniform_row_length` is an integer scalar, specifying the length of every row. This scheme may only be used if all rows have the same length. For example, the following `RowPartition`s all represent the partitioning of 8 values into 5 sublists as follows: `[[*, *, *, *], [], [*, *, *], [*], []]`. >>> p1 = RowPartition.from_row_lengths([4, 0, 3, 1, 0]) >>> p2 = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8]) >>> p3 = RowPartition.from_row_starts([0, 4, 4, 7, 8], nvals=8) >>> p4 = RowPartition.from_row_limits([4, 4, 7, 8, 8]) >>> p5 = RowPartition.from_value_rowids([0, 0, 0, 0, 2, 2, 2, 3], nrows=5) For more information about each scheme, see the documentation for its factory method. For additional examples, see the documentation on `tf.RaggedTensor`. ### Precomputed Encodings `RowPartition` always stores at least one encoding of the partitioning, but it can be configured to cache additional encodings as well. This can avoid unnecessary recomputation in eager mode. (In graph mode, optimizations such as common subexpression elimination will typically prevent these unnecessary recomputations.) To check which encodings are precomputed, use `RowPartition.has_precomputed_<encoding>`. To cache an additional encoding, use `RowPartition.with_precomputed_<encoding>`." 11207,RowPartitionSpec,tensorflow/tensorflow/python/ops/ragged/row_partition.py,1029,class,Type specification for a `tf.RowPartition`.
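`RowPartition` (11206) is internal in the version indexed here; the import path below is taken from the file column and is not a stable public API. A sketch showing three of the five equivalent encodings from the docstring answering the same queries:

```python
# NOTE: internal module; path taken from the file column of this index.
from tensorflow.python.ops.ragged.row_partition import RowPartition

# Three equivalent encodings of [[*, *, *, *], [], [*, *, *], [*], []].
p1 = RowPartition.from_row_lengths([4, 0, 3, 1, 0])
p2 = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8])
p3 = RowPartition.from_value_rowids([0, 0, 0, 0, 2, 2, 2, 3], nrows=5)

# Every encoding answers the same queries.
for p in (p1, p2, p3):
    print(p.nrows().numpy(), p.row_splits().numpy())  # 5 [0 4 4 7 8 8]
```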
11208,_assert_monotonic_increasing,tensorflow/tensorflow/python/ops/ragged/row_partition.py,1178,function, 11209,_assert_zero,tensorflow/tensorflow/python/ops/ragged/row_partition.py,1183,function, 11210,_cast_if_not_none,tensorflow/tensorflow/python/ops/ragged/row_partition.py,1188,function, 11211,_merge_tensors,tensorflow/tensorflow/python/ops/ragged/row_partition.py,1192,function,"Merge two optional Tensors with equal values into a single Tensor. Args: t1: tf.Tensor or None t2: tf.Tensor or None name: A name for the tensors (for error messages) validate: If true, then check that `t1` is compatible with `t2` (if both are non-None). Returns: A pair `(merged_value, validated)`: * `merged_value` is `t1` if it is not None; or `t2` otherwise. * `validated` is true if we validated that t1 and t2 are equal (either by adding a check, or because t1 is t2)." 11212,RowPartitionTest,tensorflow/tensorflow/python/ops/ragged/row_partition_test.py,39,class, 11213,RowPartitionSpecTest,tensorflow/tensorflow/python/ops/ragged/row_partition_test.py,670,class, 11214,_assert_row_partition_equal,tensorflow/tensorflow/python/ops/ragged/row_partition_test.py,864,function, 11215,row_splits_to_segment_ids,tensorflow/tensorflow/python/ops/ragged/segment_id_ops.py,36,function,"Generates the segmentation corresponding to a RaggedTensor `row_splits`. Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if `splits[j] <= i < splits[j+1]`. Example: >>> print(tf.ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9])) tf.Tensor([0 0 0 2 2 3 4 4 4], shape=(9,), dtype=int64) Args: splits: A sorted 1-D integer Tensor. `splits[0]` must be zero. name: A name prefix for the returned tensor (optional). out_type: The dtype for the return value. Defaults to `splits.dtype`, or `tf.int64` if `splits` does not have a dtype. Returns: A sorted 1-D integer Tensor, with `shape=[splits[-1]]` Raises: ValueError: If `splits` is invalid." 11216,segment_ids_to_row_splits,tensorflow/tensorflow/python/ops/ragged/segment_id_ops.py,80,function,"Generates the RaggedTensor `row_splits` corresponding to a segmentation. Returns an integer vector `splits`, where `splits[0] = 0` and `splits[i] = splits[i-1] + count(segment_ids==i)`. Example: >>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4])) tf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64) Args: segment_ids: A 1-D integer Tensor. num_segments: A scalar integer indicating the number of segments. Defaults to `max(segment_ids) + 1` (or zero if `segment_ids` is empty). out_type: The dtype for the return value. Defaults to `segment_ids.dtype`, or `tf.int64` if `segment_ids` does not have a dtype. name: A name prefix for the returned tensor (optional). Returns: A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`." 11217,StringNgramsTest,tensorflow/tensorflow/python/ops/ragged/string_ngrams_op_test.py,34,class, 11218,StringsReduceJoinOpTest,tensorflow/tensorflow/python/ops/ragged/strings_reduce_join_op_test.py,29,class, 11219,_validate_dct_arguments,tensorflow/tensorflow/python/ops/signal/dct_ops.py,32,function,Checks that DCT/IDCT arguments are compatible and well formed. 11220,dct,tensorflow/tensorflow/python/ops/signal/dct_ops.py,55,function,"Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`. Types I, II, III and IV are supported. Type I is implemented using a length `2N` padded `tf.signal.rfft`. 
Type II is implemented using a length `2N` padded `tf.signal.rfft`, as described here: [Type 2 DCT using 2N FFT padded (Makhoul)] (https://dsp.stackexchange.com/a/10606). Type III is a fairly straightforward inverse of Type II (i.e. using a length `2N` padded `tf.signal.irfft`). Type IV is calculated through a length `2N` Type-II DCT of the padded signal, keeping only the odd indices. @compatibility(scipy) Equivalent to [scipy.fftpack.dct] (https://docs.scipy.org/doc/scipy-1.4.0/reference/generated/scipy.fftpack.dct.html) for Type-I, Type-II, Type-III and Type-IV DCT. @end_compatibility Args: input: A `[..., samples]` `float32`/`float64` `Tensor` containing the signals to take the DCT of. type: The DCT type to perform. Must be 1, 2, 3 or 4. n: The length of the transform. If length is less than sequence length, only the first n elements of the sequence are considered for the DCT. If n is greater than the sequence length, zeros are padded and then the DCT is computed as usual. axis: For future expansion. The axis to compute the DCT along. Must be `-1`. norm: The normalization to apply. `None` for no normalization or `'ortho'` for orthonormal normalization. name: An optional name for the operation. Returns: A `[..., samples]` `float32`/`float64` `Tensor` containing the DCT of `input`. Raises: ValueError: If `type` is not `1`, `2`, `3` or `4`, `axis` is not `-1`, `n` is not `None` or greater than 0, or `norm` is not `None` or `'ortho'`. ValueError: If `type` is `1` and `norm` is `ortho`. [dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform" 11221,idct,tensorflow/tensorflow/python/ops/signal/dct_ops.py,187,function,"Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`. Currently Types I, II, III, IV are supported. Type III is the inverse of Type II, and vice versa. Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is not `'ortho'`. That is: `signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`. When `norm='ortho'`, we have: `signal == idct(dct(signal, norm='ortho'), norm='ortho')`. @compatibility(scipy) Equivalent to [scipy.fftpack.idct] (https://docs.scipy.org/doc/scipy-1.4.0/reference/generated/scipy.fftpack.idct.html) for Type-I, Type-II, Type-III and Type-IV DCT. @end_compatibility Args: input: A `[..., samples]` `float32`/`float64` `Tensor` containing the signals to take the DCT of. type: The IDCT type to perform. Must be 1, 2, 3 or 4. n: For future expansion. The length of the transform. Must be `None`. axis: For future expansion. The axis to compute the DCT along. Must be `-1`. norm: The normalization to apply. `None` for no normalization or `'ortho'` for orthonormal normalization. name: An optional name for the operation. Returns: A `[..., samples]` `float32`/`float64` `Tensor` containing the IDCT of `input`. Raises: ValueError: If `type` is not `1`, `2`, `3` or `4`, `n` is not `None`, `axis` is not `-1`, or `norm` is not `None` or `'ortho'`. [idct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms" 11222,_infer_fft_length_for_rfft,tensorflow/tensorflow/python/ops/signal/fft_ops.py,33,function,Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`. 11223,_infer_fft_length_for_irfft,tensorflow/tensorflow/python/ops/signal/fft_ops.py,46,function,Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`. 11224,_maybe_pad_for_rfft,tensorflow/tensorflow/python/ops/signal/fft_ops.py,64,function,Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims.
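The `dct`/`idct` entries (11220-11221) state the exact round-trip identities. A sketch verifying both the orthonormal case and the unnormalized case with the `0.5 / N` rescaling from the `idct` docstring:

```python
import tensorflow as tf

signal = tf.random.normal([4, 128], dtype=tf.float32)

# Orthonormal DCT-II and its orthonormal inverse cancel exactly.
ortho = tf.signal.idct(tf.signal.dct(signal, type=2, norm='ortho'),
                       type=2, norm='ortho')
print(tf.reduce_max(tf.abs(signal - ortho)).numpy())  # tiny, ~1e-6

# Without norm, apply the 0.5 / N rescaling noted in the idct docstring.
plain = tf.signal.idct(tf.signal.dct(signal, type=2),
                       type=2) * 0.5 / signal.shape[-1]
print(tf.reduce_max(tf.abs(signal - plain)).numpy())  # also tiny
```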
11225,_rfft_wrapper,tensorflow/tensorflow/python/ops/signal/fft_ops.py,112,function,Wrapper around gen_spectral_ops.rfft* that infers fft_length argument. 11226,_irfft_wrapper,tensorflow/tensorflow/python/ops/signal/fft_ops.py,146,function,Wrapper around gen_spectral_ops.irfft* that infers fft_length argument. 11227,_fft_size_for_grad,tensorflow/tensorflow/python/ops/signal/fft_ops.py,204,function, 11228,_fft_grad,tensorflow/tensorflow/python/ops/signal/fft_ops.py,209,function, 11229,_ifft_grad,tensorflow/tensorflow/python/ops/signal/fft_ops.py,215,function, 11230,_fft2d_grad,tensorflow/tensorflow/python/ops/signal/fft_ops.py,223,function, 11231,_ifft2d_grad,tensorflow/tensorflow/python/ops/signal/fft_ops.py,229,function, 11232,_fft3d_grad,tensorflow/tensorflow/python/ops/signal/fft_ops.py,237,function, 11233,_ifft3d_grad,tensorflow/tensorflow/python/ops/signal/fft_ops.py,243,function, 11234,_rfft_grad_helper,tensorflow/tensorflow/python/ops/signal/fft_ops.py,250,function,Returns a gradient function for an RFFT of the provided rank. 11235,_irfft_grad_helper,tensorflow/tensorflow/python/ops/signal/fft_ops.py,332,function,Returns a gradient function for an IRFFT of the provided rank. 11236,fftshift,tensorflow/tensorflow/python/ops/signal/fft_ops.py,374,function,"Shift the zero-frequency component to the center of the spectrum. This function swaps half-spaces for all axes listed (defaults to all). Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. @compatibility(numpy) Equivalent to numpy.fft.fftshift. https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html @end_compatibility For example: ```python x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) ``` Args: x: `Tensor`, input tensor. axes: `int` or shape `tuple`, optional Axes over which to shift. Default is None, which shifts all axes. name: An optional name for the operation. Returns: A `Tensor`, The shifted tensor." 11237,ifftshift,tensorflow/tensorflow/python/ops/signal/fft_ops.py,419,function,"The inverse of fftshift. Although identical for even-length x, the functions differ by one sample for odd-length x. @compatibility(numpy) Equivalent to numpy.fft.ifftshift. https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html @end_compatibility For example: ```python x = tf.signal.ifftshift([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]]) x.numpy() # array([[ 4., -4., 3.],[-2., -1., -3.],[ 1., 2., 0.]]) ``` Args: x: `Tensor`, input tensor. axes: `int` or shape `tuple` Axes over which to calculate. Defaults to None, which shifts all axes. name: An optional name for the operation. Returns: A `Tensor`, The shifted tensor." 11238,_mel_to_hertz,tensorflow/tensorflow/python/ops/signal/mel_ops.py,36,function,"Converts frequencies in `mel_values` from the mel scale to linear scale. Args: mel_values: A `Tensor` of frequencies in the mel scale. name: An optional name for the operation. Returns: A `Tensor` of the same shape and type as `mel_values` containing linear scale frequencies in Hertz." 11239,_hertz_to_mel,tensorflow/tensorflow/python/ops/signal/mel_ops.py,54,function,"Converts frequencies in `frequencies_hertz` in Hertz to the mel scale. Args: frequencies_hertz: A `Tensor` of frequencies in Hertz. name: An optional name for the operation. Returns: A `Tensor` of the same shape and type of `frequencies_hertz` containing frequencies in the mel scale." 
11240,_validate_arguments,tensorflow/tensorflow/python/ops/signal/mel_ops.py,71,function,Checks the inputs to linear_to_mel_weight_matrix. 11241,linear_to_mel_weight_matrix,tensorflow/tensorflow/python/ops/signal/mel_ops.py,95,function,"Returns a matrix to warp linear scale spectrograms to the [mel scale][mel]. Returns a weight matrix that can be used to re-weight a `Tensor` containing `num_spectrogram_bins` linearly sampled frequency information from `[0, sample_rate / 2]` into `num_mel_bins` frequency information from `[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel]. This function follows the [Hidden Markov Model Toolkit (HTK)](http://htk.eng.cam.ac.uk/) convention, defining the mel scale in terms of a frequency in hertz according to the following formula: $$\textrm{mel}(f) = 2595 * \textrm{log}_{10}(1 + \frac{f}{700})$$ In the returned matrix, all the triangles (filterbanks) have a peak value of 1.0. For example, the returned matrix `A` can be used to right-multiply a spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear scale spectrum values (e.g. STFT magnitudes) to generate a ""mel spectrogram"" `M` of shape `[frames, num_mel_bins]`. # `S` has shape [frames, num_spectrogram_bins] # `M` has shape [frames, num_mel_bins] M = tf.matmul(S, A) The matrix can be used with `tf.tensordot` to convert an arbitrary rank `Tensor` of linear-scale spectral bins into the mel scale. # S has shape [..., num_spectrogram_bins]. # M has shape [..., num_mel_bins]. M = tf.tensordot(S, A, 1) Args: num_mel_bins: Python int. How many bands in the resulting mel spectrum. num_spectrogram_bins: An integer `Tensor`. How many bins there are in the source spectrogram data, which is understood to be `fft_size // 2 + 1`, i.e. the spectrogram only contains the nonredundant FFT bins. sample_rate: An integer or float `Tensor`. Samples per second of the input signal used to create the spectrogram. Used to figure out the frequencies corresponding to each spectrogram bin, which dictates how they are mapped into the mel scale. lower_edge_hertz: Python float. Lower bound on the frequencies to be included in the mel spectrum. This corresponds to the lower edge of the lowest triangular band. upper_edge_hertz: Python float. The desired top edge of the highest frequency band. dtype: The `DType` of the result matrix. Must be a floating point type. name: An optional name for the operation. Returns: A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`. Raises: ValueError: If `num_mel_bins`/`num_spectrogram_bins`/`sample_rate` are not positive, `lower_edge_hertz` is negative, frequency edges are incorrectly ordered, `upper_edge_hertz` is larger than the Nyquist frequency. [mel]: https://en.wikipedia.org/wiki/Mel_scale" 11242,mfccs_from_log_mel_spectrograms,tensorflow/tensorflow/python/ops/signal/mfcc_ops.py,31,function,"Computes [MFCCs][mfcc] of `log_mel_spectrograms`. Implemented with GPU-compatible ops and supports gradients. [Mel-Frequency Cepstral Coefficient (MFCC)][mfcc] calculation consists of taking the DCT-II of a log-magnitude mel-scale spectrogram. [HTK][htk]'s MFCCs use a particular scaling of the DCT-II which is almost orthogonal normalization. We follow this convention. All `num_mel_bins` MFCCs are returned and it is up to the caller to select a subset of the MFCCs based on their application. For example, it is typical to only use the first few for speech recognition, as this results in an approximately pitch-invariant representation of the signal.
For example: ```python batch_size, num_samples, sample_rate = 32, 32000, 16000.0 # A Tensor of [batch_size, num_samples] mono PCM samples in the range [-1, 1]. pcm = tf.random.normal([batch_size, num_samples], dtype=tf.float32) # A 1024-point STFT with frames of 64 ms and 75% overlap. stfts = tf.signal.stft(pcm, frame_length=1024, frame_step=256, fft_length=1024) spectrograms = tf.abs(stfts) # Warp the linear scale spectrograms into the mel-scale. num_spectrogram_bins = stfts.shape[-1].value lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80 linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix( num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz, upper_edge_hertz) mel_spectrograms = tf.tensordot( spectrograms, linear_to_mel_weight_matrix, 1) mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate( linear_to_mel_weight_matrix.shape[-1:])) # Compute a stabilized log to get log-magnitude mel-scale spectrograms. log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6) # Compute MFCCs from log_mel_spectrograms and take the first 13. mfccs = tf.signal.mfccs_from_log_mel_spectrograms( log_mel_spectrograms)[..., :13] ``` Args: log_mel_spectrograms: A `[..., num_mel_bins]` `float32`/`float64` `Tensor` of log-magnitude mel-scale spectrograms. name: An optional name for the operation. Returns: A `[..., num_mel_bins]` `float32`/`float64` `Tensor` of the MFCCs of `log_mel_spectrograms`. Raises: ValueError: If `num_mel_bins` is not positive. [mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum [htk]: https://en.wikipedia.org/wiki/HTK_(software)" 11243,overlap_and_add,tensorflow/tensorflow/python/ops/signal/reconstruction_ops.py,32,function,"Reconstructs a signal from a framed representation. Adds potentially overlapping frames of a signal with shape `[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`. The resulting tensor has shape `[..., output_size]` where output_size = (frames - 1) * frame_step + frame_length Args: signal: A [..., frames, frame_length] `Tensor`. All dimensions may be unknown, and rank must be at least 2. frame_step: An integer or scalar `Tensor` denoting overlap offsets. Must be less than or equal to `frame_length`. name: An optional name for the operation. Returns: A `Tensor` with shape `[..., output_size]` containing the overlap-added frames of `signal`'s inner-most two dimensions. Raises: ValueError: If `signal`'s rank is less than 2, or `frame_step` is not a scalar integer." 11244,_infer_frame_shape,tensorflow/tensorflow/python/ops/signal/shape_ops.py,32,function,Infers the shape of the return value of `frame`. 11245,frame,tensorflow/tensorflow/python/ops/signal/shape_ops.py,60,function,"Expands `signal`'s `axis` dimension into frames of `frame_length`. Slides a window of size `frame_length` over `signal`'s `axis` dimension with a stride of `frame_step`, replacing the `axis` dimension with `[frames, frame_length]` frames. If `pad_end` is True, window positions that are past the end of the `axis` dimension are padded with `pad_value` until the window moves fully past the end of the dimension. Otherwise, only window positions that fully overlap the `axis` dimension are produced. For example: >>> # A batch size 3 tensor of 9152 audio samples. >>> audio = tf.random.normal([3, 9152]) >>> >>> # Compute overlapping frames of length 512 with a step of 180 (frames overlap >>> # by 332 samples). 
By default, only 49 frames are generated since a frame >>> # with start position j*180 for j > 48 would overhang the end. >>> frames = tf.signal.frame(audio, 512, 180) >>> frames.shape.assert_is_compatible_with([3, 49, 512]) >>> >>> # When pad_end is enabled, the final two frames are kept (padded with zeros). >>> frames = tf.signal.frame(audio, 512, 180, pad_end=True) >>> frames.shape.assert_is_compatible_with([3, 51, 512]) If the dimension along `axis` is N, and `pad_end=False`, the number of frames can be computed by: ```python num_frames = 1 + (N - frame_size) // frame_step ``` If `pad_end=True`, the number of frames can be computed by: ```python num_frames = -(-N // frame_step) # ceiling division ``` Args: signal: A `[..., samples, ...]` `Tensor`. The rank and dimensions may be unknown. Rank must be at least 1. frame_length: The frame length in samples. An integer or scalar `Tensor`. frame_step: The frame hop size in samples. An integer or scalar `Tensor`. pad_end: Whether to pad the end of `signal` with `pad_value`. pad_value: An optional scalar `Tensor` to use where the input signal does not exist when `pad_end` is True. axis: A scalar integer `Tensor` indicating the axis to frame. Defaults to the last axis. Supports negative values for indexing from the end. name: An optional name for the operation. Returns: A `Tensor` of frames with shape `[..., num_frames, frame_length, ...]`. Raises: ValueError: If `frame_length`, `frame_step`, `pad_value`, or `axis` are not scalar." 11246,stft,tensorflow/tensorflow/python/ops/signal/spectral_ops.py,40,function,"Computes the [Short-time Fourier Transform][stft] of `signals`. Implemented with TPU/GPU-compatible ops and supports gradients. Args: signals: A `[..., samples]` `float32`/`float64` `Tensor` of real-valued signals. frame_length: An integer scalar `Tensor`. The window length in samples. frame_step: An integer scalar `Tensor`. The number of samples to step. fft_length: An integer scalar `Tensor`. The size of the FFT to apply. If not provided, uses the smallest power of 2 enclosing `frame_length`. window_fn: A callable that takes a window length and a `dtype` keyword argument and returns a `[window_length]` `Tensor` of samples in the provided datatype. If set to `None`, no windowing is used. pad_end: Whether to pad the end of `signals` with zeros when the provided frame length and step produces a frame that lies partially past its end. name: An optional name for the operation. Returns: A `[..., frames, fft_unique_bins]` `Tensor` of `complex64`/`complex128` STFT values where `fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the FFT). Raises: ValueError: If `signals` is not at least rank 1, `frame_length` is not scalar, or `frame_step` is not scalar. [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform" 11247,inverse_stft_window_fn,tensorflow/tensorflow/python/ops/signal/spectral_ops.py,101,function,"Generates a window function that can be used in `inverse_stft`. Constructs a window that is equal to the forward window with a further pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to `forward_window_fn` in the case where it would produce an exact inverse. See examples in `inverse_stft` documentation for usage. Args: frame_step: An integer scalar `Tensor`. The number of samples to step. forward_window_fn: window_fn used in the forward transform, `stft`. name: An optional name for the operation. 
Returns: A callable that takes a window length and a `dtype` keyword argument and returns a `[window_length]` `Tensor` of samples in the provided datatype. The returned window is suitable for reconstructing original waveform in inverse_stft." 11248,inverse_stft,tensorflow/tensorflow/python/ops/signal/spectral_ops.py,163,function,"Computes the inverse [Short-time Fourier Transform][stft] of `stfts`. To reconstruct an original waveform, a complementary window function should be used with `inverse_stft`. Such a window function can be constructed with `tf.signal.inverse_stft_window_fn`. Example: ```python frame_length = 400 frame_step = 160 waveform = tf.random.normal(dtype=tf.float32, shape=[1000]) stft = tf.signal.stft(waveform, frame_length, frame_step) inverse_stft = tf.signal.inverse_stft( stft, frame_length, frame_step, window_fn=tf.signal.inverse_stft_window_fn(frame_step)) ``` If a custom `window_fn` is used with `tf.signal.stft`, it must be passed to `tf.signal.inverse_stft_window_fn`: ```python frame_length = 400 frame_step = 160 window_fn = tf.signal.hamming_window waveform = tf.random.normal(dtype=tf.float32, shape=[1000]) stft = tf.signal.stft( waveform, frame_length, frame_step, window_fn=window_fn) inverse_stft = tf.signal.inverse_stft( stft, frame_length, frame_step, window_fn=tf.signal.inverse_stft_window_fn( frame_step, forward_window_fn=window_fn)) ``` Implemented with TPU/GPU-compatible ops and supports gradients. Args: stfts: A `complex64`/`complex128` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins representing a batch of `fft_length`-point STFTs where `fft_unique_bins` is `fft_length // 2 + 1` frame_length: An integer scalar `Tensor`. The window length in samples. frame_step: An integer scalar `Tensor`. The number of samples to step. fft_length: An integer scalar `Tensor`. The size of the FFT that produced `stfts`. If not provided, uses the smallest power of 2 enclosing `frame_length`. window_fn: A callable that takes a window length and a `dtype` keyword argument and returns a `[window_length]` `Tensor` of samples in the provided datatype. If set to `None`, no windowing is used. name: An optional name for the operation. Returns: A `[..., samples]` `Tensor` of `float32`/`float64` signals representing the inverse STFT for each input STFT in `stfts`. Raises: ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar, `frame_step` is not scalar, or `fft_length` is not scalar. [stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform" 11249,_enclosing_power_of_two,tensorflow/tensorflow/python/ops/signal/spectral_ops.py,283,function,Return 2**N for integer N such that 2**N >= value. 11250,mdct,tensorflow/tensorflow/python/ops/signal/spectral_ops.py,299,function,"Computes the [Modified Discrete Cosine Transform][mdct] of `signals`. Implemented with TPU/GPU-compatible ops and supports gradients. Args: signals: A `[..., samples]` `float32`/`float64` `Tensor` of real-valued signals. frame_length: An integer scalar `Tensor`. The window length in samples which must be divisible by 4. window_fn: A callable that takes a frame_length and a `dtype` keyword argument and returns a `[frame_length]` `Tensor` of samples in the provided datatype. If set to `None`, a rectangular window with a scale of 1/sqrt(2) is used. For perfect reconstruction of a signal from `mdct` followed by `inverse_mdct`, please use `tf.signal.vorbis_window`, `tf.signal.kaiser_bessel_derived_window` or `None`. 
If using another window function, make sure that w[n]^2 + w[n + frame_length // 2]^2 = 1 and w[n] = w[frame_length - n - 1] for n = 0,...,frame_length // 2 - 1 to achieve perfect reconstruction. pad_end: Whether to pad the end of `signals` with zeros when the provided frame length and step produces a frame that lies partially past its end. norm: If it is None, unnormalized dct4 is used, if it is ""ortho"" orthonormal dct4 is used. name: An optional name for the operation. Returns: A `[..., frames, frame_length // 2]` `Tensor` of `float32`/`float64` MDCT values where `frames` is roughly `samples // (frame_length // 2)` when `pad_end=False`. Raises: ValueError: If `signals` is not at least rank 1, `frame_length` is not scalar, or `frame_length` is not a multiple of `4`. [mdct]: https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform" 11251,inverse_mdct,tensorflow/tensorflow/python/ops/signal/spectral_ops.py,375,function,"Computes the inverse modified DCT of `mdcts`. To reconstruct an original waveform, the same window function should be used with `mdct` and `inverse_mdct`. Example usage: >>> @tf.function ... def compare_round_trip(): ... samples = 1000 ... frame_length = 400 ... halflen = frame_length // 2 ... waveform = tf.random.normal(dtype=tf.float32, shape=[samples]) ... waveform_pad = tf.pad(waveform, [[halflen, 0],]) ... mdct = tf.signal.mdct(waveform_pad, frame_length, pad_end=True, ... window_fn=tf.signal.vorbis_window) ... inverse_mdct = tf.signal.inverse_mdct(mdct, ... window_fn=tf.signal.vorbis_window) ... inverse_mdct = inverse_mdct[halflen: halflen + samples] ... return waveform, inverse_mdct >>> waveform, inverse_mdct = compare_round_trip() >>> np.allclose(waveform.numpy(), inverse_mdct.numpy(), rtol=1e-3, atol=1e-4) True Implemented with TPU/GPU-compatible ops and supports gradients. Args: mdcts: A `float32`/`float64` `[..., frames, frame_length // 2]` `Tensor` of MDCT bins representing a batch of `frame_length // 2`-point MDCTs. window_fn: A callable that takes a frame_length and a `dtype` keyword argument and returns a `[frame_length]` `Tensor` of samples in the provided datatype. If set to `None`, a rectangular window with a scale of 1/sqrt(2) is used. For perfect reconstruction of a signal from `mdct` followed by `inverse_mdct`, please use `tf.signal.vorbis_window`, `tf.signal.kaiser_bessel_derived_window` or `None`. If using another window function, make sure that w[n]^2 + w[n + frame_length // 2]^2 = 1 and w[n] = w[frame_length - n - 1] for n = 0,...,frame_length // 2 - 1 to achieve perfect reconstruction. norm: If ""ortho"", orthonormal inverse DCT4 is performed, if it is None, a regular dct4 followed by scaling of `1/frame_length` is performed. name: An optional name for the operation. Returns: A `[..., samples]` `Tensor` of `float32`/`float64` signals representing the inverse MDCT for each input MDCT in `mdcts` where `samples` is `(frames - 1) * (frame_length // 2) + frame_length`. Raises: ValueError: If `mdcts` is not at least rank 2. [mdct]: https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform" 11252,gcd,tensorflow/tensorflow/python/ops/signal/util_ops.py,32,function,"Returns the greatest common divisor via Euclid's algorithm. Args: a: The dividend. A scalar integer `Tensor`. b: The divisor. A scalar integer `Tensor`. name: An optional name for the operation. Returns: A scalar `Tensor` representing the greatest common divisor between `a` and `b`. Raises: ValueError: If `a` or `b` are not scalar integers." 
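The `stft`/`inverse_stft`/`inverse_stft_window_fn` entries (11246-11248) describe the reconstruction recipe. A sketch of that round trip with the default Hann window; the interior-error check at the end is an illustrative assumption, not part of the indexed docstrings:

```python
import tensorflow as tf

frame_length, frame_step = 400, 160
waveform = tf.random.normal([8000], dtype=tf.float32)

stfts = tf.signal.stft(waveform, frame_length, frame_step)

# Pair the forward window (default Hann) with its amplitude-corrected inverse.
reconstructed = tf.signal.inverse_stft(
    stfts, frame_length, frame_step,
    window_fn=tf.signal.inverse_stft_window_fn(frame_step))

# The interior matches closely; the edges lack full overlap coverage.
err = tf.reduce_max(tf.abs(waveform[frame_length:7600] -
                           reconstructed[frame_length:7600]))
print(err.numpy())  # small
```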
11253,_check_params,tensorflow/tensorflow/python/ops/signal/window_ops.py,35,function,"Check window_length and dtype params. Args: window_length: A scalar value or `Tensor`. dtype: The data type to produce. Must be a floating point type. Returns: window_length converted to a tensor of type int32. Raises: ValueError: If `dtype` is not a floating point type or window_length is not a scalar." 11254,kaiser_window,tensorflow/tensorflow/python/ops/signal/window_ops.py,58,function,"Generate a [Kaiser window][kaiser]. Args: window_length: A scalar `Tensor` indicating the window length to generate. beta: Beta parameter for Kaiser window, see reference below. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A `Tensor` of shape `[window_length]` of type `dtype`. [kaiser]: https://docs.scipy.org/doc/numpy/reference/generated/numpy.kaiser.html" 11255,kaiser_bessel_derived_window,tensorflow/tensorflow/python/ops/signal/window_ops.py,98,function,"Generate a [Kaiser Bessel derived window][kbd]. Args: window_length: A scalar `Tensor` indicating the window length to generate. beta: Beta parameter for Kaiser window. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A `Tensor` of shape `[window_length]` of type `dtype`. [kbd]: https://en.wikipedia.org/wiki/Kaiser_window#Kaiser%E2%80%93Bessel-derived_(KBD)_window" 11256,vorbis_window,tensorflow/tensorflow/python/ops/signal/window_ops.py,126,function,"Generate a [Vorbis power complementary window][vorbis]. Args: window_length: A scalar `Tensor` indicating the window length to generate. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A `Tensor` of shape `[window_length]` of type `dtype`. [vorbis]: https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform#Window_functions" 11257,hann_window,tensorflow/tensorflow/python/ops/signal/window_ops.py,151,function,"Generate a [Hann window][hann]. Args: window_length: A scalar `Tensor` indicating the window length to generate. periodic: A bool `Tensor` indicating whether to generate a periodic or symmetric window. Periodic windows are typically used for spectral analysis while symmetric windows are typically used for digital filter design. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A `Tensor` of shape `[window_length]` of type `dtype`. Raises: ValueError: If `dtype` is not a floating point type. [hann]: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows" 11258,hamming_window,tensorflow/tensorflow/python/ops/signal/window_ops.py,177,function,"Generate a [Hamming][hamming] window. Args: window_length: A scalar `Tensor` indicating the window length to generate. periodic: A bool `Tensor` indicating whether to generate a periodic or symmetric window. Periodic windows are typically used for spectral analysis while symmetric windows are typically used for digital filter design. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A `Tensor` of shape `[window_length]` of type `dtype`. Raises: ValueError: If `dtype` is not a floating point type. [hamming]: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows" 11259,_raised_cosine_window,tensorflow/tensorflow/python/ops/signal/window_ops.py,203,function,"Helper function for computing a raised cosine window. 
Args: name: Name to use for the scope. default_name: Default name to use for the scope. window_length: A scalar `Tensor` or integer indicating the window length. periodic: A bool `Tensor` indicating whether to generate a periodic or symmetric window. dtype: A floating point `DType`. a: The alpha parameter to the raised cosine window. b: The beta parameter to the raised cosine window. Returns: A `Tensor` of shape `[window_length]` of type `dtype`. Raises: ValueError: If `dtype` is not a floating point type or `window_length` is not scalar or `periodic` is not scalar." 11260,StructuredTensor,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,43,class,"A multidimensional collection of structures with the same schema. A **`StructuredTensor`** is a multi-dimensional collection of ***structures*** with the same ***schema***, where: * A ***schema*** is a collection of fields, each of which has a name and type. * A ***structure*** maps each field in the schema to a tensor value (which could be a nested StructuredTensor). As an important special case, a 1D `StructuredTensor` encodes a 2D table, where columns are heterogeneous `Tensor`s, and rows are the aligned elements in each of those `Tensor`s. Internally, StructuredTensors use a ""field-major"" encoding: for each leaf field, there is a single tensor that stores the value of that field for all structures in the `StructuredTensor`. ### Examples >>> # A scalar StructuredTensor describing a single person. >>> s1 = StructuredTensor.from_pyval( ... {""age"": 82, ""nicknames"": [""Bob"", ""Bobby""]}) >>> s1.shape TensorShape([]) >>> s1[""age""] <tf.Tensor: shape=(), dtype=int32, numpy=82> >>> # A vector StructuredTensor describing three people. >>> s2 = StructuredTensor.from_pyval([ ... {""age"": 12, ""nicknames"": [""Josaphine""]}, ... {""age"": 82, ""nicknames"": [""Bob"", ""Bobby""]}, ... {""age"": 42, ""nicknames"": [""Elmo""]}]) >>> s2.shape TensorShape([3]) >>> s2[0][""age""] <tf.Tensor: shape=(), dtype=int32, numpy=12> ### Field Paths A *field path* is a tuple of field names, specifying the path to a nested field." 11261,StructuredTensorSpec,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,716,class,Type specification for `StructuredTensor`s. 11262,_convert_to_structured_field_value,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,799,function,"Converts `value` to a Tensor, RaggedTensor, or StructuredTensor." 11263,_find_shape_dtype,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,813,function,"Return a consistent dtype for fields, nrows, & row_partitions." 11264,_merge_nrows,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,834,function,"Merges `nrows` with `nrows(value)`. Checks that `value` has the expected number of rows (`nrows`), and returns `nrows`. If `validate` is true, then add validation ops that check that the `nrows` values match. Args: nrows: scalar integer Tensor. static_nrows: tf.Dimension: static value of nrows, if known. value: Tensor or RaggedTensor or StructuredTensor dtype: dtype for `nrows`. validate: bool -- whether to add validation ops. Returns: A tuple `(nrows, static_nrows)`." 11265,_merge_row_partitions,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,871,function,Merges `row_partitions` with `row_partitions(value)`. 11266,_row_partitions_for_tensor,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,893,function,Returns the row partitions for a tf.Tensor.
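The field-major encoding described in the `StructuredTensor` entry above is easy to see in action. A short hedged sketch (note that `StructuredTensor` lives in an internal module at this point, so the import path below mirrors the file listed in the index rather than a public API):

```python
import tensorflow as tf
from tensorflow.python.ops.structured.structured_tensor import StructuredTensor

# A vector StructuredTensor: one structure per person, stored field-major,
# so indexing by field name yields one tensor spanning all structures.
people = StructuredTensor.from_pyval([
    {"age": 12, "nicknames": ["Josaphine"]},
    {"age": 82, "nicknames": ["Bob", "Bobby"]},
])
print(people["age"])        # tf.Tensor([12 82], shape=(2,), dtype=int32)
print(people[0]["age"])     # tf.Tensor(12, shape=(), dtype=int32)
print(people["nicknames"])  # a tf.RaggedTensor, since row lengths differ
```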
11267,_row_partitions_for_ragged_tensor,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,899,function,Returns the row partitions for a tf.RaggedTensor. 11268,_row_partitions_for_uniform_shape,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,910,function,"Returns row partitions for the given shape Tensor. Args: shape: A vector describing a uniform shape. rank: The number of dimensions to generate row partitions for Returns: A list of (rank-1) `RowPartition`s with uniform row length." 11269,_pyval_field_major_to_node_major,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,930,function,"Regroup each field (k, v) from dict-of-list to list-of-dict. Given a ""field-major"" encoding of the StructuredTensor (which maps each key to a single nested list containing the values for all structs), return a corresponding ""node-major"" encoding, consisting of a nested list of dicts. `shape` is used to determine how far to recurse; and if `keys` is empty it is used to determine the sizes for empty lists. Args: keys: The field names (list of string). values: The field values (list of python values). Must have the same length as `keys`. shape: A tuple specifying the shape of the `StructuredTensor`. Returns: A nested list of dict." 11270,_pyval_find_struct_keys_and_depth,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,964,function,"Finds the keys & depth of nested dictionaries in `pyval`. Args: pyval: A nested structure of lists, tuples, and dictionaries. keys: (output parameter) A set, which will be updated with any keys that are found in the nested dictionaries. Returns: The nesting depth of dictionaries in `pyval`, or `None` if `pyval` does not contain any dictionaries. Raises: ValueError: If dictionaries have inconsistent depth." 11271,_pyval_update_fields,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,995,function,"Append the field values from `pyval` to `fields`. Args: pyval: A python `dict`, or nested list/tuple of `dict`, whose value(s) should be appended to `fields`. fields: A dictionary mapping string keys to field values. Field values extracted from `pyval` are appended to this dictionary's values. depth: The depth at which `pyval` should be appended to the field values." 11272,_pyval_empty_list_depth,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,1018,function,"Find the max depth for nested empty lists. Args: pyval: A nested python list. Returns: The maximum depth of empty lists in `pyval`, or None if `pyval` contains anything other than nested empty lists." 11273,_replace_row_partitions,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,1040,function,"Updates `value` to use `new_partitions` as its (outer) row partitions. This is used to ensure that all fields in a `StructuredTensor` use identical `RowPartition` objects for the shared dimensions. In particular, `StructuredTensor.from_fields` first merges all of the row partitions from any fields, and then replaces the outer row partitions of all fields with the merged row partitions (using this function). Args: value: A `Tensor`, `RaggedTensor`, or `StructuredTensor`. new_partitions: A list of row-partitions that should be used by `value`. Must be equivalent to `value`'s current row partitions. Returns: A value that is equivalent to `value`, where outer row partitions have been replaced by `new_partitions`." 
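The row-partition helpers above are internal, but the effect of partitioning an outer dimension can be reproduced with the public ragged API. A small sketch using the same `[2, 0, 1]` row lengths as the `_partition_outer_dimension` example that follows:

```python
import tensorflow as tf

# Grouping the values [1, 2, 3] with row lengths [2, 0, 1] mirrors what
# RowPartition.from_row_lengths([2, 0, 1]) does in the example below.
rt = tf.RaggedTensor.from_row_lengths(values=[1, 2, 3], row_lengths=[2, 0, 1])
print(rt)  # <tf.RaggedTensor [[1, 2], [], [3]]>
```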
11274,_partition_outer_dimension,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,1078,function,"Partitions the outer dimension of `value` using `row_partitions`. Examples: >>> partition = RowPartition.from_row_lengths([2, 0, 1]) >>> _partition_outer_dimension(tf.constant([1, 2, 3]), partition) <tf.RaggedTensor [[1, 2], [], [3]]> >>> struct_value = StructuredTensor.from_pyval( ... [{'x': 1}, {'x': 2}, {'x': 3}]) >>> _partition_outer_dimension(struct_value, partition) <StructuredTensor(fields={""x"": <tf.RaggedTensor [[1, 2], [], [3]]>}, shape=(3, None))> Args: value: Tensor, RaggedTensor, or StructuredTensor row_partition: RowPartition Returns: A value with the same type as `value`, where `result.rank = value.rank + 1`." 11275,_merge_dims,tensorflow/tensorflow/python/ops/structured/structured_tensor.py,1129,function,Merges `outer_axis...inner_axis` of `value` into a single dimension. 11276,_SliceBuilder,tensorflow/tensorflow/python/ops/structured/structured_tensor_slice_test.py,34,class,"Helper to construct arguments for __getitem__. Usage: _SliceBuilder()[<slice spec>] returns the slice_spec that Python generates for <slice spec>." 11277,_make_tensor_slice_spec,tensorflow/tensorflow/python/ops/structured/structured_tensor_slice_test.py,49,function,"Wraps all integers in an extended slice spec w/ a tensor. This function is used to help test slicing when the slice spec contains tensors, rather than integers. Args: slice_spec: The extended slice spec. use_constant: If true, then wrap each integer with a tf.constant. If false, then wrap each integer with a tf.placeholder. Returns: A copy of slice_spec, but with each integer i replaced with tf.constant(i)." 11278,StructuredTensorSliceTest,tensorflow/tensorflow/python/ops/structured/structured_tensor_slice_test.py,126,class, 11279,StructuredTensorSpecTest,tensorflow/tensorflow/python/ops/structured/structured_tensor_spec_test.py,49,class, 11280,StructuredTensorTest,tensorflow/tensorflow/python/ops/structured/structured_tensor_test.py,46,class, 11281,track_usage,tensorflow/tensorflow/python/platform/analytics.py,21,function,"No usage tracking for external library. Args: tool_id: A string identifier for tool to be tracked. tags: list of string tags that will be added to the tracking." 11282,_parse_flags_tolerate_undef,tensorflow/tensorflow/python/platform/app.py,29,function,"Parse args, returning any unknown flags (ABSL defaults to crashing)." 11283,run,tensorflow/tensorflow/python/platform/app.py,35,function,Runs the program with an optional 'main' function and 'argv' list. 11284,main,tensorflow/tensorflow/python/platform/app_test.py,29,function, 11285,_rename_function,tensorflow/tensorflow/python/platform/benchmark.py,56,function,Rename the given function so that its name appears in the stack trace. 11286,_global_report_benchmark,tensorflow/tensorflow/python/platform/benchmark.py,80,function,"Method for recording a benchmark directly. Args: name: The BenchmarkEntry name. iters: (optional) How many iterations were run cpu_time: (optional) Total cpu time in seconds wall_time: (optional) Total wall time in seconds throughput: (optional) Throughput (in MB/s) extras: (optional) Dict mapping string keys to additional benchmark info. metrics: (optional) A list of dict representing metrics generated by the benchmark. Each dict should contain keys 'name' and 'value'. A dict can optionally contain keys 'min_value' and 'max_value'. Raises: TypeError: if extras is not a dict. IOError: if the benchmark output file already exists." 11287,_BenchmarkRegistrar,tensorflow/tensorflow/python/platform/benchmark.py,161,class,The Benchmark class registrar. Used by abstract Benchmark class.
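Given the registrar above and the abstract `Benchmark` class that follows, a hedged sketch of a user-defined benchmark (the class name, shapes, and timing loop are illustrative; only methods whose names start with "benchmark" are picked up by the runner):

```python
import time
import tensorflow as tf

# Subclasses of tf.test.Benchmark register themselves in the global benchmark
# registry; the runner only invokes methods whose names start with "benchmark".
class MatmulBenchmark(tf.test.Benchmark):

  def benchmarkMatmul(self):
    x = tf.random.normal([256, 256])
    iters = 100
    start = time.time()
    for _ in range(iters):
      _ = tf.matmul(x, x)
    wall_time = (time.time() - start) / iters
    self.report_benchmark(iters=iters, wall_time=wall_time,
                          extras={"shape": "256x256"})
```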
11288,ParameterizedBenchmark,tensorflow/tensorflow/python/platform/benchmark.py,171,class,Metaclass to generate parameterized benchmarks. 11289,Benchmark,tensorflow/tensorflow/python/platform/benchmark.py,203,class,"Abstract class that provides helper functions for running benchmarks. Any class subclassing this one is immediately registered in the global benchmark registry. Only methods whose names start with the word ""benchmark"" will be run during benchmarking." 11290,benchmark_config,tensorflow/tensorflow/python/platform/benchmark.py,275,function,"Returns a tf.compat.v1.ConfigProto for disabling the dependency optimizer. Returns: A TensorFlow ConfigProto object." 11291,TensorFlowBenchmark,tensorflow/tensorflow/python/platform/benchmark.py,288,class,Abstract class that provides helpers for TensorFlow benchmarks. 11292,_run_benchmarks,tensorflow/tensorflow/python/platform/benchmark.py,423,function,"Run benchmarks that match regex `regex`. This function goes through the global benchmark registry, and matches benchmark class and method names of the form `module.name.BenchmarkClass.benchmarkMethod` to the given regex. If a method matches, it is run. Args: regex: The string regular expression to match Benchmark classes against. Raises: ValueError: If no benchmarks were selected by the input regex." 11293,benchmarks_main,tensorflow/tensorflow/python/platform/benchmark.py,467,function,"Run benchmarks as declared in argv. Args: true_main: True main function to run if benchmarks are not requested. argv: the command line arguments (if None, uses sys.argv)." 11294,BenchmarkTest,tensorflow/tensorflow/python/platform/benchmark_test.py,27,class, 11295,BuildInfoTest,tensorflow/tensorflow/python/platform/build_info_test.py,25,class, 11296,enclosing_tpu_context,tensorflow/tensorflow/python/platform/device_context.py,21,function, 11297,_wrap_define_function,tensorflow/tensorflow/python/platform/flags.py,44,function,Wraps absl.flags's define functions so tf.flags accepts old names. 11298,_FlagValuesWrapper,tensorflow/tensorflow/python/platform/flags.py,64,class,"Wrapper class for absl.flags.FLAGS. The difference is that tf.flags.FLAGS implicitly parses flags with sys.argv when accessing the FLAGS values before it's explicitly parsed, while absl.flags.FLAGS raises an exception." 11299,FlagsTest,tensorflow/tensorflow/python/platform/flags_test.py,51,class, 11300,GFile,tensorflow/tensorflow/python/platform/gfile.py,41,class,"File I/O wrappers without thread locking. The main roles of the `tf.io.gfile` module are: 1. To provide an API that is close to Python's file I/O objects, and 2. To provide an implementation based on TensorFlow's C++ FileSystem API. The C++ FileSystem API supports multiple file system implementations, including local files, Google Cloud Storage (using a `gs://` prefix), and HDFS (using an `hdfs://` prefix). TensorFlow exports these as `tf.io.gfile`, so that you can use these implementations for saving and loading checkpoints, writing to TensorBoard logs, and accessing training data (among other uses). However, if all your files are local, you can use the regular Python file API without any problem. *Note*: though similar to Python's I/O implementation, there are semantic differences to make `tf.io.gfile` more efficient for backing filesystems. For example, a write mode file will not be opened until the first write call, to minimize RPC invocations in network filesystems." 11301,FastGFile,tensorflow/tensorflow/python/platform/gfile.py,68,class,"File I/O wrappers without thread locking.
Note that this is somewhat like builtin Python file I/O, but there are semantic differences to make it more efficient for some backing filesystems. For example, a write mode file will not be opened until the first write call (to minimize RPC invocations in network filesystems)." 11302,g_main,tensorflow/tensorflow/python/platform/googletest.py,53,function,Delegate to absltest.main. 11303,main,tensorflow/tensorflow/python/platform/googletest.py,60,function, 11304,GetTempDir,tensorflow/tensorflow/python/platform/googletest.py,69,function,Return a temporary directory for tests to use. 11305,test_src_dir_path,tensorflow/tensorflow/python/platform/googletest.py,97,function,"Creates an absolute test srcdir path given a relative path. Args: relative_path: a path relative to tensorflow root. e.g. ""contrib/session_bundle/example"". Returns: An absolute path to the linked in runfiles." 11306,StatefulSessionAvailable,tensorflow/tensorflow/python/platform/googletest.py,111,function, 11307,StubOutForTesting,tensorflow/tensorflow/python/platform/googletest.py,116,class,"Support class for stubbing methods out for unit testing. Sample Usage: You want os.path.exists() to always return true during testing. stubs = StubOutForTesting() stubs.Set(os.path, 'exists', lambda x: 1) ... stubs.CleanUp() The above changes os.path.exists into a lambda that returns 1. Once the ... part of the code finishes, the CleanUp() looks up the old value of os.path.exists and restores it." 11308,EventLoaderTest,tensorflow/tensorflow/python/platform/logging_test.py,24,class, 11309,get_default_communication_protocol,tensorflow/tensorflow/python/platform/remote_utils.py,21,function, 11310,load_resource,tensorflow/tensorflow/python/platform/resource_loader.py,35,function,"Load the resource at the given path, where path is relative to tensorflow/. Args: path: a string resource path relative to tensorflow/. Returns: The contents of that resource. Raises: IOError: If the path is not found, or the resource can't be opened." 11311,get_data_files_path,tensorflow/tensorflow/python/platform/resource_loader.py,53,function,"Get a direct path to the data files colocated with the script. Returns: The directory where files specified in data attribute of py_test and py_binary are stored." 11312,get_root_dir_with_all_resources,tensorflow/tensorflow/python/platform/resource_loader.py,64,function,"Get a root directory containing all the data attributes in the build rule. Returns: The path to the specified file present in the data attribute of py_test or py_binary. Falls back to returning the same as get_data_files_path if it fails to detect a bazel runfiles directory." 11313,get_path_to_datafile,tensorflow/tensorflow/python/platform/resource_loader.py,104,function,"Get the path to the specified file in the data dependencies. The path is relative to tensorflow/ Args: path: a string resource path relative to tensorflow/ Returns: The path to the specified file present in the data attribute of py_test or py_binary. Raises: IOError: If the path is not found, or the resource can't be opened." 11314,readahead_file_path,tensorflow/tensorflow/python/platform/resource_loader.py,134,function,Readahead files not implemented; simply returns given path. 11315,ResourceLoaderTest,tensorflow/tensorflow/python/platform/resource_loader_test.py,24,class, 11316,preload_check,tensorflow/tensorflow/python/platform/self_check.py,34,function,"Raises an exception if the environment is not correctly configured.
Raises: ImportError: If the check detects that the environment is not correctly configured, and attempting to load the TensorFlow runtime will fail." 11317,StacktraceHandlerTest,tensorflow/tensorflow/python/platform/stacktrace_handler_test.py,38,class, 11318,SetupStatusBarInsideGoogle,tensorflow/tensorflow/python/platform/status_bar.py,23,function, 11319,get_include,tensorflow/tensorflow/python/platform/sysconfig.py,33,function,"Get the directory containing the TensorFlow C++ header files. Returns: The directory as string." 11320,get_lib,tensorflow/tensorflow/python/platform/sysconfig.py,48,function,"Get the directory containing the TensorFlow framework library. Returns: The directory as string." 11321,get_compile_flags,tensorflow/tensorflow/python/platform/sysconfig.py,59,function,"Get the compilation flags for custom operators. Returns: The compilation flags." 11322,get_link_flags,tensorflow/tensorflow/python/platform/sysconfig.py,72,function,"Get the link flags for custom operators. Returns: The link flags." 11323,get_build_info,tensorflow/tensorflow/python/platform/sysconfig.py,91,function,"Get a dictionary describing TensorFlow's build environment. Values are generated when TensorFlow is compiled, and are static for each TensorFlow package. The return value is a dictionary with string keys such as: - cuda_version - cudnn_version - is_cuda_build - is_rocm_build - msvcp_dll_names - nvcuda_dll_name - cudart_dll_name - cudnn_dll_name Note that the actual keys and values returned by this function are subject to change across different versions of TensorFlow or across platforms. Returns: A Dictionary describing TensorFlow's build environment." 11324,SysconfigTest,tensorflow/tensorflow/python/platform/sysconfig_test.py,25,class, 11325,main,tensorflow/tensorflow/python/platform/test.py,55,function,Runs all unit tests. 11326,get_temp_dir,tensorflow/tensorflow/python/platform/test.py,62,function,"Returns a temporary directory for use during tests. There is no need to delete the directory after the test. Returns: The temporary directory." 11327,test_src_dir_path,tensorflow/tensorflow/python/platform/test.py,74,function,"Creates an absolute test srcdir path given a relative path. Args: relative_path: a path relative to tensorflow root. e.g. ""core/platform"". Returns: An absolute path to the linked in runfiles." 11328,is_built_with_cuda,tensorflow/tensorflow/python/platform/test.py,88,function,Returns whether TensorFlow was built with CUDA (GPU) support. 11329,is_built_with_rocm,tensorflow/tensorflow/python/platform/test.py,94,function,Returns whether TensorFlow was built with ROCm (GPU) support. 11330,is_built_with_gpu_support,tensorflow/tensorflow/python/platform/test.py,100,function,Returns whether TensorFlow was built with GPU (i.e. CUDA or ROCm) support. 11331,is_built_with_xla,tensorflow/tensorflow/python/platform/test.py,106,function,Returns whether TensorFlow was built with XLA support. 11332,_get_caller,tensorflow/tensorflow/python/platform/tf_logging.py,45,function,Returns a code and frame object for the lowest non-logging stack frame. 11333,_logger_find_caller,tensorflow/tensorflow/python/platform/tf_logging.py,64,function, 11334,_logger_find_caller,tensorflow/tensorflow/python/platform/tf_logging.py,75,function, 11335,_logger_find_caller,tensorflow/tensorflow/python/platform/tf_logging.py,85,function, 11336,get_logger,tensorflow/tensorflow/python/platform/tf_logging.py,94,function,Return TF logger instance.
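The `get_compile_flags`/`get_link_flags` entries above supply the flags needed when compiling custom operators against an installed TensorFlow. A small sketch (the `my_op.cc`/`my_op.so` filenames are hypothetical placeholders):

```python
import tensorflow as tf

# Assemble a g++ command line for a custom op shared library from the
# sysconfig helpers listed above. my_op.cc stands in for your kernel source.
cflags = tf.sysconfig.get_compile_flags()
lflags = tf.sysconfig.get_link_flags()
cmd = ["g++", "-shared", "-fPIC", "my_op.cc", "-o", "my_op.so"] + cflags + lflags
print(" ".join(cmd))
print(tf.sysconfig.get_include())  # directory holding the C++ headers
```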
11337,log,tensorflow/tensorflow/python/platform/tf_logging.py,147,function, 11338,debug,tensorflow/tensorflow/python/platform/tf_logging.py,152,function, 11339,error,tensorflow/tensorflow/python/platform/tf_logging.py,157,function, 11340,fatal,tensorflow/tensorflow/python/platform/tf_logging.py,162,function, 11341,info,tensorflow/tensorflow/python/platform/tf_logging.py,167,function, 11342,warn,tensorflow/tensorflow/python/platform/tf_logging.py,172,function, 11343,warning,tensorflow/tensorflow/python/platform/tf_logging.py,177,function, 11344,TaskLevelStatusMessage,tensorflow/tensorflow/python/platform/tf_logging.py,200,function, 11345,flush,tensorflow/tensorflow/python/platform/tf_logging.py,205,function, 11346,vlog,tensorflow/tensorflow/python/platform/tf_logging.py,211,function, 11347,_GetNextLogCountPerToken,tensorflow/tensorflow/python/platform/tf_logging.py,215,function,"Wrapper for _log_counter_per_token. Args: token: The token for which to look up the count. Returns: The number of times this function has been called with *token* as an argument (starting at 0)" 11348,log_every_n,tensorflow/tensorflow/python/platform/tf_logging.py,231,function,"Log 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg." 11349,log_first_n,tensorflow/tensorflow/python/platform/tf_logging.py,248,function,"Log 'msg % args' at level 'level' only the first 'n' times. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg." 11350,log_if,tensorflow/tensorflow/python/platform/tf_logging.py,264,function,Log 'msg % args' at level 'level' only if condition is fulfilled. 11351,_GetFileAndLine,tensorflow/tensorflow/python/platform/tf_logging.py,270,function,"Returns (filename, linenumber) for the stack frame." 11352,google2_log_prefix,tensorflow/tensorflow/python/platform/tf_logging.py,278,function,Assemble a logline prefix using the google2 format. 11353,get_verbosity,tensorflow/tensorflow/python/platform/tf_logging.py,313,function,Return how much logging output will be produced. 11354,set_verbosity,tensorflow/tensorflow/python/platform/tf_logging.py,319,function,Sets the threshold for what messages will be logged. 11355,_get_thread_id,tensorflow/tensorflow/python/platform/tf_logging.py,324,function,"Get id of current thread, suitable for logging as an unsigned quantity." 11356,_graph_string,tensorflow/tensorflow/python/profiler/model_analyzer.py,51,function,Helper to serialize a graph to string. 11357,_build_options,tensorflow/tensorflow/python/profiler/model_analyzer.py,59,function,"Build tfprof.OptionsProto. Args: options: A dictionary of options. Returns: tfprof.OptionsProto." 11358,_build_advisor_options,tensorflow/tensorflow/python/profiler/model_analyzer.py,106,function,"Build tfprof.AdvisorOptionsProto. Args: options: A dictionary of options. See ALL_ADVICE example. Returns: tfprof.AdvisorOptionsProto." 11359,Profiler,tensorflow/tensorflow/python/profiler/model_analyzer.py,126,class,"TensorFlow multi-step profiler. https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md ```python Typical use case: # Currently we are only allowed to create 1 profiler per process.
profiler = Profiler(sess.graph) for i in xrange(total_steps): if i % 10000 == 0: run_meta = tf.compat.v1.RunMetadata() _ = sess.run(..., options=tf.compat.v1.RunOptions( trace_level=tf.RunOptions.FULL_TRACE), run_metadata=run_meta) profiler.add_step(i, run_meta) # Profile the parameters of your model. profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder .trainable_variables_parameter())) # Or profile the timing of your model operations. opts = option_builder.ProfileOptionBuilder.time_and_memory() profiler.profile_operations(options=opts) # Or you can generate a timeline: opts = (option_builder.ProfileOptionBuilder( option_builder.ProfileOptionBuilder.time_and_memory()) .with_step(i) .with_timeline_output(filename).build()) profiler.profile_graph(options=opts) else: _ = sess.run(...) # Auto detect problems and generate advice. profiler.advise() ```" 11360,profile,tensorflow/tensorflow/python/profiler/model_analyzer.py,310,function,"Profile model. Tutorials and examples can be found in: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/profiler/g3doc/python_api.md Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. run_meta: optional tensorflow.RunMetadata proto. It is necessary to support run time information profiling, such as time and memory. op_log: tensorflow.tfprof.OpLogProto proto. User can assign ""types"" to graph nodes with op_log. ""types"" allow user to flexibly group and account profiles using options['accounted_type_regexes']. cmd: string. Either 'op', 'scope', 'graph' or 'code'. 'op' view organizes profile using operation type. (e.g. MatMul) 'scope' view organizes profile using graph node name scope. 'graph' view organizes profile using graph node inputs/outputs. 'code' view organizes profile using Python call stack. options: A dict of options. See core/profiler/g3doc/options.md. Returns: If cmd is 'scope' or 'graph', returns GraphNodeProto proto. If cmd is 'op' or 'code', returns MultiGraphNodeProto proto. Side effect: stdout/file/timeline.json depending on options['output']" 11361,advise,tensorflow/tensorflow/python/profiler/model_analyzer.py,385,function,"Auto profile and advise. Builds profiles and automatically checks anomalies of various aspects. For more details: https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. run_meta: optional tensorflow.RunMetadata proto. It is necessary to support run time information profiling, such as time and memory. options: see ALL_ADVICE example above. Default checks everything. Returns: AdviceProto proto" 11362,PrintModelAnalysisTest,tensorflow/tensorflow/python/profiler/model_analyzer_test.py,51,class, 11363,ProfileOptionBuilder,tensorflow/tensorflow/python/profiler/option_builder.py,27,class,"Option Builder for Profiling API.
For a tutorial on the options, see https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/g3doc/options.md ```python # Users can use pre-built options: opts = ( tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()) # Or, build your own options: opts = (tf.compat.v1.profiler.ProfileOptionBuilder() .with_max_depth(10) .with_min_micros(1000) .select(['accelerator_micros']) .with_stdout_output() .build()) # Or customize the pre-built options: opts = (tf.compat.v1.profiler.ProfileOptionBuilder( tf.profiler.ProfileOptionBuilder.time_and_memory()) .with_displaying_options(show_name_regexes=['.*rnn.*']) .build()) # Finally, profiling with the options: _ = tf.compat.v1.profiler.profile(tf.compat.v1.get_default_graph(), run_meta=run_meta, cmd='scope', options=opts) ```" 11364,StringTable,tensorflow/tensorflow/python/profiler/pprof_profiler.py,63,class,Keeps track of strings to add to string_table in pprof proto. 11365,Functions,tensorflow/tensorflow/python/profiler/pprof_profiler.py,104,class,Keeps track of `Function` protos for pprof profile. 11366,Locations,tensorflow/tensorflow/python/profiler/pprof_profiler.py,148,class,"Keeps track of `Location` protos for pprof profile. `Locations` store information about function call locations." 11367,Samples,tensorflow/tensorflow/python/profiler/pprof_profiler.py,205,class,"Keeps track of `Sample` protos for pprof profile. Samples store the following statistics in order: count, all_time, op_time" 11368,PprofProfiler,tensorflow/tensorflow/python/profiler/pprof_profiler.py,258,class,Creates profiles in pprof format. 11369,get_profiles,tensorflow/tensorflow/python/profiler/pprof_profiler.py,389,function,"Generate profiles in pprof format. See https://github.com/google/pprof/blob/master/proto/profile.proto for pprof proto format. Args: graph: A `Graph` object. run_metadata: A `RunMetadata` proto. Returns: A dictionary mapping from device name to pprof proto for that device." 11370,profile,tensorflow/tensorflow/python/profiler/pprof_profiler.py,405,function,"Generate profiles in pprof format. See https://github.com/google/pprof/blob/master/proto/profile.proto for pprof proto format. Args: graph: A `Graph` object. run_metadata: A `RunMetadata` proto. output_dir: (string) Directory to output pprof profile to. Profile files for each device will be stored in compressed serialized proto format. If output_dir is None, profile protos will be printed to stdout instead. Returns: List of output files created by this profile call. (Note: this list will be empty if output_dir is None)" 11371,PprofProfilerTest,tensorflow/tensorflow/python/profiler/pprof_profiler_test.py,34,class, 11372,_profiled_init,tensorflow/tensorflow/python/profiler/profile_context.py,40,function,Overwrites the session.__init__. 11373,_profiled_run,tensorflow/tensorflow/python/profiler/profile_context.py,45,function,Overwrites the session.run(). 11374,ProfileContext,tensorflow/tensorflow/python/profiler/profile_context.py,113,class,"A Context that captures RunMetadata and performs profiling. ```python # Trace steps 100~200, profile at [150, 200] and dump profile at 200. with profile_context.ProfileContext('/tmp/train_dir', trace_steps=range(100, 200, 3), dump_steps=[200]) as pctx: opts = tf.profiler.ProfileOptionBuilder.time_and_memory() pctx.add_auto_profiling('op', opts, [150, 200]) train_loop(). # Tracing only. with profile_context.tfprof.ProfileContext('/tmp/train_dir') as pctx: # Run train/eval loop for at least a few hundred steps.
Profiles will be # dumped to train_dir. Use web UI or command line to do profiling. train_loop(). # When session object is available, do explicit trace, profile and dump. with profile_context.ProfileContext('/tmp/train_dir', trace_steps=[], dump_steps=[]) as pctx: opts = tf.profiler.ProfileOptionBuilder.time_and_memory() pctx.trace_next_step() _ = session.run(train_op) pctx.profiler.profile_operations(options=opts) ``` Args: profile_dir: Directory to store profiles. trace_steps: A list of session run steps to trace. If None, use pre-defined steps. dump_steps: A list of steps to dump the profile to `profile_dir`. If None, use pre-defined steps. enabled: If false, everything is disabled with minimal overhead. It allows user to only enable profiling when needed. debug: If true, also dumps the raw trace RunMetadata text file to profile_dir. And print debugging message. Useful for bug report." 11375,ProfilerContextTest,tensorflow/tensorflow/python/profiler/profile_context_test.py,37,class, 11376,trace,tensorflow/tensorflow/python/profiler/profiler_client.py,29,function,"Sends grpc requests to profiler server to perform on-demand profiling. This method will block caller thread until it receives tracing result. This method supports CPU, GPU, and Cloud TPU. This method supports profiling a single host for CPU, GPU, TPU, as well as multiple TPU workers. The profiled results will be saved to your specified TensorBoard log directory (e.g. the directory you save your model checkpoints). Use the TensorBoard profile plugin to view the visualization and analysis results. Args: service_addr: gRPC address of profiler service e.g. grpc://localhost:6009. logdir: Path of TensorBoard log directory e.g. /tmp/tb_log. duration_ms: Duration of tracing or monitoring in ms. worker_list: Optional. The list of workers that we are about to profile in the current session (TPU only). num_tracing_attempts: Optional. Automatically retry N times when no trace event is collected (default 3). options: profiler.experimental.ProfilerOptions namedtuple for miscellaneous profiler options. Raises: UnavailableError: If no trace event is collected. Example usage (CPU/GPU): # Start a profiler server before your model runs. ```python tf.profiler.experimental.server.start(6009) # your model code. # Send gRPC request to the profiler server to collect a trace of your model. ```python tf.profiler.experimental.client.trace('grpc://localhost:6009', '/tmp/tb_log', 2000) Example usage (TPU): # Send gRPC request to a TPU worker to collect a trace of your model. A # profiler service has been started in the TPU worker at port 8466. ```python # E.g. your TPU IP address is 10.0.0.2 and you want to profile for 2 seconds. tf.profiler.experimental.client.trace('grpc://10.0.0.2:8466', 'gs://your_tb_dir', 2000) Example usage (Multiple TPUs): # Send gRPC request to a TPU pod to collect a trace of your model on multiple # TPUs. A profiler service has been started in all the TPU workers at the # port 8466. ```python # E.g. your TPU IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you want to # profile for 2 seconds. tf.profiler.experimental.client.trace('grpc://10.0.0.2:8466', 'gs://your_tb_dir', 2000, '10.0.0.3,10.0.0.4') Launch TensorBoard and point it to the same logdir you provided to this API. $ tensorboard --logdir=/tmp/tb_log (or gs://your_tb_dir in the above examples) Open your browser and go to localhost:6006/#profile to view profiling results." 
11377,monitor,tensorflow/tensorflow/python/profiler/profiler_client.py,99,function,"Sends grpc requests to profiler server to perform on-demand monitoring. The monitoring result is a lightweight performance summary of your model execution. This method will block the caller thread until it receives the monitoring result. This method currently supports Cloud TPU only. Args: service_addr: gRPC address of profiler service e.g. grpc://10.0.0.2:8466. duration_ms: Duration of monitoring in ms. level: Choose a monitoring level between 1 and 2 to monitor your job. Level 2 is more verbose than level 1 and shows more metrics. Returns: A string of monitoring output. Example usage: # Continuously send gRPC requests to the Cloud TPU to monitor the model # execution. ```python for query in range(0, 100): print(tf.profiler.experimental.client.monitor('grpc://10.0.0.2:8466', 1000))" 11378,_strip_prefix,tensorflow/tensorflow/python/profiler/profiler_client.py,128,function, 11379,ProfilerClientTest,tensorflow/tensorflow/python/profiler/profiler_client_test.py,30,class, 11380,ProfilerTest,tensorflow/tensorflow/python/profiler/profiler_test.py,37,class, 11381,ProfilerOptions,tensorflow/tensorflow/python/profiler/profiler_v2.py,50,class,"Options for finer control over the profiler. Use `tf.profiler.ProfilerOptions` to control `tf.profiler` behavior. Fields: host_tracer_level: Adjust CPU tracing level. Values are: 1 - critical info only, 2 - info, 3 - verbose. [default value is 2] python_tracer_level: Toggle tracing of Python function calls. Values are: 1 - enabled, 0 - disabled [default value is 0] device_tracer_level: Adjust device (TPU/GPU) tracing level. Values are: 1 - enabled, 0 - disabled [default value is 1]" 11382,start,tensorflow/tensorflow/python/profiler/profiler_v2.py,78,function,"Start profiling TensorFlow performance. Args: logdir: Profiling results log directory. options: `ProfilerOptions` namedtuple to specify miscellaneous profiler options. See example usage below. Raises: AlreadyExistsError: If a profiling session is already running. Example usage: ```python options = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3, python_tracer_level = 1, device_tracer_level = 1) tf.profiler.experimental.start('logdir_path', options = options) # Training code here tf.profiler.experimental.stop() ``` To view the profiling results, launch TensorBoard and point it to `logdir`. Open your browser and go to `localhost:6006/#profile` to view profiling results." 11383,stop,tensorflow/tensorflow/python/profiler/profiler_v2.py,127,function,"Stops the current profiling session. The profiler session will be stopped and profile results can be saved. Args: save: An optional variable to save the results to TensorBoard. Default True. Raises: UnavailableError: If there is no active profiling session." 11384,warmup,tensorflow/tensorflow/python/profiler/profiler_v2.py,153,function,"Warm-up the profiler session. The profiler session will set up profiling context, including loading CUPTI library for GPU profiling. This is used for improving the accuracy of the profiling results." 11385,start_server,tensorflow/tensorflow/python/profiler/profiler_v2.py,166,function,"Start a profiler grpc server that listens to the given port. The profiler server will exit when the process finishes. The service is defined in tensorflow/core/profiler/profiler_service.proto. Args: port: port the profiler server listens to. Example usage: ```python tf.profiler.experimental.server.start('6009') # do your training here.
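Combining the `start_server` entry above with the `trace` client documented earlier in this index, an on-demand profiling round trip might look like the following sketch (the port and logdir are arbitrary choices):

```python
import tensorflow as tf

# In the training process: expose a profiler gRPC service on port 6009.
tf.profiler.experimental.server.start(6009)

# ... model code runs here ...

# From a controller (possibly a different process): request a 2-second trace
# and write the result to a TensorBoard logdir.
tf.profiler.experimental.client.trace(
    'grpc://localhost:6009', '/tmp/tb_log', duration_ms=2000)
```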
11386,Profile,tensorflow/tensorflow/python/profiler/profiler_v2.py,181,class,"Context-manager profile API. Profiling will start when entering the scope, and stop and save the results to the logdir when exiting the scope. Open TensorBoard profile tab to view results. Example usage: ```python with tf.profiler.experimental.Profile(""/path/to/logdir""): # do some work ```" 11387,ProfilerTest,tensorflow/tensorflow/python/profiler/profiler_v2_test.py,33,class, 11388,_fill_missing_graph_shape,tensorflow/tensorflow/python/profiler/tfprof_logger.py,39,function,Fill Tensor shapes in 'graph' with run time shape from 'run_meta'. 11389,_str_id,tensorflow/tensorflow/python/profiler/tfprof_logger.py,68,function,Maps string to id. 11390,_get_logged_ops,tensorflow/tensorflow/python/profiler/tfprof_logger.py,77,function,"Extract trainable model parameters and FLOPs for ops from a Graph. Args: graph: tf.Graph. run_meta: RunMetadata proto used to complete shape information. add_trace: Whether to add op trace information. add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op type '_trainable_variables'. Returns: logged_ops: dict mapping from op_name to OpLogEntry. string_to_id: dict mapping from string to id." 11391,merge_default_with_oplog,tensorflow/tensorflow/python/profiler/tfprof_logger.py,145,function,"Merge the tfprof default extra info with caller's op_log. Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. op_log: OpLogProto proto. run_meta: RunMetadata proto used to complete shape information. add_trace: Whether to add op trace information. add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op type '_trainable_variables'. Returns: tmp_op_log: Merged OpLogProto proto." 11392,write_op_log,tensorflow/tensorflow/python/profiler/tfprof_logger.py,193,function,"Log provided 'op_log', and add additional model information below. The API also assigns ops in tf.compat.v1.trainable_variables() an op type called '_trainable_variables'. The API also logs 'flops' statistics for ops with op.RegisterStatistics() defined. flops calculation depends on Tensor shapes defined in 'graph', which might not be complete. 'run_meta', if provided, completes the shape information with best effort. Args: graph: tf.Graph. If None and eager execution is not enabled, use default graph. log_dir: directory to write the log file. op_log: (Optional) OpLogProto proto to be written. If not provided, a new one is created. run_meta: (Optional) RunMetadata proto that helps flops computation using run time shape information. add_trace: Whether to add python code trace information. Used to support ""code"" view." 11393,TFProfLoggerTest,tensorflow/tensorflow/python/profiler/tfprof_logger_test.py,27,class, 11394,Trace,tensorflow/tensorflow/python/profiler/trace.py,30,class,"Context manager that generates a trace event in the profiler. A trace event will start when entering the context, and stop and save the result to the profiler when exiting the context. Open TensorBoard Profile tab and choose trace viewer to view the trace event in the timeline. Trace events are created only when the profiler is enabled. More information on how to use the profiler can be found at https://tensorflow.org/guide/profiler Example usage: ```python tf.profiler.experimental.start('logdir') for step in range(num_steps): # Creates a trace event for each training step with the step number.
with tf.profiler.experimental.Trace(""Train"", step_num=step): train_fn() tf.profiler.experimental.stop() ```" 11395,traceme_wrapper,tensorflow/tensorflow/python/profiler/traceme.py,24,function, 11396,_zero_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,46,function,Returns zero flops. 11397,_list_product,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,52,function,Computes the product of the elements of the list. 11398,_unary_op_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,64,function,Common code which computes flops for unary operations. 11399,_reciprocal_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,72,function,Compute flops for Reciprocal operation. 11400,_square_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,78,function,Compute flops for Square operation. 11401,_rsqrt_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,84,function,Compute flops for Rsqrt operation. 11402,_log_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,91,function,Compute flops for Log operation. 11403,_neg_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,97,function,Compute flops for Neg operation. 11404,_assign_sub_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,103,function,Compute flops for AssignSub operation. 11405,_assign_add_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,109,function,Compute flops for AssignAdd operation. 11406,_l2_loss_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,115,function,Compute flops for L2Loss operation. 11407,_softmax_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,125,function,Compute flops for Softmax operation. 11408,_binary_per_element_op_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,140,function,Common code which computes flops for binary operations. 11409,_add_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,148,function,Compute flops for Add operation. 11410,_sub_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,154,function,Compute flops for Sub operation. 11411,_mul_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,160,function,Compute flops for Mul operation. 11412,_real_div_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,166,function,Compute flops for RealDiv operation. 11413,_maximum_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,172,function,Compute flops for Maximum operation. 11414,_minimum_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,178,function,Compute flops for Minimum operation. 11415,_pow_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,184,function,Compute flops for Pow operation. 11416,_rsqrt_grad_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,190,function,Compute flops for RsqrtGrad operation. 11417,_greater_equal_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,196,function,Compute flops for GreaterEqual operation. 11418,_greater_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,202,function,Compute flops for Greater operation. 11419,_less_equal_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,208,function,Compute flops for LessEqual operation.
11420,_less_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,214,function,Compute flops for Less operation. 11421,_equal_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,220,function,Compute flops for Equal operation. 11422,_not_equal_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,226,function,Compute flops for NotEqual operation. 11423,_squared_difference_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,232,function,Compute flops for SquaredDifference operation. 11424,_reduction_op_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,241,function,Common code which computes flops for reduction operations. 11425,_mean_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,253,function,Compute flops for Mean operation. 11426,_sum_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,260,function,Compute flops for Sum operation. 11427,_arg_max_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,267,function,Compute flops for ArgMax operation. 11428,_arg_min_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,274,function,Compute flops for ArgMin operation. 11429,_bias_add_grad_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,281,function,Compute flops for BiasAddGrad operation. 11430,_verify_conv_data_format,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,293,function,Verifies data format for pooling and convolutional operations. 11431,_pool_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,300,function,Common code which computes flops for pooling operations. 11432,_avg_pool_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,325,function,Compute flops for AvgPool operation. 11433,_max_pool_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,331,function,Compute flops for MaxPool operation. 11434,_avg_pool_grad_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,337,function,Compute flops for AvgPoolGrad operation. 11435,_max_pool_grad_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,354,function,Compute flops for MaxPoolGrad operation. 11436,_conv_2d_backprop_input_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,382,function,Compute flops for Conv2DBackpropInput operation. 11437,_conv_2d_backprop_filter_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,414,function,Compute flops for Conv2DBackpropFilter operation. 11438,_add_n_flops,tensorflow/tensorflow/python/profiler/internal/flops_registry.py,441,function,Compute flops for AddN operation. 11439,BuildSmallModel,tensorflow/tensorflow/python/profiler/internal/model_analyzer_testlib.py,38,function,Build a small forward conv model. 11440,BuildFullModel,tensorflow/tensorflow/python/profiler/internal/model_analyzer_testlib.py,58,function,"Build the full model with conv, rnn, opt." 11441,BuildSplittableModel,tensorflow/tensorflow/python/profiler/internal/model_analyzer_testlib.py,75,function,Build a small model that can be run partially in each step. 11442,SearchTFProfNode,tensorflow/tensorflow/python/profiler/internal/model_analyzer_testlib.py,95,function,Search a node in the tree. 11443,ProfilerFromFile,tensorflow/tensorflow/python/profiler/internal/model_analyzer_testlib.py,106,function,Initialize a profiler from a profile file.
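The flops registry entries above back the 'flops' statistic surfaced by the v1 profiler. A hedged sketch that queries total float ops for a small graph, using the `profile` and `ProfileOptionBuilder` entries from earlier in this index:

```python
import tensorflow as tf

# Build a small v1-style graph and ask the profiler for its total FLOPs,
# which are derived from the per-op registrations listed above.
g = tf.Graph()
with g.as_default():
  a = tf.ones([32, 32])
  b = tf.ones([32, 32])
  c = tf.matmul(a, b)

opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
stats = tf.compat.v1.profiler.profile(graph=g, cmd='scope', options=opts)
print(stats.total_float_ops)  # roughly 2 * 32 * 32 * 32 for the matmul
```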
11444,CheckAndRemoveDoc,tensorflow/tensorflow/python/profiler/internal/model_analyzer_testlib.py,114,function, 11445,PrintModelAnalysisTest,tensorflow/tensorflow/python/profiler/internal/print_model_analysis_test.py,52,class, 11446,_extract_node,tensorflow/tensorflow/python/profiler/internal/run_metadata_test.py,45,function, 11447,_run_model,tensorflow/tensorflow/python/profiler/internal/run_metadata_test.py,65,function, 11448,_run_loop_model,tensorflow/tensorflow/python/profiler/internal/run_metadata_test.py,92,function, 11449,RunMetadataTest,tensorflow/tensorflow/python/profiler/internal/run_metadata_test.py,116,class, 11450,_SavedModelBuilder,tensorflow/tensorflow/python/saved_model/builder_impl.py,46,class,"Builds the `SavedModel` protocol buffer and saves variables and assets. The `SavedModelBuilder` class provides the functionality to build a `SavedModel` protocol buffer. Specifically, this allows multiple meta graphs to be saved as part of a single language-neutral `SavedModel`, while sharing variables and assets. To build a SavedModel, the first meta graph must be saved with variables. Subsequent meta graphs will simply be saved with their graph definitions. If assets need to be saved and written or copied to disk, they can be provided when the meta graph def is added. If multiple meta graph defs are associated with an asset of the same name, only the first version is retained. Each meta graph added to the SavedModel must be annotated with tags. The tags provide a means to identify the specific meta graph to load and restore, along with the shared set of variables and assets. Typical usage for the `SavedModelBuilder`: ```python ... builder = tf.compat.v1.saved_model.Builder(export_dir) with tf.compat.v1.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph_and_variables(sess, [""foo-tag""], signature_def_map=foo_signatures, assets_list=foo_assets) ... with tf.compat.v1.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph([""bar-tag"", ""baz-tag""]) ... builder.save() ``` Note: This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.builder.SavedModelBuilder or tf.compat.v1.saved_model.Builder. Tensorflow 2.0 will introduce a new object-based method of creating SavedModels." 11451,SavedModelBuilder,tensorflow/tensorflow/python/saved_model/builder_impl.py,432,class, 11452,_maybe_save_assets,tensorflow/tensorflow/python/saved_model/builder_impl.py,624,function,"Saves assets to the meta graph. Args: write_fn: A function callback that writes assets into meta graph. assets_to_add: The list where the asset paths are set up. Returns: A dict of asset basenames for saving to the original full path to the asset. Raises: ValueError: Indicating an invalid filepath tensor." 11453,get_asset_filename_to_add,tensorflow/tensorflow/python/saved_model/builder_impl.py,670,function,"Get a unique basename to add to the SavedModel if this file is unseen. Assets come from users as full paths, and we save them out to the SavedModel as basenames. In some cases, the basenames collide. Here, we dedupe asset basenames by first checking if the file is the same, and, if different, generate and return an index-suffixed basename that can be used to add the asset to the SavedModel. Args: asset_filepath: the full path to the asset that is being saved asset_filename_map: a dict of filenames used for saving the asset in the SavedModel to full paths from which the filenames were derived.
Returns: Uniquified filename string if the file is not a duplicate, or the original filename if the file has already been seen and saved." 11454,_get_unique_asset_filename,tensorflow/tensorflow/python/saved_model/builder_impl.py,709,function, 11455,_asset_path_from_tensor,tensorflow/tensorflow/python/saved_model/builder_impl.py,719,function,"Returns the filepath value stored in constant `path_tensor`. Args: path_tensor: Tensor of a file-path. Returns: The string value i.e. path of the tensor, if valid. Raises: TypeError if tensor does not match expected op type, dtype or value." 11456,_add_asset_to_metagraph,tensorflow/tensorflow/python/saved_model/builder_impl.py,743,function,"Builds an asset proto and adds it to the meta graph def. Args: meta_graph_def: The meta graph def to which the asset will be added. asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto." 11457,copy_assets_to_destination_dir,tensorflow/tensorflow/python/saved_model/builder_impl.py,757,function,Copy all assets from source path to destination path. 11458,_add_asset_to_collection,tensorflow/tensorflow/python/saved_model/builder_impl.py,778,function,"Builds an asset proto and adds it to the asset collection of the graph. Args: asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto." 11459,_add_op_to_signature_def_map,tensorflow/tensorflow/python/saved_model/builder_impl.py,795,function, 11460,_is_tensor,tensorflow/tensorflow/python/saved_model/function_deserialization.py,42,function, 11461,_call_concrete_function,tensorflow/tensorflow/python/saved_model/function_deserialization.py,48,function,"Calls a restored Function with structured inputs. This differs from `function.__call__` in that inputs and outputs are structured and that it casts inputs to tensors if needed. Note: this does not check that non-tensor inputs match. That should be done before via `_concrete_function_callable_with`. Args: function: ConcreteFunction to call. inputs: Structured inputs compatible with `function.graph.structured_input_signature`. Returns: The structured function output." 11462,_try_convert_to_tensor_spec,tensorflow/tensorflow/python/saved_model/function_deserialization.py,80,function,Returns None or TensorSpec obtained if `arg` is converted to tensor. 11463,_concrete_function_callable_with,tensorflow/tensorflow/python/saved_model/function_deserialization.py,91,function,Returns whether concrete `function` can be called with `inputs`. 11464,_deserialize_function_spec_as_nonmethod,tensorflow/tensorflow/python/saved_model/function_deserialization.py,121,function,Deserialize a FunctionSpec object from its proto representation. 11465,setup_bare_concrete_function,tensorflow/tensorflow/python/saved_model/function_deserialization.py,153,function,Makes a restored bare concrete function callable. 11466,RestoredFunction,tensorflow/tensorflow/python/saved_model/function_deserialization.py,170,class,"Wrapper class for a function that has been restored from saved state. See `def_function.Function`." 11467,recreate_function,tensorflow/tensorflow/python/saved_model/function_deserialization.py,192,function,"Creates a `Function` from a `SavedFunction`. Args: saved_function: `SavedFunction` proto. concrete_functions: map from function name to `ConcreteFunction`. As a side effect of this function, the `FunctionSpec` from `saved_function` is added to each `ConcreteFunction` in this map.
Returns: A `Function`." 11468,load_function_def_library,tensorflow/tensorflow/python/saved_model/function_deserialization.py,278,function,"Load a set of functions as concrete functions without captured inputs. Function names are manipulated during load such that they do not overlap with previously created ones. Args: library: FunctionDefLibrary proto message. load_shared_name_suffix: If specified, used to uniquify shared names. Otherwise, a unique name is generated. Returns: Map of original function names in the library to instances of `ConcreteFunction` without captured inputs. Raises: ValueError: if function dependencies have a cycle." 11469,_restore_gradient_functions,tensorflow/tensorflow/python/saved_model/function_deserialization.py,348,function,Populate function op's _gradient_function with default gradient. 11470,_sort_function_defs,tensorflow/tensorflow/python/saved_model/function_deserialization.py,360,function,Return a topological sort of FunctionDefs in a library. 11471,fix_node_def,tensorflow/tensorflow/python/saved_model/function_deserialization.py,393,function,Replace function calls and shared names in `node_def`. 11472,_fix_fdef,tensorflow/tensorflow/python/saved_model/function_deserialization.py,438,function,"Fixes a FunctionDef proto to be loaded in current context. In particular, when loading a function library into an eager context, one must rename the functions to avoid conflicts with existing functions. Args: orig_fdef: FunctionDef proto to fix. It is not modified. functions: map from function name to a ConcreteFunction instance. shared_name_suffix: A unique string for this load which helps to avoid `shared_name` collisions across loads. Two functions from the same load using the same `shared_name` still need to share, but functions from different loads with the same `shared_name` should not. Returns: A fixed copy of the original FunctionDef." 11473,_list_function_deps,tensorflow/tensorflow/python/saved_model/function_deserialization.py,464,function,Find functions referenced in `fdef`. 11474,_clean_function_name,tensorflow/tensorflow/python/saved_model/function_deserialization.py,488,function,Vanity function to keep the function names comprehensible. 11475,_serialize_function_spec,tensorflow/tensorflow/python/saved_model/function_serialization.py,29,function,Serialize a FunctionSpec object into its proto representation. 11476,serialize_concrete_function,tensorflow/tensorflow/python/saved_model/function_serialization.py,52,function,Build a SavedConcreteFunction. 11477,serialize_bare_concrete_function,tensorflow/tensorflow/python/saved_model/function_serialization.py,78,function,Build a SavedBareConcreteFunction. 11478,serialize_function,tensorflow/tensorflow/python/saved_model/function_serialization.py,94,function,Build a SavedFunction proto. 11479,wrap_cached_variables,tensorflow/tensorflow/python/saved_model/function_serialization.py,110,function,"Wraps the concrete function if it uses cached read tensors. This function creates a new concrete function that captures variables instead of the cached read tensors. Args: concrete_function: A ConcreteFunction that may capture cached read tensors. Returns: A concrete function that wraps the original concrete function, which captures variables instead. If the original function did not capture any cached values, then the function is not wrapped and the original object is returned."
11480,_unused_handle,tensorflow/tensorflow/python/saved_model/load.py,57,function,Returns a placeholder as a handle that is not supposed to be accessed. 11481,_WrapperFunction,tensorflow/tensorflow/python/saved_model/load.py,71,class,"A class that wraps a concrete function to handle different distributed contexts. The reason for wrapping a concrete function is that the _captured_inputs fields used for in-replica context and cross-replica context are different. When `load()` is called from within a tf.distribute.strategy scope, the captured inputs are distributed variables. When using these distributed variables while calling the function, we need different approaches depending on whether we are in replica context. When in replica context, we naturally use the corresponding component of the distributed variable; when not in replica context, calling the function means constructing a graph that is not actually going to be used. A typical use case is constructing a functional model. In this case, return a placeholder with a control dependency to ensure that it is never accessed." 11482,Loader,tensorflow/tensorflow/python/saved_model/load.py,109,class,Helper class to load an object-based SavedModel. 11483,_RestoredResource,tensorflow/tensorflow/python/saved_model/load.py,472,class,Restored SavedResource. 11484,_call_attribute,tensorflow/tensorflow/python/saved_model/load.py,508,function, 11485,load,tensorflow/tensorflow/python/saved_model/load.py,513,function,"Load a SavedModel from `export_dir`. Signatures associated with the SavedModel are available as functions: ```python imported = tf.saved_model.load(path) f = imported.signatures[""serving_default""] print(f(x=tf.constant([[1.]]))) ``` Objects exported with `tf.saved_model.save` additionally have trackable objects and functions assigned to attributes: ```python exported = tf.train.Checkpoint(v=tf.Variable(3.)) exported.f = tf.function( lambda x: exported.v * x, input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) tf.saved_model.save(exported, path) imported = tf.saved_model.load(path) assert 3. == imported.v.numpy() assert 6. == imported.f(x=tf.constant(2.)).numpy() ``` _Loading Keras models_ Keras models are trackable, so they can be saved to SavedModel. The object returned by `tf.saved_model.load` is not a Keras object (i.e. doesn't have `.fit`, `.predict`, etc. methods). A few attributes and functions are still available: `.variables`, `.trainable_variables` and `.__call__`. ```python model = tf.keras.Model(...) tf.saved_model.save(model, path) imported = tf.saved_model.load(path) outputs = imported(inputs) ``` Use `tf.keras.models.load_model` to restore the Keras model. _Importing SavedModels from TensorFlow 1.x_ SavedModels from `tf.estimator.Estimator` or 1.x SavedModel APIs have a flat graph instead of `tf.function` objects. These SavedModels will be loaded with the following attributes: * `.signatures`: A dictionary mapping signature names to functions. * `.prune(feeds, fetches)`: A method which allows you to extract functions for new subgraphs. This is equivalent to importing the SavedModel and naming feeds and fetches in a Session from TensorFlow 1.x. ```python imported = tf.saved_model.load(path_to_v1_saved_model) pruned = imported.prune(""x:0"", ""out:0"") pruned(tf.ones([])) ``` See `tf.compat.v1.wrap_function` for details. * `.variables`: A list of imported variables. * `.graph`: The whole imported graph. 
* `.restore(save_path)`: A function that restores variables from a checkpoint saved from `tf.compat.v1.Saver`. _Consuming SavedModels asynchronously_ When consuming SavedModels asynchronously (the producer is a separate process), the SavedModel directory will appear before all files have been written, and `tf.saved_model.load` will fail if pointed at an incomplete SavedModel. Rather than checking for the directory, check for ""saved_model_dir/saved_model.pb"". This file is written atomically as the last `tf.saved_model.save` file operation. Args: export_dir: The SavedModel directory to load from. tags: A tag or sequence of tags identifying the MetaGraph to load. Optional if the SavedModel contains a single MetaGraph, as for those exported from `tf.saved_model.save`. options: Optional, `tf.saved_model.LoadOptions` object that specifies options for loading. Returns: A trackable object with a `signatures` attribute mapping from signature keys to functions. If the SavedModel was exported by `tf.saved_model.save`, it also points to the trackable objects, functions, and debug info with which it was saved. Raises: ValueError: If `tags` don't match a MetaGraph in the SavedModel." 11486,load_internal,tensorflow/tensorflow/python/saved_model/load.py,606,function,Loader implementation. 11487,LoadContext,tensorflow/tensorflow/python/saved_model/load_context.py,25,class,A context for loading a model. 11488,load_context,tensorflow/tensorflow/python/saved_model/load_context.py,46,function, 11489,get_load_options,tensorflow/tensorflow/python/saved_model/load_context.py,54,function,Returns the load options if under a load context. 11490,LoadOptions,tensorflow/tensorflow/python/saved_model/load_options.py,25,class,"Options for loading a SavedModel. This object may be used as the `options` argument in functions that load a SavedModel (`tf.saved_model.load`, `tf.keras.models.load_model`)." 11491,cycle,tensorflow/tensorflow/python/saved_model/load_test.py,68,function, 11492,LoadTest,tensorflow/tensorflow/python/saved_model/load_test.py,88,class, 11493,SingleCycleTests,tensorflow/tensorflow/python/saved_model/load_test.py,1814,class, 11494,_Initializer,tensorflow/tensorflow/python/saved_model/load_v1_in_v2.py,40,class,"Represents an initialization operation restored from a SavedModel. Without this object, re-export of imported 1.x SavedModels would omit the original SavedModel's initialization procedure. Created when `tf.saved_model.load` loads a TF 1.x-style SavedModel with an initialization op. This object holds a function that runs the initialization. It does not require any manual user intervention; `tf.saved_model.save` will see this object and automatically add it to the exported SavedModel, and `tf.saved_model.load` runs the initialization function automatically." 11495,_EagerSavedModelLoader,tensorflow/tensorflow/python/saved_model/load_v1_in_v2.py,67,class,Loads a SavedModel without using Sessions. 11496,load,tensorflow/tensorflow/python/saved_model/load_v1_in_v2.py,260,function,Load a v1-style SavedModel as an object. 11497,LoadTest,tensorflow/tensorflow/python/saved_model/load_v1_in_v2_test.py,57,class, 11498,parse_saved_model_with_debug_info,tensorflow/tensorflow/python/saved_model/loader_impl.py,43,function,"Reads the SavedModel as well as the graph debug info. Args: export_dir: Directory containing the SavedModel and GraphDebugInfo files. Returns: `SavedModel` and `GraphDebugInfo` protocol buffers. Raises: IOError: If the saved model file does not exist, or cannot be successfully parsed. 
A missing graph debug info file is fine." 11499,parse_saved_model,tensorflow/tensorflow/python/saved_model/loader_impl.py,72,function,"Reads the saved_model.pb or saved_model.pbtxt file containing `SavedModel`. Args: export_dir: String or PathLike, path to the directory containing the SavedModel file. Returns: A `SavedModel` protocol buffer. Raises: IOError: If the file does not exist, or cannot be successfully parsed." 11500,get_asset_tensors,tensorflow/tensorflow/python/saved_model/loader_impl.py,122,function,"Gets the asset tensors, if defined in the meta graph def to load. Args: export_dir: Directory where the SavedModel is located. meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. import_scope: Optional `string` -- if specified, prepend this followed by '/' to all returned asset tensor names. Returns: A dictionary of asset tensors, keyed by the name of the asset tensor. The value in the map corresponds to the absolute path of the asset file." 11501,_get_main_op_tensor,tensorflow/tensorflow/python/saved_model/loader_impl.py,165,function,"Gets the main op tensor, if one exists. Args: meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded. init_op_key: name of the collection to check; should be either MAIN_OP_KEY or the deprecated LEGACY_INIT_OP_KEY. Returns: The main op tensor if it exists, and `None` otherwise. Raises: RuntimeError: If the collection def corresponding to the main op key has other than exactly one tensor." 11502,_get_op_from_collection,tensorflow/tensorflow/python/saved_model/loader_impl.py,194,function, 11503,_get_op_from_signature_def,tensorflow/tensorflow/python/saved_model/loader_impl.py,198,function,Retrieve op stored in the imported meta graph's signature def. 11504,get_init_op,tensorflow/tensorflow/python/saved_model/loader_impl.py,208,function, 11505,get_train_op,tensorflow/tensorflow/python/saved_model/loader_impl.py,215,function, 11506,maybe_saved_model_directory,tensorflow/tensorflow/python/saved_model/loader_impl.py,230,function,"Checks whether the provided export directory could contain a SavedModel. Note that the method does not load any data by itself. If the method returns `false`, the export directory definitely does not contain a SavedModel. If the method returns `true`, the export directory may contain a SavedModel but provides no guarantee that it can be loaded. Args: export_dir: Absolute string path to possible export location. For example, '/my/foo/model'. Returns: True if the export directory contains SavedModel files, False otherwise." 11507,contains_saved_model,tensorflow/tensorflow/python/saved_model/loader_impl.py,251,function,"Checks whether the provided export directory could contain a SavedModel. Note that the method does not load any data by itself. If the method returns `false`, the export directory definitely does not contain a SavedModel. If the method returns `true`, the export directory may contain a SavedModel but provides no guarantee that it can be loaded. Args: export_dir: Absolute string path to possible export location. For example, '/my/foo/model'. Returns: True if the export directory contains SavedModel files, False otherwise." 11508,load,tensorflow/tensorflow/python/saved_model/loader_impl.py,276,function,"Loads the model from a SavedModel as specified by tags. Args: sess: The TensorFlow session to restore the variables. tags: Set of string tags to identify the required MetaGraphDef. 
These should correspond to the tags used when saving the variables using the SavedModel `save()` API. export_dir: Directory in which the SavedModel protocol buffer and variables to be loaded are located. import_scope: Optional `string` -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static `MetaGraphDef` protocol buffer that is returned. **saver_kwargs: Optional keyword arguments passed through to Saver. Returns: The `MetaGraphDef` protocol buffer loaded in the provided session. This can be used to further extract signature-defs, collection-defs, etc. Raises: RuntimeError: MetaGraphDef associated with the tags cannot be found." 11509,SavedModelLoader,tensorflow/tensorflow/python/saved_model/loader_impl.py,303,class,Load graphs and restore variable values from a `SavedModel`. 11510,_get_export_dir,tensorflow/tensorflow/python/saved_model/loader_test.py,41,function, 11511,build_graph_helper,tensorflow/tensorflow/python/saved_model/loader_test.py,48,function, 11512,SavedModelLoaderTest,tensorflow/tensorflow/python/saved_model/loader_test.py,67,class, 11513,main_op,tensorflow/tensorflow/python/saved_model/main_op_impl.py,34,function,"Returns a main op to init variables and tables. Returns the main op including the group of ops that initializes all variables, initializes local variables, and initializes all tables. Returns: The set of ops to be run as part of the main op upon the load operation." 11514,main_op_with_restore,tensorflow/tensorflow/python/saved_model/main_op_impl.py,57,function,"Returns a main op to init variables, tables and restore the graph. Returns the main op including the group of ops that initializes all variables, initializes local variables, and initializes all tables, plus the restore op name. Args: restore_op_name: Name of the op to use to restore the graph. Returns: The set of ops to be run as part of the main op upon the load operation." 11515,MethodNameUpdater,tensorflow/tensorflow/python/saved_model/method_name_updater.py,37,class,"Updates the method name(s) of the SavedModel stored in the given path. The `MethodNameUpdater` class provides the functionality to update the method name field in the signature_defs of the given SavedModel. For example, it can be used to replace the `predict` `method_name` with `regress`. Typical usage of the `MethodNameUpdater`: ```python ... updater = tf.compat.v1.saved_model.MethodNameUpdater(export_dir) # Update all signature_defs with key ""foo"" in all meta graph defs. updater.replace_method_name(signature_key=""foo"", method_name=""regress"") # Update a single signature_def with key ""bar"" in the meta graph def with # tags [""serve""] updater.replace_method_name(signature_key=""bar"", method_name=""classify"", tags=""serve"") updater.save(new_export_dir) ``` Note: This class will only be available through the v1 compatibility library as tf.compat.v1.saved_model.builder.MethodNameUpdater." 11516,MethodNameUpdaterTest,tensorflow/tensorflow/python/saved_model/method_name_updater_test.py,122,class, 11517,NotEncodableError,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,57,class,Error raised when a coder cannot encode an object. 11518,StructureCoder,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,61,class,Encoder and decoder for nested structures into protos. 
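The loader entries above back the v1-style `tf.compat.v1.saved_model` surface. A sketch of session-based loading, assuming `export_dir` (an illustrative path) holds a model exported with the "serve" tag:

```python
import tensorflow.compat.v1 as tf1

export_dir = "/tmp/exported_model"  # assumed path

if tf1.saved_model.maybe_saved_model_directory(export_dir):
  with tf1.Session(graph=tf1.Graph()) as sess:
    # Restores the MetaGraphDef tagged "serve" and its variables into sess.
    meta_graph_def = tf1.saved_model.loader.load(
        sess, [tf1.saved_model.tag_constants.SERVING], export_dir)
    print(list(meta_graph_def.signature_def.keys()))
```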
11519,_ListCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,130,class,Codec for lists. 11520,_is_tuple,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,153,function, 11521,_is_named_tuple,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,157,function,"Returns True iff `instance` is a `namedtuple`. Args: instance: An instance of a Python object. Returns: True if `instance` is a `namedtuple`." 11522,_TupleCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,173,class,Codec for tuples. 11523,_DictCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,196,class,Codec for dicts. 11524,_NamedTupleCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,219,class,"Codec for namedtuples. Encoding and decoding a namedtuple reconstructs a namedtuple with a different actual Python type, but with the same `typename` and `fields`." 11525,_Float64Codec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,254,class,Codec for floats. 11526,_Int64Codec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,277,class,Codec for Python integers (limited to 64 bit values). 11527,_StringCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,300,class,"Codec for strings. See StructuredValue.string_value in proto/struct.proto for more detailed explanation." 11528,_NoneCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,327,class,Codec for None. 11529,_BoolCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,350,class,Codec for booleans. 11530,_TensorShapeCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,373,class,Codec for `TensorShape`. 11531,_TensorTypeCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,397,class,Codec for `TensorType`. 11532,_TensorSpecCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,420,class,Codec for `TensorSpec`. 11533,_BoundedTensorSpecCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,455,class,Codec for `BoundedTensorSpec`. 11534,_TypeSpecCodec,tensorflow/tensorflow/python/saved_model/nested_structure_coder.py,494,class,Codec for `tf.TypeSpec`. 11535,NestedStructureTest,tensorflow/tensorflow/python/saved_model/nested_structure_coder_test.py,36,class, 11536,VersionedTypeRegistration,tensorflow/tensorflow/python/saved_model/revived_types.py,25,class,Holds information about one version of a revived type. 11537,register_revived_type,tensorflow/tensorflow/python/saved_model/revived_types.py,108,function,"Register a type for revived objects. Args: identifier: A unique string identifying this class of objects. predicate: A Boolean predicate for this registration. Takes a trackable object as an argument. If True, `type_registration` may be used to save and restore the object. versions: A list of `VersionedTypeRegistration` objects." 11538,serialize,tensorflow/tensorflow/python/saved_model/revived_types.py,140,function,Create a SavedUserObject from a trackable object. 11539,deserialize,tensorflow/tensorflow/python/saved_model/revived_types.py,150,function,"Create a trackable object from a SavedUserObject proto. Args: proto: A SavedUserObject to deserialize. Returns: A tuple of (trackable, assignment_fn) where assignment_fn has the same signature as setattr and should be used to add dependencies to `trackable` when they are available." 
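A sketch of how the codec classes above compose through `StructureCoder`; the internal import path and method names follow the file indexed here and may differ in other versions:

```python
import tensorflow as tf
from tensorflow.python.saved_model import nested_structure_coder

coder = nested_structure_coder.StructureCoder()
structure = {"spec": tf.TensorSpec(shape=[None, 3], dtype=tf.float32),
             "flag": True,
             "name": "example"}
# _DictCodec, _TensorSpecCodec, _BoolCodec and _StringCodec each encode
# their part of the nested structure into a StructuredValue proto.
proto = coder.encode_structure(structure)
decoded = coder.decode_proto(proto)
assert decoded["flag"] is True
```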
11540,registered_identifiers,tensorflow/tensorflow/python/saved_model/revived_types.py,170,function, 11541,get_setter,tensorflow/tensorflow/python/saved_model/revived_types.py,174,function, 11542,CustomTestClass,tensorflow/tensorflow/python/saved_model/revived_types_test.py,28,class, 11543,RegistrationMatchingTest,tensorflow/tensorflow/python/saved_model/revived_types_test.py,56,class, 11544,_AugmentedGraphView,tensorflow/tensorflow/python/saved_model/save.py,76,class,"An extendable graph which also tracks functions attached to objects. Extensions through `add_object` appear in the object graph and any checkpoints generated from it, even if they are not dependencies of the node they were attached to in the saving program. For example, a `.signatures` attribute is added to exported SavedModel root objects without modifying the root object itself. Also tracks functions attached to objects in the graph, through the caching `list_functions` method. Enumerating functions only through this method ensures that we get a consistent view of functions, even if object attributes create new functions every time they are accessed." 11545,_SaveableView,tensorflow/tensorflow/python/saved_model/save.py,155,class,"Provides a frozen view over a trackable root. This class helps to create a single stable view over an object to save. The saving code should access properties and functions via this class and not via the original object, as there are cases where an object constructs its trackable attributes and functions dynamically per call and will yield different objects if invoked more than once. Changes to the graph, for example adding objects, must happen in `checkpoint_view` (an `_AugmentedGraphView`) before the `_SaveableView` is constructed. Changes after the `_SaveableView` has been constructed will be ignored." 11546,_tensor_dict_to_tensorinfo,tensorflow/tensorflow/python/saved_model/save.py,364,function, 11547,_map_captures_to_created_tensors,tensorflow/tensorflow/python/saved_model/save.py,371,function,"Maps eager tensors captured by a function to Graph resources for export. Args: original_captures: A dictionary mapping from tensors captured by the function to interior placeholders for those tensors (inside the function body). resource_map: A dictionary mapping from resource tensors owned by the eager context to resource tensors in the exported graph. Returns: A list of stand-in tensors which belong to the exported graph, corresponding to the function's captures. Raises: AssertionError: If the function references a resource which is not part of `resource_map`." 11548,_map_function_arguments_to_created_inputs,tensorflow/tensorflow/python/saved_model/save.py,418,function,"Creates exterior placeholders in the exported graph for function arguments. Functions have two types of inputs: tensors captured from the outside (eager) context, and arguments to the function which we expect to receive from the user at each call. `_map_captures_to_created_tensors` replaces captured tensors with stand-ins (typically these are resource dtype tensors associated with variables). `_map_function_arguments_to_created_inputs` runs over every argument, creating a new placeholder for each which will belong to the exported graph rather than the function body. Args: function_arguments: A list of argument placeholders in the function body. signature_key: The name of the signature being exported, for error messages. function_name: The name of the function, for error messages. 
Returns: A tuple of (mapped_inputs, exterior_placeholders) mapped_inputs: A list with entries corresponding to `function_arguments` containing all of the inputs of the function gathered from the exported graph (both captured resources and arguments). exterior_argument_placeholders: A dictionary mapping from argument names to placeholders in the exported graph, containing the explicit arguments to the function which a user is expected to provide. Raises: ValueError: If argument names are not unique." 11549,_call_function_with_mapped_captures,tensorflow/tensorflow/python/saved_model/save.py,487,function,"Calls `function` in the exported graph, using mapped resource captures." 11550,_generate_signatures,tensorflow/tensorflow/python/saved_model/save.py,500,function,"Validates and calls `signature_functions` in the default graph. Args: signature_functions: A dictionary mapping string keys to concrete TensorFlow functions (e.g. from `signature_serialization.canonicalize_signatures`) which will be used to generate SignatureDefs. resource_map: A dictionary mapping from resource tensors in the eager context to resource tensors in the Graph being exported. This dictionary is used to re-bind resources captured by functions to tensors which will exist in the SavedModel. Returns: Each function in the `signature_functions` dictionary is called with placeholder Tensors, generating a function call operation and output Tensors. The placeholder Tensors, the function call operation, and the output Tensors from the function call are part of the default Graph. This function then returns a dictionary with the same structure as `signature_functions`, with the concrete functions replaced by SignatureDefs implicitly containing information about how to call each function from a TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference the generated placeholders and Tensor outputs by name. The caller is expected to include the default Graph set while calling this function as a MetaGraph in a SavedModel, including the returned SignatureDefs as part of that MetaGraph." 11551,_trace_resource_initializers,tensorflow/tensorflow/python/saved_model/save.py,546,function,Create concrete functions from `CapturableResource` objects. 11552,_process_asset,tensorflow/tensorflow/python/saved_model/save.py,581,function,Add `trackable_asset` to `asset_info` and `resource_map`. 11553,_fill_meta_graph_def,tensorflow/tensorflow/python/saved_model/save.py,613,function,"Generates a MetaGraph which calls `signature_functions`. Args: meta_graph_def: The MetaGraphDef proto to fill. saveable_view: The _SaveableView being exported. signature_functions: A dictionary mapping signature keys to concrete functions containing signatures to add to the MetaGraph. namespace_whitelist: List of strings containing whitelisted op namespaces. Returns: A tuple of (_AssetInfo, Graph) containing the captured assets and exported Graph generated from tracing the saveable_view." 11554,_verify_ops,tensorflow/tensorflow/python/saved_model/save.py,696,function,Verifies that all namespaced ops in the graph are whitelisted. 11555,_serialize_object_graph,tensorflow/tensorflow/python/saved_model/save.py,722,function,Save a SavedObjectGraph proto for `root`. 11556,_write_object_proto,tensorflow/tensorflow/python/saved_model/save.py,744,function,Saves an object into SavedObject proto. 11557,_export_debug_info,tensorflow/tensorflow/python/saved_model/save.py,785,function,"Exports debug information from a graph. 
Args: exported_graph: A Graph that has been created by tracing a saveable view. Returns: Corresponding GraphDebugInfo with traces for ops in all functions of the exported_graph." 11558,save,tensorflow/tensorflow/python/saved_model/save.py,810,function,"Exports the Trackable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md). Example usage: ```python class Adder(tf.Module): @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) def add(self, x): return x + x + 1. to_export = Adder() tf.saved_model.save(to_export, '/tmp/adder') ``` The resulting SavedModel is then servable with an input named ""x"", its value having any shape and dtype float32. The optional `signatures` argument controls which methods in `obj` will be available to programs which consume `SavedModel`s, for example, serving APIs. Python functions may be decorated with `@tf.function(input_signature=...)` and passed as signatures directly, or lazily with a call to `get_concrete_function` on the method decorated with `@tf.function`. If the `signatures` argument is omitted, `obj` will be searched for `@tf.function`-decorated methods. If exactly one `@tf.function` is found, that method will be used as the default signature for the SavedModel. This behavior is expected to change in the future, when a corresponding `tf.saved_model.load` symbol is added. At that point signatures will be completely optional, and any `@tf.function` attached to `obj` or its dependencies will be exported for use with `load`. When invoking a signature in an exported SavedModel, `Tensor` arguments are identified by name. These names will come from the Python function's argument names by default. They may be overridden by specifying a `name=...` argument in the corresponding `tf.TensorSpec` object. Explicit naming is required if multiple `Tensor`s are passed through a single argument to the Python function. The outputs of functions used as `signatures` must either be flat lists, in which case outputs will be numbered, or a dictionary mapping string keys to `Tensor`, in which case the keys will be used to name outputs. Signatures are available in objects returned by `tf.saved_model.load` as a `.signatures` attribute. This is a reserved attribute: `tf.saved_model.save` on an object with a custom `.signatures` attribute will raise an exception. Since `tf.keras.Model` objects are also Trackable, this function can be used to export Keras models. For example, exporting with a signature specified: ```python class Model(tf.keras.Model): @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)]) def serve(self, serialized): ... m = Model() tf.saved_model.save(m, '/tmp/saved_model/') ``` Exporting from a function without a fixed signature: ```python class Model(tf.keras.Model): @tf.function def call(self, x): ... m = Model() tf.saved_model.save( m, '/tmp/saved_model/', signatures=m.call.get_concrete_function( tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name=""inp""))) ``` `tf.keras.Model` instances constructed from inputs and outputs already have a signature and so do not require a `@tf.function` decorator or a `signatures` argument. If neither are specified, the model's forward pass is exported. 
```python x = input_layer.Input((4,), name=""x"") y = core.Dense(5, name=""out"")(x) model = training.Model(x, y) tf.saved_model.save(model, '/tmp/saved_model/') # The exported SavedModel takes ""x"" with shape [None, 4] and returns ""out"" # with shape [None, 5] ``` Variables must be tracked by assigning them to an attribute of a tracked object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers from `tf.keras.layers`, optimizers from `tf.train`) track their variables automatically. This is the same tracking scheme that `tf.train.Checkpoint` uses, and an exported `Checkpoint` object may be restored as a training checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's ""variables/"" subdirectory. Currently, variables are the only stateful objects supported by `tf.saved_model.save`, but others (e.g. tables) will be supported in the future. `tf.function` does not hard-code device annotations from outside the function body, instead using the calling context's device. This means for example that exporting a model that runs on a GPU and serving it on a CPU will generally work, with some exceptions. `tf.device` annotations inside the body of the function will be hard-coded in the exported model; this type of annotation is discouraged. Device-specific operations, e.g. with ""cuDNN"" in the name or with device-specific layouts, may cause issues. Currently a `DistributionStrategy` is another exception: active distribution strategies will cause device placements to be hard-coded in a function. Exporting a single-device computation and importing under a `DistributionStrategy` is not currently supported, but may be in the future. SavedModels exported with `tf.saved_model.save` [strip default-valued attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes) automatically, which removes one source of incompatibilities when the consumer of a SavedModel is running an older TensorFlow version than the producer. There are however other sources of incompatibilities which are not handled automatically, such as when the exported model contains operations which the consumer does not have definitions for. A single tf.function can generate many ConcreteFunctions. If a downstream tool wants to refer to all concrete functions generated by a single tf.function, you can use the `function_aliases` argument to store a map from the alias name to all concrete function names. E.g. ```python class MyModel: @tf.function def func(self): ... @tf.function def serve(self): ... self.func() model = MyModel() signatures = { 'serving_default': model.serve.get_concrete_function(), } options = tf.saved_model.SaveOptions(function_aliases={ 'my_func': model.func, }) tf.saved_model.save(model, export_dir, signatures, options) ``` Args: obj: A trackable object to export. export_dir: A directory in which to write the SavedModel. signatures: Optional, one of three types: * a `tf.function` with an input signature specified, which will use the default serving signature key, * the result of `f.get_concrete_function` on a `@tf.function`-decorated function `f`, in which case `f` will be used to generate a signature for the SavedModel under the default serving signature key, * a dictionary, which maps signature keys to either `tf.function` instances with input signatures or concrete functions. Keys of such a dictionary may be arbitrary strings, but will typically be from the `tf.saved_model.signature_constants` module. 
options: Optional, `tf.saved_model.SaveOptions` object that specifies options for saving. Raises: ValueError: If `obj` is not trackable. @compatibility(eager) Not well supported when graph building. From TensorFlow 1.x, `tf.compat.v1.enable_eager_execution()` should run first. Calling tf.saved_model.save in a loop when graph building from TensorFlow 1.x will add new save operations to the default graph each iteration. May not be called from within a function body. @end_compatibility" 11559,export_meta_graph,tensorflow/tensorflow/python/saved_model/save.py,1038,function,"Exports the MetaGraph proto to a file. This function goes through the same procedure as saved_model.save to produce the given object's MetaGraph, then saves it to the given file. It skips saving checkpoint information, and is useful when all one wants is the graph defining the model. Args: obj: A trackable object to build the MetaGraph from. filename: The file into which to write the MetaGraph. signatures: Optional, either a `tf.function` with an input signature specified or the result of `f.get_concrete_function` on a `@tf.function`-decorated function `f`, in which case `f` will be used to generate a signature for the SavedModel under the default serving signature key. `signatures` may also be a dictionary, in which case it maps from signature keys to either `tf.function` instances with input signatures or concrete functions. The keys of such a dictionary may be arbitrary strings, but will typically be from the `tf.saved_model.signature_constants` module. options: Optional, `tf.saved_model.SaveOptions` object that specifies options for saving." 11560,_build_meta_graph_impl,tensorflow/tensorflow/python/saved_model/save.py,1075,function,Creates a MetaGraph containing the resources and functions of an object. 11561,_build_meta_graph,tensorflow/tensorflow/python/saved_model/save.py,1140,function,Creates a MetaGraph under a SaveContext. 11562,SaveContext,tensorflow/tensorflow/python/saved_model/save_context.py,25,class,A context for building a graph of SavedModel. 11563,save_context,tensorflow/tensorflow/python/saved_model/save_context.py,53,function, 11564,in_save_context,tensorflow/tensorflow/python/saved_model/save_context.py,63,function,Returns whether under a save context. 11565,get_save_options,tensorflow/tensorflow/python/saved_model/save_context.py,68,function,Returns the save options if under a save context. 11566,SaveContextTest,tensorflow/tensorflow/python/saved_model/save_context_test.py,28,class, 11567,VariablePolicy,tensorflow/tensorflow/python/saved_model/save_options.py,29,class,"Enum defining options for variable handling when saving. NONE No policy applied: Distributed variables are saved as one variable, with no device attached. SAVE_VARIABLE_DEVICES When saving variables, also save their device assignment. This is useful if one wants to hardcode devices in saved models, but it also makes them non-portable if soft device placement is disabled (more details in `tf.config.set_soft_device_placement`). This is currently not fully supported by `saved_model.load`, and is mainly intended to be used when one will be reading the saved model at a lower API level. 
In the example below, the graph saved by the call to `saved_model.save` will have the variable devices correctly specified: ```python exported = tf.train.Checkpoint() with tf.device('/GPU:0'): exported.x_gpu = tf.Variable(1.0) with tf.device('/CPU:0'): exported.x_cpu = tf.Variable(1.0) tf.saved_model.save(exported, export_dir, options = tf.saved_model.SaveOptions( experimental_variable_policy= tf.saved_model.experimental.VariablePolicy.SAVE_VARIABLE_DEVICES)) ``` Distributed variables are still saved as one variable under this policy. EXPAND_DISTRIBUTED_VARIABLES Distributed variables will be explicitly expanded into their respective distributed replicas, and their assigned devices will be saved. This is useful when one wants to use the model for training in environments where the original distribution strategy is not available. Checkpoints are currently incompatible with this option, so it is not implemented in `saved_model.save` (only the internal `saved_model.export_meta_graph` API supports it for now)." 11568,SaveOptions,tensorflow/tensorflow/python/saved_model/save_options.py,97,class,"Options for saving to SavedModel. This object may be used as the `options` argument in functions that save a SavedModel (`tf.saved_model.save`, `tf.keras.models.save_model`)." 11569,_validate_namespace_whitelist,tensorflow/tensorflow/python/saved_model/save_options.py,175,function,Validates namespace whitelist argument. 11570,_run_signature,tensorflow/tensorflow/python/saved_model/save_test.py,62,function, 11571,_import_and_infer,tensorflow/tensorflow/python/saved_model/save_test.py,77,function,Import a SavedModel into a TF 1.x-style graph and run `signature_key`. 11572,SaveTest,tensorflow/tensorflow/python/saved_model/save_test.py,88,class, 11573,VariablePolicyEnumTest,tensorflow/tensorflow/python/saved_model/save_test.py,598,class, 11574,SavingOptionsTest,tensorflow/tensorflow/python/saved_model/save_test.py,638,class, 11575,AssetTests,tensorflow/tensorflow/python/saved_model/save_test.py,751,class, 11576,ExportMetaGraphTests,tensorflow/tensorflow/python/saved_model/save_test.py,843,class, 11577,tearDownModule,tensorflow/tensorflow/python/saved_model/saved_model_test.py,55,function, 11578,SavedModelTestBase,tensorflow/tensorflow/python/saved_model/saved_model_test.py,59,class, 11579,SavedModelTest,tensorflow/tensorflow/python/saved_model/saved_model_test.py,84,class, 11580,SavedModelV1Test,tensorflow/tensorflow/python/saved_model/saved_model_test.py,1342,class, 11581,build_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,39,function,"Utility function to build a SignatureDef protocol buffer. Args: inputs: Inputs of the SignatureDef defined as a proto map of string to tensor info. outputs: Outputs of the SignatureDef defined as a proto map of string to tensor info. method_name: Method name of the SignatureDef as a string. Returns: A SignatureDef protocol buffer constructed based on the supplied arguments." 11582,regression_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,71,function,"Creates regression signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Regress API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string `Tensor`, expected to accept serialized tf.Examples. predictions: A float `Tensor`. Returns: A regression-flavored signature_def. 
Raises: ValueError: If examples is `None`." 11583,classification_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,119,function,"Creates classification signature from given examples and predictions. This function produces signatures intended for use with the TensorFlow Serving Classify API (tensorflow_serving/apis/prediction_service.proto), and so constrains the input and output types to those allowed by TensorFlow Serving. Args: examples: A string `Tensor`, expected to accept serialized tf.Examples. classes: A string `Tensor`. Note that the ClassificationResponse message requires that class labels are strings, not integers or anything else. scores: a float `Tensor`. Returns: A classification-flavored signature_def. Raises: ValueError: If examples is `None`." 11584,predict_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,178,function,"Creates prediction signature from given inputs and outputs. This function produces signatures intended for use with the TensorFlow Serving Predict API (tensorflow_serving/apis/prediction_service.proto). This API imposes no constraints on the input and output types. Args: inputs: dict of string to `Tensor`. outputs: dict of string to `Tensor`. Returns: A prediction-flavored signature_def. Raises: ValueError: If inputs or outputs is `None`." 11585,supervised_train_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,212,function, 11586,supervised_eval_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,219,function, 11587,_supervised_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,226,function,"Creates a signature for training and eval data. This function produces signatures that describe the inputs and outputs of a supervised process, such as training or evaluation, that results in loss, metrics, and the like. Note that this function only requires inputs to be not None. Args: method_name: Method name of the SignatureDef as a string. inputs: dict of string to `Tensor`. loss: dict of string to `Tensor` representing computed loss. predictions: dict of string to `Tensor` representing the output predictions. metrics: dict of string to `Tensor` representing metric ops. Returns: A train- or eval-flavored signature_def. Raises: ValueError: If inputs or outputs is `None`." 11588,is_valid_signature,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,275,function,Determine whether a SignatureDef can be served by TensorFlow Serving. 11589,_is_valid_predict_signature,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,284,function,Determine whether the argument is a servable 'predict' SignatureDef. 11590,_is_valid_regression_signature,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,295,function,Determine whether the argument is a servable 'regress' SignatureDef. 11591,_is_valid_classification_signature,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,317,function,Determine whether the argument is a servable 'classify' SignatureDef. 11592,op_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,350,function,"Creates a signature def with the output pointing to an op. Note that op isn't strictly enforced to be an Op object, and may be a Tensor. It is recommended to use the build_signature_def() function for Tensors. Args: op: An Op (or possibly Tensor). key: Key to graph element in the SignatureDef outputs. 
Returns: A SignatureDef with a single output pointing to the op." 11593,load_op_from_signature_def,tensorflow/tensorflow/python/saved_model/signature_def_utils_impl.py,368,function,"Load an Op from a SignatureDef created by op_signature_def(). Args: signature_def: a SignatureDef proto key: string key to op in the SignatureDef outputs. import_scope: Scope used to import the op Returns: Op (or possibly Tensor) in the graph with the same name as saved in the SignatureDef. Raises: NotFoundError: If the op could not be found in the graph." 11594,_make_signature,tensorflow/tensorflow/python/saved_model/signature_def_utils_test.py,48,function, 11595,SignatureDefUtilsTest,tensorflow/tensorflow/python/saved_model/signature_def_utils_test.py,61,class, 11596,_get_signature,tensorflow/tensorflow/python/saved_model/signature_serialization.py,39,function, 11597,_valid_signature,tensorflow/tensorflow/python/saved_model/signature_serialization.py,48,function,Returns whether concrete function can be converted to a signature. 11598,_validate_inputs,tensorflow/tensorflow/python/saved_model/signature_serialization.py,63,function, 11599,find_function_to_export,tensorflow/tensorflow/python/saved_model/signature_serialization.py,71,function,"Function to export, None if no suitable function was found." 11600,canonicalize_signatures,tensorflow/tensorflow/python/saved_model/signature_serialization.py,96,function,Converts `signatures` into a dictionary of concrete functions. 11601,_is_flat,tensorflow/tensorflow/python/saved_model/signature_serialization.py,153,function, 11602,_normalize_outputs,tensorflow/tensorflow/python/saved_model/signature_serialization.py,164,function,Construct an output dictionary from unnormalized function outputs. 11603,_SignatureMap,tensorflow/tensorflow/python/saved_model/signature_serialization.py,200,class,A collection of SavedModel signatures. 11604,create_signature_map,tensorflow/tensorflow/python/saved_model/signature_serialization.py,245,function,Creates an object containing `signatures`. 11605,validate_saveable_view,tensorflow/tensorflow/python/saved_model/signature_serialization.py,265,function,Performs signature-related sanity checks on `saveable_view`. 11606,simple_save,tensorflow/tensorflow/python/saved_model/simple_save.py,35,function,"Convenience function to build a SavedModel suitable for serving. In many common cases, saving models for serving will be as simple as: simple_save(session, export_dir, inputs={""x"": x, ""y"": y}, outputs={""z"": z}) Although in many cases it's not necessary to understand all of the many ways to configure a SavedModel, this method has a few practical implications: - It will be treated as a graph for inference / serving (i.e. uses the tag `saved_model.SERVING`) - The SavedModel will load in TensorFlow Serving and supports the [Predict API](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/predict.proto). To use the Classify, Regress, or MultiInference APIs, please use either [tf.Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator) or the lower level [SavedModel APIs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md). - Some TensorFlow ops depend on information on disk or other information called ""assets"". These are generally handled automatically by adding the assets to the `GraphKeys.ASSET_FILEPATHS` collection. 
Only assets in that collection are exported; if you need more custom behavior, you'll need to use the [SavedModelBuilder](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/builder.py). More information about SavedModel and signatures can be found here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md. Args: session: The TensorFlow session from which to save the meta graph and variables. export_dir: The path to which the SavedModel will be stored. inputs: dict mapping string input names to tensors. These are added to the SignatureDef as the inputs. outputs: dict mapping string output names to tensors. These are added to the SignatureDef as the outputs. legacy_init_op: Legacy support for op or group of ops to execute after the restore op upon a load." 11607,SimpleSaveTest,tensorflow/tensorflow/python/saved_model/simple_save_test.py,33,class, 11608,build_tensor_info,tensorflow/tensorflow/python/saved_model/utils_impl.py,51,function,"Utility function to build TensorInfo proto from a Tensor. Args: tensor: Tensor or SparseTensor whose name, dtype and shape are used to build the TensorInfo. For SparseTensors, the names of the three constituent Tensors are used. Returns: A TensorInfo protocol buffer constructed based on the supplied argument. Raises: RuntimeError: If eager execution is enabled." 11609,build_tensor_info_internal,tensorflow/tensorflow/python/saved_model/utils_impl.py,70,function,Utility function to build TensorInfo proto from a Tensor. 11610,_build_composite_tensor_info_internal,tensorflow/tensorflow/python/saved_model/utils_impl.py,88,function,Utility function to build TensorInfo proto from a CompositeTensor. 11611,build_tensor_info_from_op,tensorflow/tensorflow/python/saved_model/utils_impl.py,101,function,"Utility function to build TensorInfo proto from an Op. Note that this function should be used with caution. It is strictly restricted to TensorFlow internal use-cases only. Please make sure you do need it before using it. This utility function overloads the TensorInfo proto by setting the name to the Op's name, dtype to DT_INVALID and tensor_shape as None. One typical usage is for the Op of the call site for the defunned function: ```python @function.defun def some_variable_initialization_fn(value_a, value_b): a = value_a b = value_b value_a = constant_op.constant(1, name=""a"") value_b = constant_op.constant(2, name=""b"") op_info = utils.build_tensor_info_from_op( some_variable_initialization_fn(value_a, value_b)) ``` Args: op: An Op whose name is used to build the TensorInfo. The name that points to the Op could be fetched at run time in the Loader session. Returns: A TensorInfo protocol buffer constructed based on the supplied argument." 11612,get_tensor_from_tensor_info,tensorflow/tensorflow/python/saved_model/utils_impl.py,143,function,"Returns the Tensor or CompositeTensor described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing a Tensor or SparseTensor or CompositeTensor. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in `tensor_info` are prefixed with this string before lookup. Returns: The Tensor or SparseTensor or CompositeTensor in `graph` described by `tensor_info`. Raises: KeyError: If `tensor_info` does not correspond to a tensor in `graph`. ValueError: If `tensor_info` is malformed." 
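For the `simple_save` and TensorInfo utilities above, a minimal graph-mode sketch (the tensor names and /tmp path are illustrative):

```python
import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

with tf1.Graph().as_default(), tf1.Session() as sess:
  x = tf1.placeholder(tf1.float32, shape=[None], name="x")
  w = tf1.Variable(2.0, name="w")
  z = tf1.multiply(x, w, name="z")
  sess.run(tf1.global_variables_initializer())
  # Writes a MetaGraph tagged "serve" with a single Predict signature
  # whose TensorInfos are built from the given input and output tensors.
  tf1.saved_model.simple_save(
      sess, "/tmp/simple_model", inputs={"x": x}, outputs={"z": z})
```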
11613,get_element_from_tensor_info,tensorflow/tensorflow/python/saved_model/utils_impl.py,186,function,"Returns the element in the graph described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing an Op or Tensor by name. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in `tensor_info` are prefixed with this string before lookup. Returns: Op or tensor in `graph` described by `tensor_info`. Raises: KeyError: If `tensor_info` does not correspond to an op or tensor in `graph`." 11614,get_or_create_variables_dir,tensorflow/tensorflow/python/saved_model/utils_impl.py,210,function,"Return variables sub-directory, or create one if it doesn't exist." 11615,get_variables_dir,tensorflow/tensorflow/python/saved_model/utils_impl.py,218,function,Return variables sub-directory in the SavedModel. 11616,get_variables_path,tensorflow/tensorflow/python/saved_model/utils_impl.py,225,function,"Return the variables path, used as the prefix for checkpoint files." 11617,get_or_create_assets_dir,tensorflow/tensorflow/python/saved_model/utils_impl.py,232,function,"Return assets sub-directory, or create one if it doesn't exist." 11618,get_assets_dir,tensorflow/tensorflow/python/saved_model/utils_impl.py,242,function,Return path to asset directory in the SavedModel. 11619,get_or_create_debug_dir,tensorflow/tensorflow/python/saved_model/utils_impl.py,249,function,"Returns path to the debug sub-directory, creating if it does not exist." 11620,get_debug_dir,tensorflow/tensorflow/python/saved_model/utils_impl.py,259,function,Returns path to the debug sub-directory in the SavedModel. 11621,UtilsTest,tensorflow/tensorflow/python/saved_model/utils_test.py,39,class, 11622,ExportOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output.py,32,class,"Represents an output of a model that can be served. These typically correspond to model heads." 11623,ClassificationOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output.py,102,class,"Represents the output of a classification head. Either classes or scores or both must be set. The classes `Tensor` must provide string labels, not integer class IDs. If only classes is set, it is interpreted as providing top-k results in descending order. If only scores is set, it is interpreted as providing a score for every class in order of class ID. If both classes and scores are set, they are interpreted as zipped, so each score corresponds to the class at the same index. Clients should not depend on the order of the entries." 11624,RegressionOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output.py,170,class,Represents the output of a regression head. 11625,PredictOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output.py,202,class,"Represents the output of a generic prediction head. A generic prediction need not be either a classification or a regression. Named outputs must be provided as a dict from string to `Tensor`." 11626,_SupervisedOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output.py,235,class,Represents the output of a supervised training or eval process. 11627,TrainOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output.py,386,class,"Represents the output of a supervised training process. This class generates the appropriate signature def for exporting training output by type-checking and wrapping loss, predictions, and metrics values." 
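The `ExportOutput` subclasses indexed above are mirrored publicly under `tf.estimator.export`. A graph-mode sketch of turning a `PredictOutput` into a SignatureDef (placeholder names are illustrative):

```python
import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

serialized = tf1.placeholder(tf1.string, shape=[None], name="examples")
scores = tf1.placeholder(tf1.float32, shape=[None, 5], name="scores")

# PredictOutput imposes no dtype constraints, unlike ClassificationOutput
# (string classes / float scores) and RegressionOutput (float values).
output = tf1.estimator.export.PredictOutput({"scores": scores})
signature_def = output.as_signature_def({"examples": serialized})
print(signature_def.method_name)  # tensorflow/serving/predict
```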
11628,EvalOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output.py,398,class,"Represents the output of a supervised eval process. This class generates the appropriate signature def for exporting eval output by type-checking and wrapping loss, predictions, and metrics values." 11629,ExportOutputTest,tensorflow/tensorflow/python/saved_model/model_utils/export_output_test.py,37,class, 11630,MockSupervisedOutput,tensorflow/tensorflow/python/saved_model/model_utils/export_output_test.py,231,class,So that we can test the abstract class methods directly. 11631,SupervisedOutputTest,tensorflow/tensorflow/python/saved_model/model_utils/export_output_test.py,238,class, 11632,ExportTest,tensorflow/tensorflow/python/saved_model/model_utils/export_test.py,37,class, 11633,build_all_signature_defs,tensorflow/tensorflow/python/saved_model/model_utils/export_utils.py,63,function,"Build `SignatureDef`s for all export outputs. Args: receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying input nodes where this receiver expects to be fed by default. Typically, this is a single placeholder expecting serialized `tf.Example` protos. export_outputs: a dict of ExportOutput instances, each of which has an as_signature_def instance method that will be called to retrieve the signature_def for all export output tensors. receiver_tensors_alternatives: a dict of string to additional groups of receiver tensors, each of which may be a `Tensor` or a dict of string to `Tensor`. These named receiver tensor alternatives generate additional serving signatures, which may be used to feed inputs at different points within the input receiver subgraph. A typical usage is to allow feeding raw feature `Tensor`s *downstream* of the tf.io.parse_example() op. Defaults to None. serving_only: boolean; if true, resulting signature defs will only include valid serving signatures. If false, all requested signatures will be returned. Returns: signature_def representing all passed args. Raises: ValueError: if export_outputs is not a dict." 11634,_log_signature_report,tensorflow/tensorflow/python/saved_model/model_utils/export_utils.py,152,function,Log a report of which signatures were produced. 11635,get_timestamped_export_dir,tensorflow/tensorflow/python/saved_model/model_utils/export_utils.py,193,function,"Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name." 11636,get_temp_export_dir,tensorflow/tensorflow/python/saved_model/model_utils/export_utils.py,230,function,"Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/<timestamp> Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>." 
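A simplified re-implementation of the directory-naming scheme that `get_timestamped_export_dir` and `get_temp_export_dir` describe (the helper names here are local stand-ins, not the indexed functions):

```python
import os
import time

def timestamped_export_dir(export_dir_base):
  # Seconds since epoch yields monotonically increasing version numbers,
  # which TensorFlow Serving parses as integers.
  return os.path.join(export_dir_base, str(int(time.time())))

def temp_export_dir(timestamped_dir):
  # 'temp-' siblings cannot be parsed as integers, so TensorFlow Serving
  # ignores them until they are renamed to the final timestamped name.
  dirname, basename = os.path.split(timestamped_dir)
  return os.path.join(dirname, "temp-" + basename)

final_dir = timestamped_export_dir("/tmp/exports")
print(temp_export_dir(final_dir))  # e.g. /tmp/exports/temp-1600000000
```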
11637,export_outputs_for_mode,tensorflow/tensorflow/python/saved_model/model_utils/export_utils.py,250,function,"Utility function for constructing an `ExportOutput` dict given a mode. The returned dict can be directly passed to the `build_all_signature_defs` helper function as the `export_outputs` argument, used for generating a SignatureDef map. Args: mode: A `ModeKeys` specifying the mode. serving_export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict or None. predictions: A dict of Tensors or single Tensor representing model predictions. This argument is only used if serving_export_outputs is not set. loss: A dict of Tensors or single Tensor representing calculated loss. metrics: A dict of (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Returns: Dictionary mapping a key to a `tf.estimator.export.ExportOutput` object. The key is the expected SignatureDef key for the mode. Raises: ValueError: if an appropriate ExportOutput cannot be found for the mode." 11638,get_export_outputs,tensorflow/tensorflow/python/saved_model/model_utils/export_utils.py,294,function,"Validate export_outputs or create default export_outputs. Args: export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict or None. predictions: Predictions `Tensor` or dict of `Tensor`. Returns: Valid export_outputs dict. Raises: TypeError: if export_outputs is not a dict or its values are not ExportOutput instances." 11639,_maybe_add_default_serving_output,tensorflow/tensorflow/python/saved_model/model_utils/export_utils.py,328,function,"Add a default serving output to the export_outputs if not present. Args: export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict. Returns: export_outputs dict with default serving signature added if necessary. Raises: ValueError: if multiple export_outputs were provided without a default serving key." 11640,KerasModeKeys,tensorflow/tensorflow/python/saved_model/model_utils/mode_keys.py,25,class,"Standard names for model modes. The following standard keys are defined: * `TRAIN`: training/fitting mode. * `TEST`: testing/evaluation mode. * `PREDICT`: prediction/inference mode." 11641,EstimatorModeKeys,tensorflow/tensorflow/python/saved_model/model_utils/mode_keys.py,41,class,"Standard names for Estimator model modes. The following standard keys are defined: * `TRAIN`: training/fitting mode. * `EVAL`: testing/evaluation mode. * `PREDICT`: prediction/inference mode." 11642,is_predict,tensorflow/tensorflow/python/saved_model/model_utils/mode_keys.py,56,function, 11643,is_eval,tensorflow/tensorflow/python/saved_model/model_utils/mode_keys.py,60,function, 11644,is_train,tensorflow/tensorflow/python/saved_model/model_utils/mode_keys.py,64,function, 11645,ModeKeyMap,tensorflow/tensorflow/python/saved_model/model_utils/mode_keys.py,68,class,"Map using ModeKeys as keys. This class creates an immutable mapping from modes to values. For example, SavedModel export of Keras and Estimator models uses this to map modes to their corresponding MetaGraph tags/SignatureDef keys. Since this class uses modes, rather than strings, as keys, both ""predict"" (Keras's PREDICT ModeKey) and ""infer"" (Estimator's PREDICT ModeKey) map to the same value." 
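A hypothetical stand-in illustrating the behavior `ModeKeyMap` describes, where the Keras and Estimator spellings of the predict mode resolve to the same entry (this is not the indexed implementation):

```python
# Canonicalize mode names so Keras "predict" and Estimator "infer" collide.
_CANONICAL = {"train": "train", "test": "eval", "eval": "eval",
              "predict": "predict", "infer": "predict"}

class SimpleModeKeyMap:
  """Immutable mapping from modes to values, keyed by canonical mode."""

  def __init__(self, **kwargs):
    self._map = {_CANONICAL[k]: v for k, v in kwargs.items()}

  def __getitem__(self, mode):
    return self._map[_CANONICAL[mode]]

tags = SimpleModeKeyMap(train="train_tag", predict="serve_tag")
assert tags["infer"] == tags["predict"] == "serve_tag"
```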
11646,ModeKeyMapTest,tensorflow/tensorflow/python/saved_model/model_utils/mode_keys_test.py,25,class, 11647,get_plugin_asset,tensorflow/tensorflow/python/summary/plugin_asset.py,42,function,"Acquire singleton PluginAsset instance from a graph. PluginAssets are always singletons, and are stored in tf Graph collections. This way, they can be defined anywhere the graph is being constructed, and if the same plugin is configured at many different points, the user can always modify the same instance. Args: plugin_asset_cls: The PluginAsset class graph: (optional) The graph to retrieve the instance from. If not specified, the default graph is used. Returns: An instance of the plugin_asset_cls. Raises: ValueError: If we have a plugin name collision, or if we unexpectedly find the wrong number of items in a collection." 11648,get_all_plugin_assets,tensorflow/tensorflow/python/summary/plugin_asset.py,84,function,"Retrieve all PluginAssets stored in the graph collection. Args: graph: Optionally, the graph to get assets from. If unspecified, the default graph is used. Returns: A list with all PluginAsset instances in the graph. Raises: ValueError: if we unexpectedly find a collection with the wrong number of PluginAssets." 11649,PluginAsset,tensorflow/tensorflow/python/summary/plugin_asset.py,113,class,"This abstract base class allows TensorBoard to serialize assets to disk. Plugin authors are expected to extend the PluginAsset class, so that it: - has a unique plugin_name - provides an assets method that returns an {asset_name: asset_contents} dictionary. For now, asset_contents are strings, although we may add StringIO support later. LifeCycle of a PluginAsset instance: - It is constructed when get_plugin_asset is called on the class for the first time. - It is configured by code that follows the calls to get_plugin_asset. - When the containing graph is serialized by the tf.compat.v1.summary.FileWriter, the writer calls assets and the PluginAsset instance provides its contents to be written to disk." 11650,_UnnamedPluginAsset,tensorflow/tensorflow/python/summary/plugin_asset_test.py,26,class,"An example asset with a dummy serialize method provided, but no name." 11651,_ExamplePluginAsset,tensorflow/tensorflow/python/summary/plugin_asset_test.py,33,class,Simple example asset. 11652,_OtherExampleAsset,tensorflow/tensorflow/python/summary/plugin_asset_test.py,38,class,Simple example asset. 11653,_ExamplePluginThatWillCauseCollision,tensorflow/tensorflow/python/summary/plugin_asset_test.py,43,class, 11654,PluginAssetTest,tensorflow/tensorflow/python/summary/plugin_asset_test.py,47,class, 11655,scalar,tensorflow/tensorflow/python/summary/summary.py,58,function,"Outputs a `Summary` protocol buffer containing a single scalar value. The generated Summary has a Tensor.proto containing the input Tensor. Args: name: A name for the generated node. Will also serve as the series name in TensorBoard. tensor: A real numeric Tensor containing a single value. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. family: Optional; if provided, used as the prefix of the summary tag name, which controls the tab name used for display on Tensorboard. Returns: A scalar `Tensor` of type `string`, which contains a `Summary` protobuf. Raises: ValueError: If tensor has the wrong shape or type." 11656,image,tensorflow/tensorflow/python/summary/summary.py,88,function,"Outputs a `Summary` protocol buffer with images.
The summary has up to `max_outputs` summary values containing images. The images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, channels]` and where `channels` can be: * 1: `tensor` is interpreted as Grayscale. * 3: `tensor` is interpreted as RGB. * 4: `tensor` is interpreted as RGBA. The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. `uint8` values are unchanged. The op uses two different normalization algorithms: * If the input values are all positive, they are rescaled so the largest one is 255. * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255. The `tag` in the outputted Summary.Value protobufs is generated based on the name, with a suffix depending on the max_outputs setting: * If `max_outputs` is 1, the summary value tag is '*name*/image'. * If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*name*/image/0', '*name*/image/1', etc. Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height, width, channels]` where `channels` is 1, 3, or 4. max_outputs: Max number of batch elements to generate images for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [_ops.GraphKeys.SUMMARIES] family: Optional; if provided, used as the prefix of the summary tag name, which controls the tab name used for display on Tensorboard. Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 11657,histogram,tensorflow/tensorflow/python/summary/summary.py,144,function,"Outputs a `Summary` protocol buffer with a histogram. Adding a histogram summary makes it possible to visualize your data's distribution in TensorBoard. You can see a detailed explanation of the TensorBoard histogram dashboard [here](https://www.tensorflow.org/get_started/tensorboard_histograms). The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. This op reports an `InvalidArgument` error if any value is not finite. Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. values: A real numeric `Tensor`. Any shape. Values to use to build the histogram. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. family: Optional; if provided, used as the prefix of the summary tag name, which controls the tab name used for display on Tensorboard. Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 11658,audio,tensorflow/tensorflow/python/summary/summary.py,185,function,"Outputs a `Summary` protocol buffer with audio. The summary has up to `max_outputs` summary values containing audio. The audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. 
The `tag` in the outputted Summary.Value protobufs is generated based on the name, with a suffix depending on the max_outputs setting: * If `max_outputs` is 1, the summary value tag is '*name*/audio'. * If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*name*/audio/0', '*name*/audio/1', etc Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]` or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`. sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the signal in hertz. max_outputs: Max number of batch elements to generate audio for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [_ops.GraphKeys.SUMMARIES] family: Optional; if provided, used as the prefix of the summary tag name, which controls the tab name used for display on Tensorboard. Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 11659,text,tensorflow/tensorflow/python/summary/summary.py,234,function,"Summarizes textual data. Text data summarized via this plugin will be visible in the Text Dashboard in TensorBoard. The standard TensorBoard Text Dashboard will render markdown in the strings, and will automatically organize 1d and 2d tensors into tables. If a tensor with more than 2 dimensions is provided, a 2d subarray will be displayed along with a warning message. (Note that this behavior is not intrinsic to the text summary api, but rather to the default TensorBoard text plugin.) Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. tensor: a string-type Tensor to summarize. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [_ops.GraphKeys.SUMMARIES] Returns: A TensorSummary op that is configured so that TensorBoard will recognize that it contains textual data. The TensorSummary is a scalar `Tensor` of type `string` which contains `Summary` protobufs. Raises: ValueError: If tensor has the wrong type." 11660,tensor_summary,tensorflow/tensorflow/python/summary/summary.py,275,function,"Outputs a `Summary` protocol buffer with a serialized tensor.proto. Args: name: A name for the generated node. If display_name is not set, it will also serve as the tag name in TensorBoard. (In that case, the tag name will inherit tf name scopes.) tensor: A tensor of any type and shape to serialize. summary_description: A long description of the summary sequence. Markdown is supported. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. summary_metadata: Optional SummaryMetadata proto (which describes which plugins may use the summary value). family: Optional; if provided, used as the prefix of the summary tag, which controls the name used for display on TensorBoard when display_name is not set. display_name: A string used to name this data in TensorBoard. If this is not set, then the node name will be used instead. Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer." 11661,merge,tensorflow/tensorflow/python/summary/summary.py,331,function,"Merges summaries. This op creates a [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) protocol buffer that contains the union of all the values in the input summaries. 
When the Op is run, it reports an `InvalidArgument` error if multiple values in the summaries to merge use the same tag. Args: inputs: A list of `string` `Tensor` objects containing serialized `Summary` protocol buffers. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer resulting from the merging. Raises: RuntimeError: If called with eager mode enabled. @compatibility(eager) Not compatible with eager execution. To write TensorBoard summaries under eager execution, use `tf.contrib.summary` instead. @end_compatibility" 11662,merge_all,tensorflow/tensorflow/python/summary/summary.py,377,function,"Merges all summaries collected in the default graph. Args: key: `GraphKey` used to collect the summaries. Defaults to `GraphKeys.SUMMARIES`. scope: Optional scope used to filter the summary ops, using `re.match`. Returns: If no summaries were collected, returns None. Otherwise returns a scalar `Tensor` of type `string` containing the serialized `Summary` protocol buffer resulting from the merging. Raises: RuntimeError: If called with eager execution enabled. @compatibility(eager) Not compatible with eager execution. To write TensorBoard summaries under eager execution, use `tf.contrib.summary` instead. @end_compatibility" 11663,get_summary_description,tensorflow/tensorflow/python/summary/summary.py,410,function,"Given a TensorSummary node_def, retrieve its SummaryDescription. When a Summary op is instantiated, a SummaryDescription of associated metadata is stored in its NodeDef. This method retrieves the description. Args: node_def: the node_def_pb2.NodeDef of a TensorSummary op Returns: a summary_pb2.SummaryDescription Raises: ValueError: if the node is not a summary op. @compatibility(eager) Not compatible with eager execution. To write TensorBoard summaries under eager execution, use `tf.contrib.summary` instead. @end_compatibility" 11664,_SummaryIterator,tensorflow/tensorflow/python/summary/summary_iterator.py,27,class,Yields `Event` protocol buffers from a given path. 11665,summary_iterator,tensorflow/tensorflow/python/summary/summary_iterator.py,44,function,"Returns an iterator for reading `Event` protocol buffers from an event file. You can use this function to read events written to an event file. It returns a Python iterator that yields `Event` protocol buffers. Example: Print the contents of an events file. ```python for e in tf.compat.v1.train.summary_iterator(path to events file): print(e) ``` Example: Print selected summary values. ```python # This example supposes that the events file contains summaries with a # summary value tag 'loss'. These could have been added by calling # `add_summary()`, passing the output of a scalar summary op created # with: `tf.compat.v1.summary.scalar('loss', loss_tensor)`. for e in tf.compat.v1.train.summary_iterator(path to events file): for v in e.summary.value: if v.tag == 'loss': print(v.simple_value) ``` Example: Continuously check for new summary values.
```python summaries = tf.compat.v1.train.summary_iterator(path to events file) while True: for e in summaries: for v in e.summary.value: if v.tag == 'loss': print(v.simple_value) # Wait for a bit before checking the file for any new events time.sleep(wait time) ``` See the protocol buffer definitions of [Event](https://www.tensorflow.org/code/tensorflow/core/util/event.proto) and [Summary](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) for more information about their attributes. Args: path: The path to an event file created by a `SummaryWriter`. Returns: An iterator that yields `Event` protocol buffers." 11666,SummaryIteratorTestCase,tensorflow/tensorflow/python/summary/summary_iterator_test.py,31,class, 11667,SummaryTest,tensorflow/tensorflow/python/summary/summary_test.py,40,class, 11668,EventFileWriter,tensorflow/tensorflow/python/summary/writer/event_file_writer.py,35,class,"Writes `Event` protocol buffers to an event file. The `EventFileWriter` class creates an event file in the specified directory, and asynchronously writes Event protocol buffers to the file. The Event file is encoded using the tfrecord format, which is similar to RecordIO. This class is not thread-safe." 11669,_EventLoggerThread,tensorflow/tensorflow/python/summary/writer/event_file_writer.py,171,class,Thread that logs events. 11670,CloseableQueue,tensorflow/tensorflow/python/summary/writer/event_file_writer.py,235,class,Stripped-down fork of the standard library Queue that is closeable. 11671,QueueClosedError,tensorflow/tensorflow/python/summary/writer/event_file_writer.py,303,class,Raised when CloseableQueue.put() fails because the queue is closed. 11672,EventFileWriterV2,tensorflow/tensorflow/python/summary/writer/event_file_writer_v2.py,29,class,"Writes `Event` protocol buffers to an event file via the graph. The `EventFileWriterV2` class is backed by the summary file writer in the v2 summary API (currently in tf.contrib.summary), so it uses a shared summary writer resource and graph ops to write events. As with the original EventFileWriter, this class will asynchronously write Event protocol buffers to the backing file. The Event file is encoded using the tfrecord format, which is similar to RecordIO." 11673,FakeSummaryWriter,tensorflow/tensorflow/python/summary/writer/fake_summary_writer.py,27,class,Fake summary writer. 11674,SummaryToEventTransformer,tensorflow/tensorflow/python/summary/writer/writer.py,42,class,"Abstractly implements the SummaryWriter API. This API basically implements a number of endpoints (add_summary, add_session_log, etc). The endpoints all generate an event protobuf, which is passed to the contained event_writer." 11675,FileWriter,tensorflow/tensorflow/python/summary/writer/writer.py,283,class,"Writes `Summary` protocol buffers to event files. The `FileWriter` class provides a mechanism to create an event file in a given directory and add summaries and events to it. The class updates the file contents asynchronously. This allows a training program to call methods to add data to the file directly from the training loop, without slowing down training. When constructed with a `tf.compat.v1.Session` parameter, a `FileWriter` instead forms a compatibility layer over new graph-based summaries to facilitate the use of new summary writing with pre-existing code that expects a `FileWriter` instance. This class is not thread-safe." 11676,FileWriterCache,tensorflow/tensorflow/python/summary/writer/writer_cache.py,29,class,"Cache for file writers.
This class caches file writers, one per directory." 11677,FileWriterTestBase,tensorflow/tensorflow/python/summary/writer/writer_test.py,50,class, 11678,FakeWriteError,tensorflow/tensorflow/python/summary/writer/writer_test.py,463,class, 11679,FileWriterTestCase,tensorflow/tensorflow/python/summary/writer/writer_test.py,467,class, 11680,SessionBasedFileWriterTestCase,tensorflow/tensorflow/python/summary/writer/writer_test.py,543,class,Tests for FileWriter behavior when passed a Session argument. 11681,FileWriterCacheTest,tensorflow/tensorflow/python/summary/writer/writer_test.py,689,class,FileWriterCache tests. 11682,SymbolTable,tensorflow/tensorflow/python/tf_program/mlir_gen.py,41,class,Symbol Table for python code. 11683,ProcessType,tensorflow/tensorflow/python/tf_program/mlir_gen.py,87,class,"Visit a node and return the processed type. Currently only visits annotations and gives their type." 11684,MLIRGen,tensorflow/tensorflow/python/tf_program/mlir_gen.py,115,class,"Visit the AST and generate MLIR code. Requires liveness, reading_definitions." 11685,mlir_gen_internal,tensorflow/tensorflow/python/tf_program/mlir_gen.py,417,function,Returns mlir module for unprocessed node `node`. 11686,mlir_gen,tensorflow/tensorflow/python/tf_program/mlir_gen.py,432,function,Parse a function and return TFProgram. 11687,mlir_gen_from_source,tensorflow/tensorflow/python/tf_program/mlir_gen.py,444,function,"Parse a function, as either a string or from a supplied file path, and return a TFProgram." 11688,IfOp,tensorflow/tensorflow/python/tf_program/pywrap_tfd.py,59,class,"tfp.if(cond) ({body}, {orelse}) : type If `cond` is true, `body` is executed, otherwise `orelse` is executed." 11689,OrOp,tensorflow/tensorflow/python/tf_program/pywrap_tfd.py,75,class,"tfp.Or(ops...) This is like tf.Any, except that the first dimension is opened into `ops`. Returns a tensor of 1-bit integers which is the ""Logical OR"" of the corresponding elements in ops..." 11690,AndOp,tensorflow/tensorflow/python/tf_program/pywrap_tfd.py,93,class,"tfp.And(ops...) This is like tf.All, except that the first dimension is opened to `ops`. Returns a tensor of 1-bit integers which is the ""Logical AND"" of the corresponding elements in ops..." 11691,WhileOp,tensorflow/tensorflow/python/tf_program/pywrap_tfd.py,111,class,"tfp.While(init-vals, { ^bb1(cond-args): cond-region return cond }, { ^bb1(body-args): body-region }) As long as `cond-region` returns a ""true""-like value, the body-region is executed and the arguments are replaced by its return values for the next iteration." 11692,TFProgram,tensorflow/tensorflow/python/tf_program/pywrap_tfd.py,136,class,Python wrapper for a Tensorflow Program (essentially an mlir Module). 11693,MLIRGenTestBase,tensorflow/tensorflow/python/tf_program/tests/mlir_gen_test.py,31,class, 11694,MLIRGenTest,tensorflow/tensorflow/python/tf_program/tests/mlir_gen_test.py,37,class,MLIR Generation Tests for Tensorflow Program 11695,_has_no_variables,tensorflow/tensorflow/python/tools/freeze_graph.py,62,function,"Determines if the graph has any variables. Args: sess: TensorFlow Session. Returns: Bool." 11696,freeze_graph_with_def_protos,tensorflow/tensorflow/python/tools/freeze_graph.py,77,function,"Converts all variables in a graph and checkpoint into constants. Args: input_graph_def: A `GraphDef`. input_saver_def: A `SaverDef` (optional). input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking priority.
Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. output_node_names: The name(s) of the output nodes, comma separated. restore_op_name: Unused. filename_tensor_name: Unused. output_graph: String where to write the frozen `GraphDef`. clear_devices: A Bool whether to remove device specifications. initializer_nodes: Comma separated string of initializer nodes to run before freezing. variable_names_whitelist: The set of variable names to convert (optional; by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants (optional). input_meta_graph_def: A `MetaGraphDef` (optional). input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and variables (optional). saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to load, in string format (optional). checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1 or saver_pb2.SaverDef.V2). Returns: Location of the output_graph_def." 11697,_parse_input_graph_proto,tensorflow/tensorflow/python/tools/freeze_graph.py,243,function,Parses input tensorflow graph into GraphDef proto. 11698,_parse_input_meta_graph_proto,tensorflow/tensorflow/python/tools/freeze_graph.py,257,function,Parses input tensorflow graph into MetaGraphDef proto. 11699,_parse_input_saver_proto,tensorflow/tensorflow/python/tools/freeze_graph.py,272,function,Parses input tensorflow Saver into SaverDef proto. 11700,freeze_graph,tensorflow/tensorflow/python/tools/freeze_graph.py,286,function,"Converts all variables in a graph and checkpoint into constants. Args: input_graph: A `GraphDef` file to load. input_saver: A TensorFlow Saver file. input_binary: A Bool. True means input_graph is .pb, False indicates .pbtxt. input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. output_node_names: The name(s) of the output nodes, comma separated. restore_op_name: Unused. filename_tensor_name: Unused. output_graph: String where to write the frozen `GraphDef`. clear_devices: A Bool whether to remove device specifications. initializer_nodes: Comma separated list of initializer nodes to run before freezing. variable_names_whitelist: The set of variable names to convert (optional; by default, all variables are converted). variable_names_denylist: The set of variable names to omit converting to constants (optional). input_meta_graph: A `MetaGraphDef` file to load (optional). input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and variables (optional). saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to load, in string format. checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1 or saver_pb2.SaverDef.V2). Returns: String that is the location of frozen GraphDef." 11701,main,tensorflow/tensorflow/python/tools/freeze_graph.py,364,function, 11702,run_main,tensorflow/tensorflow/python/tools/freeze_graph.py,381,function,Main function of freeze_graph. 11703,FreezeGraphTest,tensorflow/tensorflow/python/tools/freeze_graph_test.py,51,class, 11704,import_to_tensorboard,tensorflow/tensorflow/python/tools/import_pb_to_tensorboard.py,43,function,"View a SavedModel as a graph in Tensorboard. Args: model_dir: The directory containing the SavedModel to import.
log_dir: The location for the Tensorboard log to begin visualization from. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. If the tag-set contains multiple tags, all tags must be passed in. Usage: Call this function with your SavedModel location and desired log directory. Launch Tensorboard by pointing it to the log directory. View your imported SavedModel as a graph." 11705,main,tensorflow/tensorflow/python/tools/import_pb_to_tensorboard.py,67,function, 11706,_count_total_params,tensorflow/tensorflow/python/tools/inspect_checkpoint.py,33,function,Count total number of variables. 11707,print_tensors_in_checkpoint_file,tensorflow/tensorflow/python/tools/inspect_checkpoint.py,57,function,"Prints tensors in a checkpoint file. If no `tensor_name` is provided, prints the tensor names and shapes in the checkpoint file. If `tensor_name` is provided, prints the content of the tensor. Args: file_name: Name of the checkpoint file. tensor_name: Name of the tensor in the checkpoint file to print. all_tensors: Boolean indicating whether to print all tensors. all_tensor_names: Boolean indicating whether to print all tensor names. count_exclude_pattern: Regex string, pattern to exclude tensors when counting." 11708,parse_numpy_printoption,tensorflow/tensorflow/python/tools/inspect_checkpoint.py,115,function,"Sets a single numpy printoption from a string of the form 'x=y'. See documentation on numpy.set_printoptions() for details about what values x and y can take. x can be any option listed there other than 'formatter'. Args: kv_str: A string of the form 'x=y', such as 'threshold=100000' Raises: argparse.ArgumentTypeError: If the string couldn't be used to set any numpy printoption." 11709,main,tensorflow/tensorflow/python/tools/inspect_checkpoint.py,148,function, 11710,get_parent_dir,tensorflow/tensorflow/python/tools/module_util.py,29,function, 11711,get_parent_dir_for_name,tensorflow/tensorflow/python/tools/module_util.py,33,function,"Get parent directory for module with the given name. Args: module_name: Module name for e.g. tensorflow_estimator.python.estimator.api._v1.estimator. Returns: Path to the parent directory if module is found and None otherwise. Given the example above, it should return: /pathtoestimator/tensorflow_estimator/python/estimator/api/_v1." 11712,main,tensorflow/tensorflow/python/tools/optimize_for_inference.py,74,function, 11713,_parse_placeholder_types,tensorflow/tensorflow/python/tools/optimize_for_inference.py,104,function,Extracts placeholder types from a comma-separated list. 11714,parse_args,tensorflow/tensorflow/python/tools/optimize_for_inference.py,110,function,Parses command line arguments. 11715,optimize_for_inference,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,93,function,"Applies a series of inference optimizations on the input graph. Args: input_graph_def: A GraphDef containing a training model. input_node_names: A list of names of the nodes that are fed inputs during inference. output_node_names: A list of names of the nodes that produce the final results. placeholder_type_enum: The AttrValue enum for the placeholder data type, or a list that specifies one value per input node name. toco_compatible: Boolean, if True, only runs optimizations that result in TOCO compatible graph operations (default=False). Returns: An optimized version of the input graph." 11716,ensure_graph_is_valid,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,126,function,"Makes sure that the graph is internally consistent.
Checks basic properties of the graph def and raises an exception if there are input references to missing nodes, duplicated names, or other logic errors. Args: graph_def: Definition of a graph to be checked. Raises: ValueError: If the graph is incorrectly constructed." 11717,node_name_from_input,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,151,function,Strips off ports and other decorations to get the underlying node name. 11718,node_from_map,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,161,function,"Pulls a node def from a dictionary for a given name. Args: node_map: Dictionary containing an entry indexed by name for every node. name: Identifies the node we want to find. Returns: NodeDef of the node with the given name. Raises: ValueError: If the node isn't present in the dictionary." 11719,values_from_const,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,180,function,"Extracts the values from a const NodeDef as a numpy ndarray. Args: node_def: Const NodeDef that has the values we want to access. Returns: Numpy ndarray containing the values. Raises: ValueError: If the node isn't a Const." 11720,scale_after_normalization,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,202,function, 11721,fold_batch_norms,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,208,function,"Removes batch normalization ops by folding them into convolutions. Batch normalization during training has multiple dynamic parameters that are updated, but once the graph is finalized these become constants. That means there's an opportunity to reduce the computations down to a scale and addition, rather than the more expensive multiple ops, and even bake the scaling into the convolution weights. This function identifies the typical pattern of batch normalization subgraphs, and performs the transformation to fold the computations down into a simpler form. It currently only supports batch normalization that's performed by the BatchNormWithGlobalNormalization, FusedBatchNorm, and FusedBatchNormV3 ops, and will need to be extended in the future to handle the newer style. Args: input_graph_def: A GraphDef containing a model. Returns: Modified graph with BN ops removed, and modified weights. Raises: ValueError: If the graph is badly formed with duplicate node names." 11722,fuse_resize_and_conv,tensorflow/tensorflow/python/tools/optimize_for_inference_lib.py,419,function,"Merges preceding resize and mirror pad ops into a specialized convolution. There's a common pattern of enlarging the input to a convolution using a resize operation, and also using MirrorPad to extend the boundaries so that zero edge pixels don't bleed inwards when convolving. This routine looks for that pattern of operations, and fuses them together into a Conv2DWithResizeOp. Args: input_graph_def: A GraphDef containing a model. output_node_names: A list of names of the nodes that produce the final results. Returns: Modified graph with resize and pad ops merged. Raises: ValueError: If the graph is badly formed with duplicate node names."
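Tying together several of the summary entry points listed earlier in this section (scalar, histogram, merge_all, FileWriter): a minimal TF1-style usage sketch; the log directory and tag names are placeholders:
```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=(None,), name='x')
tf.summary.scalar('mean', tf.reduce_mean(x))
tf.summary.histogram('values', x)

# merge_all gathers every op registered in GraphKeys.SUMMARIES into one
# serialized-Summary tensor, so a single sess.run fetches all of them.
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logdir', sess.graph)  # placeholder dir
    for step in range(3):
        summ = sess.run(merged, feed_dict={x: [0.5 * step, 1.0, 2.0]})
        writer.add_summary(summ, global_step=step)
    writer.close()
```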
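And a usage sketch for optimize_for_inference_lib as documented above, applied to a frozen GraphDef such as the output of freeze_graph; the file paths and node names ('input', 'logits') are assumptions standing in for a real model:
```python
import tensorflow.compat.v1 as tf
from tensorflow.python.tools import optimize_for_inference_lib

# Load a frozen GraphDef from disk (placeholder path).
graph_def = tf.GraphDef()
with tf.io.gfile.GFile('/tmp/frozen_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

# Apply the inference rewrites: unused-node stripping, batch-norm folding,
# resize/pad fusion, etc.
optimized = optimize_for_inference_lib.optimize_for_inference(
    input_graph_def=graph_def,
    input_node_names=['input'],      # assumed input node name
    output_node_names=['logits'],    # assumed output node name
    placeholder_type_enum=tf.float32.as_datatype_enum)

with tf.io.gfile.GFile('/tmp/optimized_graph.pb', 'wb') as f:
    f.write(optimized.SerializeToString())
```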
11723,OptimizeForInferenceTest,tensorflow/tensorflow/python/tools/optimize_for_inference_test.py,42,class, 11724,main,tensorflow/tensorflow/python/tools/print_selective_registration_header.py,47,function, 11725,PrintOpFilegroupTest,tensorflow/tensorflow/python/tools/print_selective_registration_header_test.py,82,class, 11726,_shlex_quote,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,68,function, 11727,_sysconfig_module,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,75,function,"Load tf.sysconfig if available and working (i.e., inside a pip package)." 11728,_parse_tensor_name,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,84,function,"Convert a tensor name like 'tensor:0' into a tuple ('tensor', 0)." 11729,_xla_makefile_string,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,101,function,"Returns a Makefile string with variables for using XLA binary object files. Attempts to identify the right include header paths when run from either an installed TensorFlow pip package, or from bazel run. Args: output_prefix: A string containing the output prefix for the XLA AOT compiled header + object files. Returns: A string containing a filled out `_XLA_MAKEFILE_TEMPLATE`." 11730,_get_variable_nodes_from_graph_def,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,147,function,"Get the list of Variable nodes from `graph_def`. Args: graph_def: An instance of `GraphDef`. This GraphDef *must* have already been optimized by Grappler. In particular, function inlining must have already happened. Returns: A dict mapping string names of variables to tuples `(node_def, modified)`, where `node_def` is the `NodeDef` corresponding to the variable, and `modified` is a python bool describing whether the variable is modified during runtime." 11731,_prune_removed_feed_nodes,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,188,function,"Identify the inputs in the signature that are no longer in graph_def, and prune them. Args: signature_def: A `SignatureDef` instance. graph_def: A `GraphDef` instance. Returns: A new pruned `SignatureDef`." 11732,aot_compile_cpu_meta_graph_def,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,212,function,"Compile a `MetaGraphDef` to header+object files in `output_prefix`. Use XLA AOT (`tfcompile`) to convert the given meta graph and signature into a header + object files. Also create an include makefile that helps identify the necessary include and library paths to incorporate these files into your C++ program. The graph is always optimized with grappler, and optionally (by default) variables are frozen as constants, before compilation happens. If `freeze_graph` is `True`, all variables are embedded as constants into the graph and binary objects. If it is `False`, then the variable values become inputs and outputs of the compiled class and the C++ caller must set these values manually. Args: checkpoint_path: Python string. Path to checkpoints/variables. meta_graph_def: Instance of `MetaGraphDef`. output_prefix: Python string. Path prefix for outputs. signature_def_key: String, the signature_def to use in the SavedModel. cpp_class: String, name of the output C++ class. target_triple: String, LLVM target triple. target_cpu: String, LLVM target cpu name. variables_to_feed: A list of strings, the variables that will be fed by the user; these won't be frozen. If `None`, then we will extract all the variables in the graph and mark them as to-feed.
The default behavior is an empty tuple: all variables must be frozen. enable_multithreading: Not implemented. Enable multithreading in the compiled computation. Raises: RuntimeError: If tensorflow was not built with XLA. ImportError: If tensorflow was built with XLA but there was another issue importing the tfcompile python wrapper. ValueError: If `meta_graph_def.signature_def[signature_def_key]` is missing or has empty outputs. NotImplementedError: If `enable_multithreading` is `True`." 11733,_optimize_graph,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,382,function,Optimize `meta_graph_def` using grappler. Returns a `GraphDef`. 11734,_replace_input_placeholders_with_default_values,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,401,function,Replace graphdef's `tf.placeholder` input ops with all-zero constants. 11735,_signature_to_tf2xla_config,tensorflow/tensorflow/python/tools/saved_model_aot_compile.py,441,function,"Convert `signature_def` to tf2xla config. Returns a `tf2xla.Config` proto. Args: signature_def: Instance of `SignatureDef`. variable_nodes_to_feed: List of tuples of form `(node_def, modified)` corresponding to VarHandleOp, and a boolean `modified` that describes whether the variable was modified during execution. Returns: An instance of `tf2xla.Config` proto. Raises: RuntimeError: If TensorFlow was not compiled with XLA." 11736,_show_tag_sets,tensorflow/tensorflow/python/tools/saved_model_cli.py,65,function,"Prints the tag-sets stored in SavedModel directory. Prints all the tag-sets for MetaGraphs stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect." 11737,_show_signature_def_map_keys,tensorflow/tensorflow/python/tools/saved_model_cli.py,79,function,"Prints the keys for each SignatureDef in the SignatureDef map. Prints the list of SignatureDef keys from the SignatureDef map specified by the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from, in string format, separated by ','. If the tag-set contains multiple tags, all tags must be passed in." 11738,_get_inputs_tensor_info_from_meta_graph_def,tensorflow/tensorflow/python/tools/saved_model_cli.py,98,function,"Gets TensorInfo for all inputs of the SignatureDef. Returns a dictionary that maps each input key to its TensorInfo for the given signature_def_key in the meta_graph_def. Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to look up SignatureDef key. signature_def_key: A SignatureDef key string. Returns: A dictionary that maps input tensor keys to TensorInfos." 11739,_get_outputs_tensor_info_from_meta_graph_def,tensorflow/tensorflow/python/tools/saved_model_cli.py,116,function,"Gets TensorInfos for all outputs of the SignatureDef. Returns a dictionary that maps each output key to its TensorInfo for the given signature_def_key in the meta_graph_def. Args: meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to look up signature_def_key. signature_def_key: A SignatureDef key string. Returns: A dictionary that maps output tensor keys to TensorInfos." 11740,_show_inputs_outputs,tensorflow/tensorflow/python/tools/saved_model_cli.py,134,function,"Prints input and output TensorInfos. Prints the details of input and output TensorInfos for the SignatureDef mapped by the given signature_def_key. Args: saved_model_dir: Directory containing the SavedModel to inspect.
tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by ','. If the tag-set contains multiple tags, all tags must be passed in. signature_def_key: A SignatureDef key string. indent: How far (in increments of 2 spaces) to indent each line of output." 11741,_show_defined_functions,tensorflow/tensorflow/python/tools/saved_model_cli.py,173,function,"Prints the callable concrete and polymorphic functions of the Saved Model. Args: saved_model_dir: Directory containing the SavedModel to inspect." 11742,_print_args,tensorflow/tensorflow/python/tools/saved_model_cli.py,219,function,"Formats and prints the arguments of the concrete functions defined in the model. Args: arguments: Arguments to format and print. argument_type: Type of arguments. indent: How far (in increments of 2 spaces) to indent each line of output." 11743,_print_tensor_info,tensorflow/tensorflow/python/tools/saved_model_cli.py,262,function,"Prints details of the given tensor_info. Args: tensor_info: TensorInfo object to be printed. indent: How far (in increments of 2 spaces) to indent each line of output." 11744,_show_all,tensorflow/tensorflow/python/tools/saved_model_cli.py,287,function,"Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel. Prints all tag-set, SignatureDef and Inputs/Outputs information stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect." 11745,get_meta_graph_def,tensorflow/tensorflow/python/tools/saved_model_cli.py,310,function,"DEPRECATED: Use saved_model_utils.get_meta_graph_def instead. Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. If the tag-set contains multiple tags, all tags must be passed in. Raises: RuntimeError: An error when the given tag-set does not exist in the SavedModel. Returns: A MetaGraphDef corresponding to the tag-set." 11746,get_signature_def_map,tensorflow/tensorflow/python/tools/saved_model_cli.py,332,function,"Gets SignatureDef map from a MetaGraphDef in a SavedModel. Returns the SignatureDef map for the given tag-set in the SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in string format, separated by ','. If the tag-set contains multiple tags, all tags must be passed in. Returns: A SignatureDef map that maps from string keys to SignatureDefs." 11747,scan_meta_graph_def,tensorflow/tensorflow/python/tools/saved_model_cli.py,351,function,"Scans meta_graph_def and reports if there are ops on the denylist. Prints the ops if they are on the denylist, or prints success if no denylisted ops are found. Args: meta_graph_def: MetaGraphDef protocol buffer." 11748,run_saved_model_with_feed_dict,tensorflow/tensorflow/python/tools/saved_model_cli.py,373,function,"Runs SavedModel and fetches all outputs. Runs the input dictionary through the MetaGraphDef within a SavedModel specified by the given tag_set and SignatureDef. Also saves the outputs to file if outdir is not None. Args: saved_model_dir: Directory containing the SavedModel to execute. tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in string format, separated by ','. If the tag-set contains multiple tags, all tags must be passed in. signature_def_key: A SignatureDef key string.
input_tensor_key_feed_dict: A dictionary that maps input keys to numpy ndarrays. outdir: A directory to save the outputs to. If the directory doesn't exist, it will be created. overwrite_flag: A boolean flag to allow overwriting the output file if a file with the same name exists. worker: If provided, the session will be run on the worker. Valid worker specification is a bns or gRPC path. init_tpu: If true, the TPU system will be initialized after the session is created. tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the intermediate Tensor values and runtime GraphDefs while running the SavedModel. Raises: ValueError: When any of the input tensor keys is not valid. RuntimeError: An error when output file already exists and overwrite is not enabled." 11749,preprocess_inputs_arg_string,tensorflow/tensorflow/python/tools/saved_model_cli.py,474,function,"Parses input arg into dictionary that maps input to file/variable tuple. Parses input string in the format of, for example, ""input1=filename1[variable_name1],input2=filename2"" into a dictionary that looks like {'input_key1': (filename1, variable_name1), 'input_key2': (filename2, None)}, which maps input keys to a tuple of file name and variable name (None if empty). Args: inputs_str: A string that specifies where to load inputs. Inputs are separated by semicolons. * For each input key: '<input_key>=<filename>' or '<input_key>=<filename>[<variable_name>]' * The optional 'variable_name' key will be set to None if not specified. Returns: A dictionary that maps input keys to a tuple of file name and variable name. Raises: RuntimeError: An error when the given input string is in a bad format." 11750,preprocess_input_exprs_arg_string,tensorflow/tensorflow/python/tools/saved_model_cli.py,521,function,"Parses input arg into dictionary that maps input key to python expression. Parses input string in the format of 'input_key=<python expression>' into a dictionary that maps each input_key to its python expression. Args: input_exprs_str: A string that specifies python expressions for input keys. Each input is separated by semicolon. For each input key: 'input_key=<python expression>' Returns: A dictionary that maps input keys to their values. Raises: RuntimeError: An error when the given input string is in a bad format." 11751,preprocess_input_examples_arg_string,tensorflow/tensorflow/python/tools/saved_model_cli.py,550,function,"Parses input into dict that maps input keys to lists of tf.Example. Parses input string in the format of 'input_key1=[{feature_name: feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary that maps each input_key to its list of serialized tf.Example. Args: input_examples_str: A string that specifies a list of dictionaries of feature_names and their feature_lists for each input. Each input is separated by semicolon. For each input key: 'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]' Items in feature_list can be of type float, int, long or str. Returns: A dictionary that maps input keys to lists of serialized tf.Example. Raises: ValueError: An error when the given tf.Example is not a list." 11752,_create_example_string,tensorflow/tensorflow/python/tools/saved_model_cli.py,582,function,Create a serialized tf.example from a feature dictionary. 11753,load_inputs_from_input_arg_string,tensorflow/tensorflow/python/tools/saved_model_cli.py,605,function,"Parses input arg strings and creates the inputs feed_dict. Parses '--inputs' string for inputs to be loaded from file, and parses '--input_exprs' string for inputs to be evaluated from python expression.
'--input_examples' string for inputs to be created from tf.example feature dictionary list. Args: inputs_str: A string that specifies where to load inputs. Each input is separated by semicolon. * For each input key: '<input_key>=<filename>' or '<input_key>=<filename>[<variable_name>]' * The optional 'variable_name' key will be set to None if not specified. * File specified by 'filename' will be loaded using numpy.load. Inputs can be loaded from only .npy, .npz or pickle files. * The ""[variable_name]"" key is optional depending on the input file type as described in more detail below. When loading from an .npy file, which always contains a numpy ndarray, the content will be directly assigned to the specified input tensor. If a variable_name is specified, it will be ignored and a warning will be issued. When loading from an .npz zip file, the user can specify which variable within the zip file to load for the input tensor inside the square brackets. If nothing is specified, this function will check that only one file is included in the zip and load it for the specified input tensor. When loading from a pickle file, if no variable_name is specified in the square brackets, whatever is inside the pickle file will be passed to the specified input tensor, else SavedModel CLI will assume a dictionary is stored in the pickle file and the value corresponding to the variable_name will be used. input_exprs_str: A string that specifies python expressions for inputs. * In the format of: '<input_key>=<python expression>'. * numpy module is available as np. input_examples_str: A string that specifies tf.Example with dictionary. * In the format of: '<input_key>=<[{feature:value list}]>' Returns: A dictionary that maps input tensor keys to numpy ndarrays. Raises: RuntimeError: An error when a key is specified, but the input file contains multiple numpy ndarrays, none of which matches the given key. RuntimeError: An error when no key is specified, but the input file contains more than one numpy ndarray." 11754,show,tensorflow/tensorflow/python/tools/saved_model_cli.py,708,function,"Function triggered by show command. Args: args: A namespace parsed from command line." 11755,run,tensorflow/tensorflow/python/tools/saved_model_cli.py,730,function,"Function triggered by run command. Args: args: A namespace parsed from command line. Raises: AttributeError: An error when neither --inputs nor --input_exprs is passed to run command." 11756,scan,tensorflow/tensorflow/python/tools/saved_model_cli.py,752,function,"Function triggered by scan command. Args: args: A namespace parsed from command line." 11757,convert_with_tensorrt,tensorflow/tensorflow/python/tools/saved_model_cli.py,767,function,"Function triggered by 'convert tensorrt' command. Args: args: A namespace parsed from command line." 11758,aot_compile_cpu,tensorflow/tensorflow/python/tools/saved_model_cli.py,806,function,"Function triggered by aot_compile_cpu command. Args: args: A namespace parsed from command line." 11759,add_show_subparser,tensorflow/tensorflow/python/tools/saved_model_cli.py,834,function,Add parser for `show`. 11760,add_run_subparser,tensorflow/tensorflow/python/tools/saved_model_cli.py,880,function,Add parser for `run`. 11761,add_scan_subparser,tensorflow/tensorflow/python/tools/saved_model_cli.py,958,function,Add parser for `scan`. 11762,add_convert_subparser,tensorflow/tensorflow/python/tools/saved_model_cli.py,980,function,Add parser for `convert`. 11763,add_aot_compile_cpu_subparser,tensorflow/tensorflow/python/tools/saved_model_cli.py,1041,function,Add parser for `aot_compile_cpu`.
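The '--inputs' and '--input_exprs' grammars described above can be exercised directly through the parsing helpers; a small sketch assuming the single-string signatures listed in these rows, with placeholder file names (note that the expression parser evaluates Python, so it should only be fed trusted strings):
```python
from tensorflow.python.tools import saved_model_cli

# '--inputs': semicolon-separated '<input_key>=<filename>[<variable_name>]'
# entries become {key: (filename, variable_name or None)}.
feeds = saved_model_cli.preprocess_inputs_arg_string(
    'x=/tmp/x.npy;y=/tmp/data.npz[arr_0]')
# feeds == {'x': ('/tmp/x.npy', None), 'y': ('/tmp/data.npz', 'arr_0')}

# '--input_exprs': '<input_key>=<python expression>' entries are evaluated,
# with the numpy module available as np per the docstring above.
exprs = saved_model_cli.preprocess_input_exprs_arg_string('z=np.ones((2, 2))')
```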
11764,create_parser,tensorflow/tensorflow/python/tools/saved_model_cli.py,1149,function,"Creates a parser that parses the command line arguments. Returns: A namespace parsed from command line arguments." 11765,main,tensorflow/tensorflow/python/tools/saved_model_cli.py,1180,function, 11766,captured_output,tensorflow/tensorflow/python/tools/saved_model_cli_test.py,51,function, 11767,SavedModelCLITestCase,tensorflow/tensorflow/python/tools/saved_model_cli_test.py,61,class, 11768,read_saved_model,tensorflow/tensorflow/python/tools/saved_model_utils.py,31,function,"Reads the saved_model.pb or saved_model.pbtxt file containing `SavedModel`. Args: saved_model_dir: Directory containing the SavedModel file. Returns: A `SavedModel` protocol buffer. Raises: IOError: If the file does not exist, or cannot be successfully parsed." 11769,get_saved_model_tag_sets,tensorflow/tensorflow/python/tools/saved_model_utils.py,79,function,"Retrieves all the tag-sets available in the SavedModel. Args: saved_model_dir: Directory containing the SavedModel. Returns: List of all tag-sets in the SavedModel, where a tag-set is represented as a list of strings." 11770,get_meta_graph_def,tensorflow/tensorflow/python/tools/saved_model_utils.py,96,function,"Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. The empty string tag is ignored so that passing '' means the empty tag set. If the tag-set contains multiple tags, all tags must be passed in. Raises: RuntimeError: An error when the given tag-set does not exist in the SavedModel. Returns: A MetaGraphDef corresponding to the tag-set." 11771,tearDownModule,tensorflow/tensorflow/python/tools/saved_model_utils_test.py,33,function, 11772,SavedModelUtilTest,tensorflow/tensorflow/python/tools/saved_model_utils_test.py,37,class, 11773,_get_ops_from_ops_list,tensorflow/tensorflow/python/tools/selective_registration_header_lib.py,48,function,Gets the ops and kernels needed from the ops list file. 11774,_get_ops_from_graphdef,tensorflow/tensorflow/python/tools/selective_registration_header_lib.py,61,function,Gets the ops and kernels needed from the tensorflow model. 11775,get_ops_and_kernels,tensorflow/tensorflow/python/tools/selective_registration_header_lib.py,79,function,Gets the ops and kernels needed from the model files. 11776,get_header_from_ops_and_kernels,tensorflow/tensorflow/python/tools/selective_registration_header_lib.py,110,function,"Returns a header for use with tensorflow SELECTIVE_REGISTRATION. Args: ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include. include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op kernels are included. Returns: the string of the header that should be written as ops_to_register.h." 11777,get_header,tensorflow/tensorflow/python/tools/selective_registration_header_lib.py,194,function,"Computes a header for use with tensorflow SELECTIVE_REGISTRATION. Args: graphs: a list of paths to GraphDef files to include. proto_fileformat: optional format of proto file, either 'textproto', 'rawproto' (default) or ops_list. The ops_list is the file containing the list of ops in JSON format, Ex: ""[[""Transpose"", ""TransposeCpuOp""]]"". default_ops: optional comma-separated string of operator:kernel pairs to always include implementation for. Pass 'all' to have all operators and kernels included.
Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'. Returns: the string of the header that should be written as ops_to_register.h." 11778,main,tensorflow/tensorflow/python/tools/strip_unused.py,54,function, 11779,strip_unused,tensorflow/tensorflow/python/tools/strip_unused_lib.py,32,function,"Removes unused nodes from a GraphDef. Args: input_graph_def: A graph with nodes we want to prune. input_node_names: A list of the nodes we use as inputs. output_node_names: A list of the output nodes. placeholder_type_enum: The AttrValue enum for the placeholder data type, or a list that specifies one value per input node name. Returns: A `GraphDef` with all unnecessary ops removed. Raises: ValueError: If any element in `input_node_names` refers to a tensor instead of an operation. KeyError: If any element in `input_node_names` is not found in the graph." 11780,strip_unused_from_files,tensorflow/tensorflow/python/tools/strip_unused_lib.py,92,function,Removes unused nodes from a graph file. 11781,StripUnusedTest,tensorflow/tensorflow/python/tools/strip_unused_test.py,36,class, 11782,SymbolExposedTwiceError,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,73,class,Raised when different symbols are exported with the same name. 11783,get_canonical_import,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,78,function,"Obtain one single import from a set of possible sources of a symbol. One symbol might come from multiple places as it is being imported and reexported. To simplify API changes, we always use the same import for the same module, and give preference based on higher priority and alphabetical ordering. Args: import_set: (set) Imports providing the same symbol. This is a set of tuples in the form (import, priority). We want to pick an import with highest priority. Returns: A module name to import" 11784,_ModuleInitCodeBuilder,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,105,class,Builds a map from module name to imports included in that module. 11785,add_nested_compat_imports,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,316,function,"Adds compat.vN.compat.vK modules to module builder. To avoid circular imports, we want to add __init__.py files under compat.vN.compat.vK and under compat.vN.compat.vK.compat. For all other imports, we point to corresponding modules under compat.vK. Args: module_builder: `_ModuleInitCodeBuilder` instance. compat_api_versions: Supported compatibility versions. output_package: Base output python package where generated API will be added." 11786,_get_name_and_module,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,374,function,"Split full_name into module and short name. Args: full_name: Full name of symbol that includes module. Returns: Full module name and short symbol name." 11787,_join_modules,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,387,function,"Concatenate 2 module components. Args: module1: First module to join. module2: Second module to join. Returns: Given two modules aaa.bbb and ccc.ddd, returns a joined module aaa.bbb.ccc.ddd." 11788,add_imports_for_symbol,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,405,function,"Add imports for the given symbol to `module_code_builder`. Args: module_code_builder: `_ModuleInitCodeBuilder` instance. symbol: A symbol. source_module_name: Module that we can import the symbol from. source_name: Name we can import the symbol with. api_name: API name. 
Currently, must be either `tensorflow` or `estimator`. api_version: API version. output_module_prefix: Prefix to prepend to destination module." 11789,get_api_init_text,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,450,function,"Get a map from destination module to __init__.py code for that module. Args: packages: Base python packages containing python with target tf_export decorators. output_package: Base output python package where generated API will be added. api_name: API you want to generate (e.g. `tensorflow` or `estimator`). api_version: API version you want to generate (1 or 2). compat_api_versions: Additional API versions to generate under compat/ directory. lazy_loading: Boolean flag. If True, a lazy loading `__init__.py` file is produced and if `False`, static imports are used. use_relative_imports: True if we should use relative imports when importing submodules. Returns: A dictionary where key: (string) destination module (for e.g. tf or tf.consts). value: (string) text that should be in __init__.py files for corresponding modules." 11790,get_module,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,522,function,"Get module that corresponds to path relative to relative_to_dir. Args: dir_path: Path to directory. relative_to_dir: Get module relative to this directory. Returns: Name of module that corresponds to the given directory." 11791,get_module_docstring,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,538,function,"Get docstring for the given module. This method looks for a docstring in the following order: 1. Checks if module has a docstring specified in doc_srcs. 2. Checks if module has a docstring source module specified in doc_srcs. If it does, gets docstring from that module. 3. Checks if module with module_name exists under base package. If it does, gets docstring from that module. 4. Returns a default docstring. Args: module_name: module name relative to tensorflow (excluding 'tensorflow.' prefix) to get a docstring for. package: Base python package containing python with target tf_export decorators. api_name: API you want to generate (e.g. `tensorflow` or `estimator`). Returns: One-line docstring to describe the module." 11792,create_api_files,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,586,function,"Creates __init__.py files for the Python API. Args: output_files: List of __init__.py file paths to create. packages: Base python packages containing python with target tf_export decorators. root_init_template: Template for top-level __init__.py file. ""# API IMPORTS PLACEHOLDER"" comment in the template file will be replaced with imports. output_dir: output API root directory. output_package: Base output package where generated API will be added. api_name: API you want to generate (e.g. `tensorflow` or `estimator`). api_version: API version to generate (`v1` or `v2`). compat_api_versions: Additional API versions to generate in compat/ subdirectory. compat_init_templates: List of templates for top level compat init files in the same order as compat_api_versions. lazy_loading: Boolean flag. If True, a lazy loading `__init__.py` file is produced and if `False`, static imports are used. use_relative_imports: True if we should use relative imports when importing submodules. Raises: ValueError: if output_files list is missing a required file."
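Usage sketch for the saved_model_utils helpers listed a few rows above (get_saved_model_tag_sets, get_meta_graph_def); the model directory is a placeholder:
```python
from tensorflow.python.tools import saved_model_utils

model_dir = '/tmp/saved_model'  # placeholder path

# Enumerate the tag-sets, then load one MetaGraphDef by its tag-set string.
for tag_set in saved_model_utils.get_saved_model_tag_sets(model_dir):
    print('tag-set:', ','.join(tag_set))

meta_graph_def = saved_model_utils.get_meta_graph_def(model_dir, 'serve')
print(list(meta_graph_def.signature_def.keys()))
```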
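The module-path helpers in create_python_api are plain string manipulation; a minimal re-implementation sketch consistent with the docstrings above (illustrative, not the TensorFlow code itself):
```python
import os


def get_module(dir_path, relative_to_dir):
    # 'tf/python/ops' relative to 'tf' -> 'python.ops': drop the prefix and
    # turn path separators into module dots.
    dir_path = dir_path[len(relative_to_dir):]
    return dir_path.replace(os.sep, '.').replace('/', '.').strip('.')


def join_modules(module1, module2):
    # Joining 'aaa.bbb' and 'ccc.ddd' gives 'aaa.bbb.ccc.ddd'; an empty
    # component is simply dropped.
    if not module1:
        return module2
    if not module2:
        return module1
    return '{}.{}'.format(module1, module2)


assert join_modules('aaa.bbb', 'ccc.ddd') == 'aaa.bbb.ccc.ddd'
```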
11793,main,tensorflow/tensorflow/python/tools/api/generator/create_python_api.py,695,function, 11794,test_op,tensorflow/tensorflow/python/tools/api/generator/create_python_api_test.py,30,function, 11795,deprecated_test_op,tensorflow/tensorflow/python/tools/api/generator/create_python_api_test.py,35,function, 11796,TestClass,tensorflow/tensorflow/python/tools/api/generator/create_python_api_test.py,40,class, 11797,CreatePythonApiTest,tensorflow/tensorflow/python/tools/api/generator/create_python_api_test.py,48,class, 11798,DocSource,tensorflow/tensorflow/python/tools/api/generator/doc_srcs.py,23,class,"Specifies docstring source for a module. Only one of docstring or docstring_module_name should be set. * If docstring is set, then we will use this docstring for the module. * If docstring_module_name is set, then we will copy the docstring from the docstring source module." 11799,get_doc_sources,tensorflow/tensorflow/python/tools/api/generator/doc_srcs.py,86,function,"Get a map from module to a DocSource object. Args: api_name: API you want to generate (e.g. `tensorflow` or `estimator`). Returns: Map from module name to DocSource object." 11800,DocSrcsTest,tensorflow/tensorflow/python/tools/api/generator/doc_srcs_test.py,32,class, 11801,_get_module_from_symbol,tensorflow/tensorflow/python/tools/api/generator/output_init_files_test.py,33,function, 11802,_get_modules,tensorflow/tensorflow/python/tools/api/generator/output_init_files_test.py,39,function,"Get list of TF API modules. Args: package: We only look at modules that contain package in the name. attr_name: Attribute set on TF symbols that contains API names. constants_attr_name: Attribute set on TF modules that contains API constant names. Returns: Set of TensorFlow API modules." 11803,_get_files_set,tensorflow/tensorflow/python/tools/api/generator/output_init_files_test.py,78,function,"Get set of file paths from the given file. Args: path: Path to file. File at `path` is expected to contain a list of paths where the entire list starts with `start_tag` and ends with `end_tag`. The list must be comma-separated and each path entry must be surrounded by double quotes. start_tag: String that indicates start of path list. end_tag: String that indicates end of path list. Returns: List of string paths." 11804,_module_to_paths,tensorflow/tensorflow/python/tools/api/generator/output_init_files_test.py,102,function,"Get all API __init__.py file paths for the given module. Args: module: Module to get file paths for. Returns: List of paths for the given module. E.g. module foo.bar requires 'foo/__init__.py' and 'foo/bar/__init__.py'." 11805,OutputInitFilesTest,tensorflow/tensorflow/python/tools/api/generator/output_init_files_test.py,125,class,Test that verifies files that list paths for TensorFlow API. 11806,AsyncCheckpointSaverHook,tensorflow/tensorflow/python/tpu/async_checkpoint.py,39,class,Saves checkpoints every N steps or seconds. 11807,input_fn,tensorflow/tensorflow/python/tpu/async_checkpoint_test.py,56,function,Return a dataset of source and target sequences for training. 11808,model_fn,tensorflow/tensorflow/python/tpu/async_checkpoint_test.py,65,function, 11809,AsyncCheckpointingTest,tensorflow/tensorflow/python/tpu/async_checkpoint_test.py,97,class, 11810,_get_custom_getter,tensorflow/tensorflow/python/tpu/bfloat16.py,29,function,"Returns a custom getter that this class's methods must be called under. All methods of this class must be called under a variable scope that was passed this custom getter.
Example: ```python network = ConvNetBuilder(...) with tf.compat.v1.variable_scope('cg', custom_getter=network.get_custom_getter()): network.conv(...) # Call more methods of network here ``` Currently, this custom getter only does anything if self.use_tf_layers is True. In that case, it causes variables to be stored as dtype self.variable_type, then cast to the requested dtype, instead of directly storing the variable as the requested dtype." 11811,bfloat16_scope,tensorflow/tensorflow/python/tpu/bfloat16.py,73,function,"Scope class for bfloat16 variables so that the model uses custom getter. This enables variables to be read as bfloat16 type when using get_variable." 11812,BFloat16ScopeTest,tensorflow/tensorflow/python/tpu/bfloat16_test.py,32,class, 11813,_TextLineDataset,tensorflow/tensorflow/python/tpu/datasets.py,31,function, 11814,_TFRecordDataset,tensorflow/tensorflow/python/tpu/datasets.py,37,function, 11815,StreamingFilesDataset,tensorflow/tensorflow/python/tpu/datasets.py,50,function,"StreamingFilesDataset constructs a dataset to stream from workers (GCE VM). Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read files local to your GCE VM. In order to train using files stored on your local VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset helper to generate a dataset to feed your Cloud TPU with files from your GCE VM. The resulting dataset may return an OutOfRangeError if there are no files found as a result of the fileglob expansion. Note: StreamingFilesDataset assumes that the session is using a TPUClusterResolver and therefore has a worker and a coordinator job. File loading will be done on the coordinator job. Args: files: A string glob to match files, or a `tf.data.Dataset` generating file names. filetype: A string (one of 'tfrecord', or 'textline') or a single-argument TensorFlow function that when given a filename returns a dataset. file_reader_job: An optional string that corresponds to the job that should perform the file reads. worker_job: An optional string that corresponds to the job that should process the tensors (i.e. your GPU or TPU worker). num_epochs: The number of epochs through the training set that should be generated. By default, it will repeat infinitely. filename_shuffle_buffer_size: An optional integer whose value controls the shuffling of the file names. If you would like to read from the files in the same order, set to 0 or False. num_parallel_reads: An optional integer controlling the number of files to read from concurrently. (Set to 1 for no parallelism.) batch_transfer_size: An optional integer controlling the batching used to amortize the remote function invocation overhead. Set to a very large number to increase throughput. Set to a very small number to reduce memory consumption. Set to False to skip batching. sloppy: (Optional.) If `False`, read input data while maintaining a deterministic order. (This may have significant performance impacts.) Defaults to `True`. Returns: A `tf.data.Dataset` with an infinite stream of elements generated by a parallel interleaving of the set of files matched (or generated) by `files`, whose type is the output of the dataset specified by `filetype`. Raises: ValueError: if any argument is not of the expected type."
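A sketch of constructing the streaming dataset described above, assuming TFRecord shards on the coordinator VM's local disk (the glob path and tuning values are placeholders; the keyword arguments mirror the documented signature):

```python
from tensorflow.python.tpu import datasets as tpu_datasets

# Stream local files to the TPU workers; path and tuning values are illustrative.
dataset = tpu_datasets.StreamingFilesDataset(
    files='/mnt/disks/ssd/train-*.tfrecord',
    filetype='tfrecord',
    num_parallel_reads=8,       # read 8 files concurrently
    batch_transfer_size=256)    # amortize remote-call overhead
```

Note that actually iterating this dataset requires a session configured with a TPUClusterResolver, as the docstring states.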
11816,DatasetsTest,tensorflow/tensorflow/python/tpu/datasets_test.py,41,class, 11817,_compute_task_and_cores_to_replicas,tensorflow/tensorflow/python/tpu/device_assignment.py,34,function,Computes a nested dict which maps task and logical core to replicas. 11818,DeviceAssignment,tensorflow/tensorflow/python/tpu/device_assignment.py,60,class,"Mapping from logical cores in a computation to the physical TPU topology. Prefer to use the `DeviceAssignment.build()` helper to construct a `DeviceAssignment`; it is easier if less flexible than constructing a `DeviceAssignment` directly." 11819,_open_ring_2d,tensorflow/tensorflow/python/tpu/device_assignment.py,179,function,"Ring-order of an X by Y mesh, with a fixed Z coordinate. For example, in a 4x4 mesh, this returns the following order. 0 -- 1 -- 2 -- 3 | | | | 15-- 6 -- 5 -- 4 | | | | 14-- 7 -- 8 -- 9 | | | | 13-- 12-- 11-- 10 Note that chip 0 is not included in the output. Args: x_size: An integer representing the mesh size in the x-dimension. Must be larger than 1. y_size: An integer representing the mesh size in the y-dimension. Must be larger than 1. z_coord: An integer representing the z-coordinate to use for the chips in the ring. Returns: A list of (x,y,z) triples in ring order." 11820,_ring_3d,tensorflow/tensorflow/python/tpu/device_assignment.py,215,function,"Ring-order of an X by Y by Z mesh. Constructs the 3d ring from 2d rings that are stacked in the Z dimension and joined in one corner. For example, in a 4x4x4 mesh, this returns the following order. z == 0: 0 -- 1 -- 2 -- 3 | | | | 15 - 6 -- 5 -- 4 | | | | 14 - 7 -- 8 -- 9 | | | | 13 - 12 - 11 - 10 z == 1: 63 - 30 - 29 - 28 | | | | 16 - 25 - 26 - 27 | | | | 17 - 24 - 23 - 22 | | | | 18 - 19 - 20 - 21 z == 2: 62 - 31 - 32 - 33 | | | | 45 - 36 - 35 - 34 | | | | 44 - 37 - 38 - 39 | | | | 43 - 42 - 41 - 40 z == 3: 61 - 60 - 59 - 58 | | | | 46 - 55 - 56 - 57 | | | | 47 - 54 - 53 - 52 | | | | 48 - 49 - 50 - 51 Args: x_size: An integer representing the mesh size in the x-dimension. Must be larger than 1. y_size: An integer representing the mesh size in the y-dimension. Must be larger than 1. z_size: An integer representing the mesh size in the z-dimension. Must be larger than 1. Returns: A list of (x,y,z) triples in ring order." 11821,device_assignment,tensorflow/tensorflow/python/tpu/device_assignment.py,316,function,"Computes a device_assignment of a computation across a TPU topology. Attempts to choose a compact grid of cores for locality. Returns a `DeviceAssignment` that describes the cores in the topology assigned to each core of each replica. `computation_shape` and `computation_stride` values should be powers of 2 for optimal packing. Args: topology: A `Topology` object that describes the TPU cluster topology. To obtain a TPU topology, evaluate the `Tensor` returned by `initialize_system` using `Session.run`. Either a serialized `TopologyProto` or a `Topology` object may be passed. Note: you must evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor` here. computation_shape: A rank 1 int32 numpy array with size equal to the topology rank, describing the shape of the computation's block of cores. If None, the `computation_shape` is `[1] * topology_rank`. computation_stride: A rank 1 int32 numpy array of size `topology_rank`, describing the inter-core spacing of the `computation_shape` cores in the TPU topology. If None, the `computation_stride` is `[1] * topology_rank`. num_replicas: The number of computation replicas to run.
The replicas will be packed into the free spaces of the topology. Returns: A DeviceAssignment object, which describes the mapping between the logical cores in each computation replica and the physical cores in the TPU topology. Raises: ValueError: If `topology` is not a valid `Topology` object. ValueError: If `computation_shape` or `computation_stride` are not 1D int32 numpy arrays with shape [3] where all values are positive. ValueError: If computation's replicas cannot fit into the TPU topology." 11822,embedding_column,tensorflow/tensorflow/python/tpu/feature_column.py,55,function,"TPU embedding_column for `tf.feature_column.embedding_column`. Note that the interface for TPU embedding_column is different from the non-TPU version. The following args available for the non-TPU version are NOT supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable. Args: categorical_column: A categorical_column returned from categorical_column_with_identity, weighted_categorical_column, categorical_column_with_vocabulary_file, categorical_column_with_vocabulary_list, sequence_categorical_column_with_identity, sequence_categorical_column_with_vocabulary_file, sequence_categorical_column_with_vocabulary_list dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row for a non-sequence column. For more information, see `tf.feature_column.embedding_column`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. max_sequence_length: A non-negative integer specifying the max sequence length. Any sequence shorter than this will be padded with 0 embeddings and any sequence longer will be truncated. This must be positive for sequence features and 0 for non-sequence features. learning_rate_fn: A function that takes global step and returns learning rate for the embedding table. If you intend to use the same learning rate for multiple embedding tables, please ensure that you pass the exact same python function to all calls of embedding_column, otherwise performance may suffer. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true, consider turning off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: A _TPUEmbeddingColumn. Raises: ValueError: if `dimension` not > 0. ValueError: if `initializer` is specified but not callable. TypeError: if categorical_column is not a supported type." 11823,shared_embedding_columns,tensorflow/tensorflow/python/tpu/feature_column.py,161,function,"List of dense columns that convert from sparse, categorical input. Note that the interface for TPU embedding_column is different from the non-TPU version. The following args available for the non-TPU version are NOT supported: ckpt_to_load_from, tensor_name_in_ckpt, max_norm and trainable.
Args: categorical_columns: A list of categorical_columns returned from categorical_column_with_identity, weighted_categorical_column, categorical_column_with_vocabulary_file, categorical_column_with_vocabulary_list, sequence_categorical_column_with_identity, sequence_categorical_column_with_vocabulary_file, sequence_categorical_column_with_vocabulary_list dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row for a non-sequence column. For more information, see `tf.feature_column.embedding_column`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `tf.truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. shared_embedding_collection_name: Optional name of the collection where shared embedding weights are added. If not given, a reasonable name will be chosen based on the names of `categorical_columns`. This is also used in `variable_scope` when creating shared embedding weights. max_sequence_lengths: A list of non-negative integers, either None or empty or the same length as the argument categorical_columns. Entries corresponding to non-sequence columns must be 0 and entries corresponding to sequence columns specify the max sequence length for the column. Any sequence shorter than this will be padded with 0 embeddings and any sequence longer will be truncated. learning_rate_fn: A function that takes global step and returns learning rate for the embedding table. If you intend to use the same learning rate for multiple embedding tables, please ensure that you pass the exact same python function to all calls of shared_embedding_columns, otherwise performance may suffer. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true, consider turning off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: A list of _TPUSharedEmbeddingColumns. Raises: ValueError: if `dimension` not > 0. ValueError: if `initializer` is specified but not callable. ValueError: if `max_sequence_lengths` is specified and not the same length as `categorical_columns`. ValueError: if `max_sequence_lengths` is positive for a non-sequence column or 0 for a sequence column." 11824,_TPUBaseEmbeddingColumn,tensorflow/tensorflow/python/tpu/feature_column.py,294,class,Base class for TPU Embedding Column. 11825,_TPUEmbeddingColumn,tensorflow/tensorflow/python/tpu/feature_column.py,361,class,Core Embedding Column. 11826,_TPUSharedEmbeddingColumn,tensorflow/tensorflow/python/tpu/feature_column.py,494,class,Core Shared Embedding Column. 11827,_record_variable_scope_and_name,tensorflow/tensorflow/python/tpu/feature_column.py,628,function,Add embedding variable name and scope to collection. 11828,_is_running_on_cpu,tensorflow/tensorflow/python/tpu/feature_column.py,661,function,Returns True if the current context is CPU mode. 11829,get_sequence_length_feature_key_name_from_feature_key_name,tensorflow/tensorflow/python/tpu/feature_column.py,666,function,"Gets the name of the sequence length feature from that of the base feature. Args: feature_name: The feature key of a sequence column.
Returns: A string which is the feature key for the associated feature length column." 11830,split_sequence_columns,tensorflow/tensorflow/python/tpu/feature_column.py,678,function,"Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns. For use in a TPUEstimator model_fn function. E.g. def model_fn(features): sequence_columns, feature_columns = ( tf.tpu.feature_column.split_sequence_columns(feature_columns)) input = tf.feature_column.input_layer( features=features, feature_columns=feature_columns) sequence_features, sequence_lengths = ( tf.contrib.feature_column.sequence_input_layer( features=features, feature_columns=sequence_columns)) Args: feature_columns: A list of _TPUEmbeddingColumns to split. Returns: Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the second is the non-sequence columns." 11831,_initialized_session,tensorflow/tensorflow/python/tpu/feature_column_test.py,37,function, 11832,EmbeddingColumnTest,tensorflow/tensorflow/python/tpu/feature_column_test.py,44,class, 11833,SharedEmbeddingColumnTest,tensorflow/tensorflow/python/tpu/feature_column_test.py,168,class, 11834,EmbeddingDevice,tensorflow/tensorflow/python/tpu/feature_column_v2.py,48,class, 11835,embedding_column_v2,tensorflow/tensorflow/python/tpu/feature_column_v2.py,55,function,"TPU version of `tf.compat.v1.feature_column.embedding_column`. Note that the interface for `tf.tpu.experimental.embedding_column` is different from that of `tf.compat.v1.feature_column.embedding_column`: The following arguments are NOT supported: `ckpt_to_load_from`, `tensor_name_in_ckpt`, `max_norm` and `trainable`. Use this function in place of `tf.compat.v1.feature_column.embedding_column` when you want to use the TPU to accelerate your embedding lookups via TPU embeddings. ``` column = tf.feature_column.categorical_column_with_identity(...) tpu_column = tf.tpu.experimental.embedding_column(column, 10) ... def model_fn(features): dense_feature = tf.keras.layers.DenseFeature(tpu_column) embedded_feature = dense_feature(features) ... estimator = tf.estimator.tpu.TPUEstimator( model_fn=model_fn, ... embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec( column=[tpu_column], ...)) ``` Args: categorical_column: A categorical column returned from `categorical_column_with_identity`, `weighted_categorical_column`, `categorical_column_with_vocabulary_file`, `categorical_column_with_vocabulary_list`, `sequence_categorical_column_with_identity`, `sequence_categorical_column_with_vocabulary_file`, `sequence_categorical_column_with_vocabulary_list` dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row for a non-sequence column. For more information, see `tf.feature_column.embedding_column`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. max_sequence_length: A non-negative integer specifying the max sequence length. Any sequence shorter than this will be padded with 0 embeddings and any sequence longer will be truncated. This must be positive for sequence features and 0 for non-sequence features. learning_rate_fn: A function that takes global step and returns learning rate for the embedding table.
If you intend to use the same learning rate for multiple embedding tables, please ensure that you pass the exact same python function to all calls of embedding_column, otherwise performance may suffer. embedding_lookup_device: The device on which to run the embedding lookup. Valid options are ""cpu"", ""tpu_tensor_core"", and ""tpu_embedding_core"". If specifying ""tpu_tensor_core"", a tensor_core_shape must be supplied. If not specified, the default behavior is embedding lookup on ""tpu_embedding_core"" for training and ""cpu"" for inference. Valid options for training : [""tpu_embedding_core"", ""tpu_tensor_core""] Valid options for serving : [""cpu"", ""tpu_tensor_core""] For training, tpu_embedding_core is good for large embedding vocab (>1M), otherwise, tpu_tensor_core is often sufficient. For serving, doing embedding lookup on tpu_tensor_core during serving is a way to reduce host cpu usage in cases where that is a bottleneck. tensor_core_shape: If supplied, a list of integers which specifies the intended dense shape to run embedding lookup for this feature on TensorCore. The batch dimension can be left None or -1 to indicate a dynamic shape. Only rank 2 shapes currently supported. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true, consider turning off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: A `_TPUEmbeddingColumnV2`. Raises: ValueError: if `dimension` not > 0. ValueError: if `initializer` is specified but not callable." 11836,shared_embedding_columns_v2,tensorflow/tensorflow/python/tpu/feature_column_v2.py,211,function,"TPU version of `tf.compat.v1.feature_column.shared_embedding_columns`. Note that the interface for `tf.tpu.experimental.shared_embedding_columns` is different from that of `tf.compat.v1.feature_column.shared_embedding_columns`: The following arguments are NOT supported: `ckpt_to_load_from`, `tensor_name_in_ckpt`, `max_norm` and `trainable`. Use this function in place of `tf.compat.v1.feature_column.shared_embedding_columns` when you want to use the TPU to accelerate your embedding lookups via TPU embeddings. ``` column_a = tf.feature_column.categorical_column_with_identity(...) column_b = tf.feature_column.categorical_column_with_identity(...) tpu_columns = tf.tpu.experimental.shared_embedding_columns( [column_a, column_b], 10) ... def model_fn(features): dense_feature = tf.keras.layers.DenseFeature(tpu_columns) embedded_feature = dense_feature(features) ... estimator = tf.estimator.tpu.TPUEstimator( model_fn=model_fn, ... embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec( column=tpu_columns, ...)) ``` Args: categorical_columns: A list of categorical columns returned from `categorical_column_with_identity`, `weighted_categorical_column`, `categorical_column_with_vocabulary_file`, `categorical_column_with_vocabulary_list`, `sequence_categorical_column_with_identity`, `sequence_categorical_column_with_vocabulary_file`, `sequence_categorical_column_with_vocabulary_list` dimension: An integer specifying dimension of the embedding, must be > 0. combiner: A string specifying how to reduce if there are multiple entries in a single row for a non-sequence column.
For more information, see `tf.feature_column.embedding_column`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `tf.truncated_normal_initializer` with mean `0.0` and standard deviation `1/sqrt(dimension)`. shared_embedding_collection_name: Optional name of the collection where shared embedding weights are added. If not given, a reasonable name will be chosen based on the names of `categorical_columns`. This is also used in `variable_scope` when creating shared embedding weights. max_sequence_lengths: A list of non-negative integers, either None or empty or the same length as the argument categorical_columns. Entries corresponding to non-sequence columns must be 0 and entries corresponding to sequence columns specify the max sequence length for the column. Any sequence shorter than this will be padded with 0 embeddings and any sequence longer will be truncated. learning_rate_fn: A function that takes global step and returns learning rate for the embedding table. If you intend to use the same learning rate for multiple embedding tables, please ensure that you pass the exact same python function to all calls of shared_embedding_columns, otherwise performance may suffer. embedding_lookup_device: The device on which to run the embedding lookup. Valid options are ""cpu"", ""tpu_tensor_core"", and ""tpu_embedding_core"". If specifying ""tpu_tensor_core"", a tensor_core_shape must be supplied. Defaults to ""cpu"". If not specified, the default behavior is embedding lookup on ""tpu_embedding_core"" for training and ""cpu"" for inference. Valid options for training : [""tpu_embedding_core"", ""tpu_tensor_core""] Valid options for serving : [""cpu"", ""tpu_tensor_core""] For training, tpu_embedding_core is good for large embedding vocab (>1M), otherwise, tpu_tensor_core is often sufficient. For serving, doing embedding lookup on tpu_tensor_core during serving is a way to reduce host cpu usage in cases where that is a bottleneck. tensor_core_shape: If supplied, a list of integers which specifies the intended dense shape to run embedding lookup for this feature on TensorCore. The batch dimension can be left None or -1 to indicate a dynamic shape. Only rank 2 shapes currently supported. use_safe_embedding_lookup: If true, uses safe_embedding_lookup_sparse instead of embedding_lookup_sparse. safe_embedding_lookup_sparse ensures there are no empty rows and all weights and ids are positive at the expense of extra compute cost. This only applies to rank 2 (NxM) shaped input tensors. Defaults to true, consider turning off if the above checks are not needed. Note that having empty rows will not trigger any error though the output result might be 0 or omitted. Returns: A list of `_TPUSharedEmbeddingColumnV2`. Raises: ValueError: if `dimension` not > 0. ValueError: if `initializer` is specified but not callable. ValueError: if `max_sequence_lengths` is specified and not the same length as `categorical_columns`. ValueError: if `max_sequence_lengths` is positive for a non-sequence column or 0 for a sequence column." 11837,_TPUEmbeddingColumnV2,tensorflow/tensorflow/python/tpu/feature_column_v2.py,420,class,Core Embedding Column. 11838,_TPUSharedEmbeddingColumnV2,tensorflow/tensorflow/python/tpu/feature_column_v2.py,604,class,Core Shared Embedding Column. 11839,split_sequence_columns_v2,tensorflow/tensorflow/python/tpu/feature_column_v2.py,742,function,"Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
For use in a TPUEstimator model_fn function. E.g. def model_fn(features): sequence_columns, feature_columns = ( tf.tpu.feature_column.split_sequence_columns(feature_columns)) input = tf.feature_column.input_layer( features=features, feature_columns=feature_columns) sequence_features, sequence_lengths = ( tf.contrib.feature_column.sequence_input_layer( features=features, feature_columns=sequence_columns)) Args: feature_columns: A list of _TPUEmbeddingColumns to split. Returns: Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the second is the non-sequence columns." 11840,sparse_embedding_aggregate_slice,tensorflow/tensorflow/python/tpu/feature_column_v2.py,778,function,"Uses XLA's dynamic slice operations to perform embedding lookups. From third_party/cloud_tpu/models/movielens/tpu_embedding.py Args: params: Tensor of embedding table. Rank 2 (table_size x embedding dim) values_and_values_mask: A two-tuple that contains: values - Tensor of embedding indices. Rank 2 (batch x n_indices) values_mask - Tensor of mask / weights. Rank 2 (batch x n_indices) combiner: The combiner to use for the embedding lookup. Currently supports 'sum' and 'mean'. name: Optional name scope for created ops. Returns: Rank 2 tensor of aggregated (per batch element) embedding vectors. Raises: ValueError: Combiner is not supported." 11841,pad_sparse_embedding_lookup_indices,tensorflow/tensorflow/python/tpu/feature_column_v2.py,832,function,"Creates statically-sized Tensors containing indices and weights. From third_party/cloud_tpu/models/movielens/tpu_embedding.py Also computes sparse_indices.values % embedding_table_size, for equivalent functionality to sparse_column_with_integerized_feature. The returned padded weight Tensor also doubles as a mask indicating which values in the returned padded indices Tensor are indices versus padded zeros. Args: sparse_indices: SparseTensor of embedding lookup indices. padded_size: Number of columns of the returned Tensors. Indices which fall out of bounds will be truncated to the padded size. Returns: (sparse_indices.values padded to the specified size, a mask the same size as the returned padded values in which 0s indicate padded locations and 1s (or values from sparse_weights) indicate actual values)" 11842,_check_invalid_cases,tensorflow/tensorflow/python/tpu/feature_column_v2.py,870,function,Checks for invalid embedding_lookup_device configurations. 11843,_TPUDeviceSpecificEmbeddingColumnV2,tensorflow/tensorflow/python/tpu/feature_column_v2.py,884,class,TPUEmbeddingColumn which allows serving on TensorCore. 11844,_TPUSharedDeviceSpecificEmbeddingColumnV2,tensorflow/tensorflow/python/tpu/feature_column_v2.py,1011,class,TPUSharedEmbeddingColumnV2 which allows serving on TensorCore. 11845,_initialized_session,tensorflow/tensorflow/python/tpu/feature_column_v2_test.py,40,function, 11846,EmbeddingColumnTestV2,tensorflow/tensorflow/python/tpu/feature_column_v2_test.py,47,class, 11847,SharedEmbeddingColumnTestV2,tensorflow/tensorflow/python/tpu/feature_column_v2_test.py,197,class, 11848,DeviceSpecificEmbeddingColumnTestV2,tensorflow/tensorflow/python/tpu/feature_column_v2_test.py,375,class, 11849,CloudTPUPreemptedHook,tensorflow/tensorflow/python/tpu/preempted_hook.py,31,class,"The SessionRunHook for preemptible Cloud TPUs. This is an implementation of SessionRunHook for the pre-emptible Google Cloud TPU service. It attempts to close the session if the TPU is preempted, and exits the coordinator process if the session cannot be closed."
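To make the embedding-column records above concrete, here is a minimal sketch of building a TPU embedding column from an identity categorical column, following the documented signature of `tf.tpu.experimental.embedding_column` (the feature key, vocabulary size, and dimension are illustrative assumptions):

```python
import tensorflow.compat.v1 as tf

# Hypothetical categorical feature with a 1000-entry vocabulary.
video_id = tf.feature_column.categorical_column_with_identity(
    key='video_id', num_buckets=1000)

# TPU-accelerated 10-dimensional embedding, mean-combined.
tpu_video_embedding = tf.tpu.experimental.embedding_column(
    video_id, dimension=10, combiner='mean')
```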
11850,_TPUPollingThread,tensorflow/tensorflow/python/tpu/preempted_hook.py,51,class,"A thread that polls the state of a TPU node. When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED) that is considered unrecoverable by the underlying infrastructure, it attempts to close the session, and exits the entire process if session.close() hangs." 11851,CoordinatorResetError,tensorflow/tensorflow/python/tpu/session_support.py,41,class,Raised when the monitored session should reset. 11852,_clone_session,tensorflow/tensorflow/python/tpu/session_support.py,49,function, 11853,WorkerHeartbeatManager,tensorflow/tensorflow/python/tpu/session_support.py,56,class,Manages the status/heartbeat monitor for a set of workers. 11854,all_worker_devices,tensorflow/tensorflow/python/tpu/session_support.py,163,function,Return a list of devices for each worker in the system. 11855,WatchdogManager,tensorflow/tensorflow/python/tpu/session_support.py,178,class,"Configures worker watchdog timer and handles periodic pings. Usage: # Ping workers every minute, shutting down workers if they haven't received # a ping after 1 hour. watchdog_manager = WatchdogManager( ping_interval=60, shutdown_timeout=3600 ) # Use as a context manager, resetting watchdog on context exit: with watchdog_manager: session.run(...) # Or setup globally; watchdog will remain active until program exit. watchdog_manager.configure_and_run()" 11856,start_worker_watchdog,tensorflow/tensorflow/python/tpu/session_support.py,287,function,Start global worker watchdog to shut down workers on coordinator exit. 11857,stop_worker_watchdog,tensorflow/tensorflow/python/tpu/session_support.py,301,function,Stop global worker watchdog. 11858,GracefulShutdownHook,tensorflow/tensorflow/python/tpu/session_support.py,309,class,"Session hook that watches for shutdown events. If a shutdown is indicated, `saver.save(checkpoint_prefix)` is executed, and a SystemShutdown exception is raised to terminate the main session. If `saver` is None the `SAVERS` collection will be read to find a saver. `on_shutdown_hooks` is an optional list of functions that should be called after checkpointing. The function is called with (`run_context`, `all_workers`, `lame_workers`). If `heartbeat_group` is not specified, it will default to all CPU workers in the system." 11859,ResetComputation,tensorflow/tensorflow/python/tpu/session_support.py,409,class,"Hook to reset a TPUEstimator computation loop. This hook shuts down all workers and resets the monitored session loop by throwing a CoordinatorResetError." 11860,ShutdownLameWorkers,tensorflow/tensorflow/python/tpu/session_support.py,427,class,"Shuts down lame workers. Processing will continue normally (typically by waiting for the down workers to be restarted)." 11861,ShutdownAllWorkers,tensorflow/tensorflow/python/tpu/session_support.py,441,class,"Shuts down all workers. Processing will continue normally (typically by waiting for the down workers to be restarted)." 11862,set_parameters,tensorflow/tensorflow/python/tpu/tensor_tracer.py,106,function,"Enables tensor tracer and sets its parameters. Example usage: tensor_tracer_parameters = {'trace_dir': '/usr/tmp/trace_dir', 'trace_mode': 'norm', 'report_file': '/usr/tmp/trace_dir/report.all'} tensor_tracer.set_parameters(tensor_tracer_parameters) This sets up the parameters for tensor tracer. A call to tensor tracer as below is necessary to enable debugging on CPUs and GPUs. On TPUs this can be skipped, as the call is hooked into tpu.rewrite.
tt = tensor_tracer.TensorTracer() loss = tt.trace_cpu(tf.get_default_graph(), tensor_fetches=loss) Args: tensor_tracer_params: Tensor tracer parameter dictionary. Below gives examples of these parameters: See tensor_tracer_report.py for all parameters. - enable: If set, tensor tracer will be enabled. Calling enable_tensor_tracer automatically adds this parameter. - trace_mode: The trace_mode to be used by tensor tracer. These include: - summary: Collects multiple statistics for traced tensors, and writes them to a summary file that can be visualized using tensorboard. This mode currently only works for TPUEstimator. It can also be used for other models, but outfeed must be handled by the user. - norm: Collects the norm of each traced tensor and writes them into a text file pointed to by the 'trace_dir' flag. (Default mode). - nan-inf: Checks the existence of NaNs and Infs in the tensor, and writes a boolean value to a text file pointed to by the 'trace_dir' flag. Note that 'norm' mode can also capture this information with more numerical info. - max-abs: Collects the absolute max for each traced tensor and writes it into a text file pointed to by the 'trace_dir' flag. - full-tensor: Writes the full tensor content of the traced tensors into a text file pointed to by the 'trace_dir' flag. - part-tensor: Writes a part of the tensor content of the traced tensors into a text file pointed to by the 'trace_dir' flag. - full_tensor_summary: Writes the full tensors as binary event files. The outputs can be read using: trace = tensor_tracer.read_tensor_tracer_event_file(event_file_path) - trace-back-if-nan: This mode will write the full tensor content only when the tensor has a NaN or Inf in it. It is possible to also print the inputs coming to this op using the 'trace_stack_size' parameter. E.g., if trace_stack_size=2, then the tensor with NaN/Inf, its inputs, and its inputs' inputs will also be printed. - report_file: Path to the metadata file that is written during graph construction. If not set, metadata will be printed to stdout during graph construction. - trace_dir: Path where the execution traces will be written during the graph execution. If not set, trace will be printed to stderr. - trace_level: Tensor tracer aims to trace everything it can. This introduces some overhead on graph execution and graph compilation times. Using the trace_level parameter, it is possible to trace operations based on their priorities. For example, - trace_level=7 is the highest trace_level, in which every op is traced. - trace_level=6 will skip constant operations such as tf.constant. - trace_level=5 will skip less important ops such as tf.identities. - The default is trace_level=3, which will skip concat ops, or random number generators. - To reduce the graph compile time overhead, trace_level can be set to 0, which will skip additions, subtractions, and multiplications as well. - excluded_opnames: If set, any matching op name will not be traced. excluded_opnames can be set as a regular expression. E.g., excluded_opnames=.* will exclude everything. - excluded_optypes: If set, any matching op type will not be traced. excluded_optypes can be set as a regular expression. E.g., excluded_optypes=.* will exclude everything. excluded_optypes=MatMul will exclude all MatMul ops from tracing. - included_opnames: If set, any matching op name will be forced to be traced. included_opnames can be set as a regular expression. E.g., '--included_opnames=some_op --excluded_opname=*.' will only trace some_op.
- included_optypes: If set, any matching op type will be forced to be traced. included_optypes can be set as a regular expression. E.g., '--included_optypes=some_op_type --excluded_optypes=*.' will trace only the ops with type 'some_op_type' Advanced Flags: - compact_trace: If not set, statistics per tensor are written as soon as the tensor is executed. If set, then statistics for all traced tensors will be stored in a cache and will be written only once per step. This flag is ignored for full-tensor and part-tensor trace modes. If the trace_dir is a remote directory, compact_trace will be forced. - trace_scalar: Scalar values are not traced by default. If this flag is set, scalar values will also be traced. - included_cores: Accepts a list string. Tracing will only be dumped for these cores. E.g., setting it to '[0,2,4,6]' will result in a trace only for those cores. - op_range: In the form of '%d:%d', limiting the tracing to the ops within this range. --op_range='5:10' will trace only the ops that have topological order between 5-10. - trace_before_included_ops: If set to a number k, it will also trace distance-k inputs of each traced tensor. E.g., if k=1, then in addition to each traced_tensor, their input tensors will also be traced. - trace_after_included_ops: Same as trace_before_included_ops, where it will also trace distance-k outputs of each traced tensor. - submode: 'brief' or 'detailed'. If the trace mode is not compact, brief mode will print only the id of each traced tensor to save some space. 'detailed' mode prints the full tensor name. - trace_stack_size: Used only for trace_mode=trace-back-if-nan mode. It determines how many ops to print back from a nan op. E.g., op4 -> op3 -> op2 -> op1 -> op0, if op0 has a NaN and trace_stack_size is 1, the result of op1 will also be printed. If trace_stack_size is 2, the results of op1 and op2 will be printed. - use_fingerprint_subdirectory: The trace directory will be chosen using the fingerprint of the trace metadata under the provided trace_dir." 11863,op_priority,tensorflow/tensorflow/python/tpu/tensor_tracer.py,220,function,"Returns the priority of the op. If the priority of the op is k, it will be traced if trace_level>=k. Args: op_type: String name of the operation type. Returns: Integer value corresponding to the priority of the op." 11864,read_tensor_tracer_event_file,tensorflow/tensorflow/python/tpu/tensor_tracer.py,263,function,"Reads the event file written by tensor tracer. This can be used to read the full tensors written into binary event files by TensorTracer with trace_mode=full_tensor_summary. Example usage: result_dict = tensor_tracer.read_tensor_tracer_event_file(event_file_path) for step, tensor_dict in result_dict.items(): for tensor_name, full_tensor_content in tensor_dict.items(): logging.info(tensor_name, full_tensor_content) Args: event_file: Path to the event file that contains only tensor tracer events. Returns: An event dictionary in the form of {step_number: {tensor_name: tensor_content}} Raises: ValueError: If an unexpected trace is found." 11865,trace_tensor,tensorflow/tensorflow/python/tpu/tensor_tracer.py,307,function,"Programmatic interface to trace a tensor with Tensor Tracer. Tensor Tracer, by default, traces all tensors in the execution. This function can be used to limit traced tensors. If this function is called for a subset of the tensors, only those will be traced. For example, Tensor Tracer will only trace c below.
c = tf.MatMul(a, b) tensor_tracer.trace_tensor(c) d = tf.add(c, 1) Args: tensor: the tensor object for which the tracing is requested. tracepoint_name: an optional tensor tracepoint name string. A tracepoint name is a Tensor Tracer internal name for the tensor. It is useful when comparing equivalent traces from different models that have different tensor names. Equivalent tensors (with different names) can be mapped to each other by assigning a common tracepoint_name. Returns: The provided tensor." 11866,keras_layer_tracepoint,tensorflow/tensorflow/python/tpu/tensor_tracer.py,337,function,"An interface for adding the tensor outputs of a keras layer. Encapsulates trace_tensor. Args: layer: A keras layer. checkpoint_name: a string name for the checkpoint. This name has to be a unique name if used within model comparison. The tensors that have the same checkpoint identifier are compared in model comparison. Returns: The provided layer." 11867,_trace_files_need_precreated,tensorflow/tensorflow/python/tpu/tensor_tracer.py,368,function,Return True if trace files must be pre-created by users. 11868,TensorTracer,tensorflow/tensorflow/python/tpu/tensor_tracer.py,386,class,"A software construct for tracing tensor values in a TF graph. This utility is disabled by default. It is hooked into tpu.rewrite, so it can easily be enabled on TPUs by setting the TENSOR_TRACER_FLAGS env variable as below without a code change. export TENSOR_TRACER_FLAGS=""--enable=1"" Below is an example of enabling it on CPUs or GPUs, or for more advanced use cases on TPUs. a = x + 1 b = a * 2 rs = tf.reduce_sum(b) tensor_tracer.set_parameters({'trace_dir': 'path/to/trace_dir', 'report_file': 'path/to/report/file'}) tt = tensor_tracer.TensorTracer() if on_tpu: rs = tt.trace_tpu(tf.get_default_graph(), tensor_fetches=rs) else: rs = tt.trace_cpu(tf.get_default_graph(), tensor_fetches=rs) session.run(rs) If it is enabled, it will trace the output tensor values of selected Ops in the graph. It has two outputs: (1) the traces and (2) a report. The traces are dumped to a specified directory during the graph execution, while the report is dumped during the graph construction. By passing options via the env variable, users can change: (1) the trace mode (e.g., detecting NaN/Inf, printing partial or full tensor values) (2) which Ops to be traced (via op.name or op.type) (3) output trace file path." 11869,TTParameters,tensorflow/tensorflow/python/tpu/tensor_tracer_flags.py,103,class,A class that handles the parameters of Tensor Tracer. 11870,report_proto_path,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,58,function,"Returns the path where report proto should be written. Args: trace_dir: String denoting the trace directory. Returns: A string denoting the path to the report proto." 11871,topological_sort,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,70,function,"Performs topological sort on the given graph. Args: g: the graph. Returns: A pair where the first element indicates if the topological sort succeeded (True if there is no cycle found; False if a cycle is found) and the second element is either the sorted list of nodes or the cycle of nodes found." 11872,TensorTracerConfig,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,136,class,Tensor Tracer config object. 11873,TensorTraceOrder,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,147,class,Class that is responsible for storing the trace-id of the tensors.
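Pulling the `set_parameters`/`TensorTracer` pieces above together, a minimal CPU tracing sketch based on the documented flow (the trace directory and the toy graph are illustrative assumptions):

```python
import tensorflow.compat.v1 as tf
from tensorflow.python.tpu import tensor_tracer

tf.disable_eager_execution()
x = tf.constant([1.0, 2.0, 3.0])
rs = tf.reduce_sum((x + 1) * 2)

# Dictionary format follows the set_parameters docstring; path is illustrative.
tensor_tracer.set_parameters({'trace_mode': 'norm',
                              'trace_dir': '/tmp/tt_trace_dir'})
tt = tensor_tracer.TensorTracer()
rs = tt.trace_cpu(tf.get_default_graph(), tensor_fetches=rs)

with tf.Session() as sess:
  sess.run(rs)
```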
11874,sort_tensors_and_ops,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,178,function,Returns a wrapper that has consistent tensor and op orders. 11875,OpenReportFile,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,203,class,Context manager for writing report file. 11876,proto_fingerprint,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,223,function, 11877,TTReportHandle,tensorflow/tensorflow/python/tpu/tensor_tracer_report.py,229,class,Utility class responsible for creating a tensor tracer report. 11878,_tpu_device_name,tensorflow/tensorflow/python/tpu/topology.py,28,function,Returns the device name for the TPU `device` on `task` of `job`. 11879,_tpu_host_device_name,tensorflow/tensorflow/python/tpu/topology.py,36,function,Returns the device name for the CPU device on `task` of `job`. 11880,Topology,tensorflow/tensorflow/python/tpu/topology.py,45,class,"Describes a set of TPU devices. Represents both the shape of the physical mesh, and the mapping from TensorFlow TPU devices to physical mesh coordinates." 11881,TopologyTest,tensorflow/tensorflow/python/tpu/topology_test.py,26,class, 11882,_tpu_system_device_name,tensorflow/tensorflow/python/tpu/tpu.py,90,function,Returns the device name for the TPU_SYSTEM device of `job`. 11883,initialize_system,tensorflow/tensorflow/python/tpu/tpu.py,99,function,"Initializes a distributed TPU system for use with TensorFlow. Args: embedding_config: If not None, a `TPUEmbeddingConfiguration` proto describing the desired configuration of the hardware embedding lookup tables. If embedding_config is None, no hardware embeddings can be used. job: The job (the XXX in TensorFlow device specification /job:XXX) that contains the TPU devices that will be initialized. If job=None it is assumed there is only one job in the TensorFlow flock, and an error will be returned if this assumption does not hold. compilation_failure_closes_chips: Whether to close TPU chips when there is a compilation failure. Returns: A serialized `TopologyProto` that describes the TPU system. Note: the topology must be evaluated using `Session.run` before it can be used." 11884,initialize_system_for_tpu_embedding,tensorflow/tensorflow/python/tpu/tpu.py,137,function,"Initializes a distributed TPU Embedding system for use with TensorFlow. The following two are equivalent: 1. initialize_system() with embedding_config. 2. initialize_system() without embedding_config, then initialize_system_for_tpu_embedding(). initialize_system() should not be called with embedding_config if initialize_system_for_tpu_embedding() is meant to be called later. Args: embedding_config: a `TPUEmbeddingConfiguration` proto describing the desired configuration of the hardware embedding lookup tables. job: The job (the XXX in TensorFlow device specification /job:XXX) that contains the TPU devices that will be initialized. If job=None it is assumed there is only one job in the TensorFlow flock, and an error will be returned if this assumption does not hold. Returns: A no-op." 11885,shutdown_system,tensorflow/tensorflow/python/tpu/tpu.py,164,function,"Shuts down a running distributed TPU system. Args: job: The job (the XXX in TensorFlow device specification /job:XXX) that contains the TPU devices that will be shut down. If job=None it is assumed there is only one job in the TensorFlow flock, and an error will be returned if this assumption does not hold."
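A brief TF1-style sketch of the `initialize_system`/`shutdown_system` lifecycle documented above (the session target is a placeholder assumption; a real program would point it at a TPU worker):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
topology_op = tf.tpu.initialize_system()
shutdown_op = tf.tpu.shutdown_system()

with tf.Session('grpc://tpu-worker:8470') as sess:  # placeholder address
  serialized_topology = sess.run(topology_op)  # must be evaluated before use
  # ... build and run TPU computations here ...
  sess.run(shutdown_op)
```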
11886,core,tensorflow/tensorflow/python/tpu/tpu.py,179,function,"Returns the device name for a core in a replicated TPU computation. Args: num: the virtual core number within each replica to which operators should be assigned. Returns: A device name, suitable for passing to `tf.device()`." 11887,_enclosing_tpu_context_and_graph,tensorflow/tensorflow/python/tpu/tpu.py,191,function,Returns the TPUReplicateContext and its associated graph. 11888,is_tpu_strategy,tensorflow/tensorflow/python/tpu/tpu.py,208,function, 11889,_enclosing_tpu_device_assignment,tensorflow/tensorflow/python/tpu/tpu.py,214,function, 11890,tpu_replicated_input_resolver,tensorflow/tensorflow/python/tpu/tpu.py,224,function,Replaces TPUReplicatedInput outputs with its inputs in resource_inputs. 11891,TPUReplicateContext,tensorflow/tensorflow/python/tpu/tpu.py,256,class,"A `ControlFlowContext` for nodes inside a TPU computation. The primary role of `TPUReplicateContext` is to mark operators inside a tpu.replicate() computation with the attribute ""_tpu_replicate=XYZ"", where XYZ is a unique name. We use a `ControlFlowContext` to perform the annotation since it integrates with TensorFlow constructs like ResourceVariables. For example, if a `ResourceVariable` is constructed inside a tpu.replicate() block, the `ResourceVariable` implementation can use `with ops.control_dependencies(None)` to build the variable's definition outside the replicated computation." 11892,OutsideCompilationV2Context,tensorflow/tensorflow/python/tpu/tpu.py,652,class,"The context for outside compilation in TensorFlow 2.0. Every op added in this context will be assigned an _xla_outside_compilation attribute." 11893,outside_compilation,tensorflow/tensorflow/python/tpu/tpu.py,684,function,"Builds part of a computation outside any current TPU replicate scope. `tf.tpu.outside_compilation()` is used to run ops in `computation` on CPU instead of running on TPU. For example, users can run ops that are not supported on TPUs (e.g. tf.summary.write()) by explicitly placing those ops on CPUs. The usage of outside compilation below will place ops in `computation_with_string_ops` on CPU. Example usage: ```python def computation_with_string_ops(x): # string types are not supported on TPUs and the ops below must # run on CPU instead. output = tf.strings.format('1{}', x) return tf.strings.to_number(output) def tpu_computation(): # Expected output is 11. output = tf.tpu.outside_compilation(computation_with_string_ops, 1) ``` Outside compilation should be called inside TPUReplicateContext. That is, `tf.tpu.outside_compilation()` should be called inside a function that is passed to `tpu.split_compile_and_replicate()` -- this is implied when outside compilation is invoked inside a function passed to TPUStrategy `run()`. If invoked outside of TPUReplicateContext, then this simply returns the result of `computation`, and therefore, would be a no-op. Note that outside compilation is different from `tf.distribute.experimental.TPUStrategy.merge_call()` as logic in outside compilation is replicated and executed separately for each replica. On the other hand, `merge_call()` requires a `merge_fn` to aggregate the inputs from different replicas and is executed only once. For variables placed on TPU devices, which includes variables created inside TPUStrategy scope, outside compilation logic must not include variable read/write.
For variables placed on the host, which is the case for variables created via TPUEstimator, variable read/write is only allowed if the variable is not accessed by any other ops in the TPU computation. Variable read/write from the outside compilation cluster is not visible to the TPU computation and vice versa. Therefore, if outside compilation logic contains such host variable read/write ops and if the variables are accessed by the TPU computation as well, then this may lead to deadlock. Internally, `tf.tpu.outside_compilation()` adds outside compilation attributes to all ops in `computation`. During a later graph pass, the ops with the outside compilation attribute are extracted and replicated into a host-side graph. Inputs to this extracted host-side graph are sent from the TPU computation graph to the host graph via a pair of XlaSendToHost and XlaRecvFromHost ops. Note that using `tf.tpu.outside_compilation()` may result in tensor transfer between TPU and CPU, leading to non-trivial performance impact. Args: computation: A Python function that builds the computation to place on the host. *args: the positional arguments for the computation. **kwargs: the keyword arguments for the computation. Returns: The Tensors returned by computation." 11894,PaddingSpec,tensorflow/tensorflow/python/tpu/tpu.py,806,class,Represents the type of padding policies for tpu.replicate. 11895,XLAOptions,tensorflow/tensorflow/python/tpu/tpu.py,816,class,"XLA compilation options. Attributes: use_spmd_for_xla_partitioning: Boolean. Whether to use XLA's SPMD partitioner instead of MPMD partitioner when compiler partitioning is requested." 11896,replicate,tensorflow/tensorflow/python/tpu/tpu.py,833,function,"Builds a graph operator that runs a replicated TPU computation. Example of basic usage where `inputs` has a static shape: ```python def computation(x): x = x + 1 return tf.math.reduce_mean(x) x = tf.convert_to_tensor([1., 2., 3.]) y = tf.convert_to_tensor([4., 5., 6.]) tf.compat.v1.tpu.replicate(computation, inputs=[[x], [y]]) ``` If `inputs` has dynamic shapes and you would like to automatically bucketize the inputs to avoid XLA recompilation, see the advanced example below: ```python def computation(x): x = x + 1 return tf.math.reduce_mean(x) # Assume input tensors in two replicas `x` and `y` both have dynamic shape # ([None, 2]). tf.compat.v1.tpu.replicate( computation, inputs=[x, y], maximum_shapes=[tf.TensorShape([None, None])], padding_spec=tf.compat.v1.tpu.PaddingSpec.POWER_OF_TWO) ``` Args: computation: A Python function that builds the computation to replicate. inputs: A list of lists of input tensors or `None` (equivalent to `[[]]`), indexed by `[replica_num][input_num]`. All replicas must have the same number of inputs. Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in an N-dimension list of scalar tensors rather than a single rank-N tensor. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple of arguments as inputs to computation. device_assignment: If not `None`, a `DeviceAssignment` describing the mapping between logical cores in the computation and physical cores in the TPU topology. Uses a default device assignment if `None`.
The `DeviceAssignment` may be omitted if each replica of the computation uses only one core, and there is either only one replica, or the number of replicas is equal to the number of cores in the TPU system. name: (Deprecated) Does nothing. maximum_shapes: A nested structure of tf.TensorShape representing the shape to which the respective component of each input element in each replica should be padded. Any unknown dimensions (e.g. tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like object) will be padded to the maximum size of that dimension over all replicas. The structure of `maximum_shapes` needs to be the same as `inputs[0]`. padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the padding policy when the `inputs` to `tpu.replicate` is dynamic. One usage is to enable automatic bucketizing on the inputs by setting the value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the recompilation in the XLA side. xla_options: An instance of `tpu.XLAOptions` which indicates the options passed to XLA compiler. Use `None` for default options. Returns: A list of outputs, indexed by `[replica_num]`; each output can be a nested structure the same as what computation() returns, with a few exceptions. Exceptions include: 1) None output: a NoOp would be returned which control-depends on computation. 2) Single value output: A tuple containing the value would be returned. 3) Operation-only outputs: a NoOp would be returned which control-depends on computation. TODO(b/121383831): Investigate removing these special cases. Raises: ValueError: If all replicas do not have equal numbers of input tensors. ValueError: If the number of inputs per replica does not match the number of formal parameters to `computation`. ValueError: If the static `inputs` dimensions don't match with the values given in `maximum_shapes`. ValueError: If the structure of inputs per replica does not match the structure of `maximum_shapes`." 11897,_ceil_to_pow_of_n,tensorflow/tensorflow/python/tpu/tpu.py,939,function,Ceil input `x` to a power of `n`. 11898,_pad_all_input,tensorflow/tensorflow/python/tpu/tpu.py,949,function,"Pad all input tensors given padded_shapes. The real shape tensors will be concatenated with the padded original inputs. Args: inputs: The original inputs. padded_shapes: A list of padded shapes for each input. If an entry is None, no padding is performed. padding_spec: An enum specified by `tpu.PaddingSpec`. This describes the padding policy when the `inputs` to `tf.tpu.replicate` is dynamic. One usage is to enable automatic bucketizing on the inputs by setting the value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the recompilation in the XLA side. Returns: The padded inputs and a PaddingMap list which maps the padded input dimension to the real shape argument index." 11899,_flatten_and_filter_composite,tensorflow/tensorflow/python/tpu/tpu.py,1076,function,"For an input, replaces the input with a tuple if the input is composite. If `maybe_composite` is not composite, return the parameter `non_composite_output`; otherwise, return a tuple which consists of the value of the parameter `composite_output` the same number of times as there are components of the composite tensor. This is useful for computing a mask when flattening nested data with `expand_composites=True`.
For example ```python nest.flatten(data, expand_composites=True) ``` and ```python nest.flatten(nest.map( data, lambda x: _flatten_and_filter_composite(x, False, True))) ``` will have the same length, and an entry in the second will be True if the corresponding tensor in the first is derived from expanding a composite tensor. Args: maybe_composite: A value to test for being a composite tensor. non_composite_output: The value to return when `maybe_composite` is not a composite. composite_output: the value to fill the output tuple with if `maybe_composite` is a composite. Returns: `non_composite_output` or a tuple with multiple copies of `composite_output`." 11900,split_compile_and_replicate,tensorflow/tensorflow/python/tpu/tpu.py,1121,function,"Builds graph operators that run compilation and replicated computation. This is a lower-level interface than `replicate` that returns a separate compile and execute output tensor. In the generated graph the compile op feeds into the execute op and no additional compilation is incurred when running the compile op before the execute op. The compile op returns additional information about the compilation but does not return the compiled program. Args: computation: A Python function that builds the computation to replicate. inputs: A list of lists of input tensors or `None` (equivalent to `[[]]`), indexed by `[replica_num][input_num]`. All replicas must have the same number of inputs. Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in an N-dimension list of scalar tensors rather than a single rank-N tensor. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple of arguments as inputs to computation. device_assignment: If not `None`, a `DeviceAssignment` describing the mapping between logical cores in the computation and physical cores in the TPU topology. Uses a default device assignment if `None`. The `DeviceAssignment` may be omitted if each replica of the computation uses only one core, and there is either only one replica, or the number of replicas is equal to the number of cores in the TPU system. name: (Deprecated) Does nothing. use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU backends. Currently, only supports a default placement (computation is placed on GPU if one is available, and on CPU if not). maximum_shapes: A nested structure of tf.TensorShape representing the shape to which the respective component of each input element in each replica should be padded. Any unknown dimensions (e.g. tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like object) will be padded to the maximum size of that dimension over all replicas. The structure of `maximum_shapes` needs to be the same as `inputs[0]`. padding_spec: An enum specified by `tf.tpu.PaddingSpec`. This describes the padding policy when the `inputs` to `tf.tpu.replicate` is dynamic. One usage is to enable automatic bucketizing on the inputs by setting the value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce recompilation on the XLA side. xla_options: An instance of `tpu.XLAOptions` which indicates the options passed to the XLA compiler. Use `None` for default options. Returns: A list of lists with the first list corresponding to the compile op and the second a list of output tensors, indexed by `[replica_num][output_num]`.
Raises: ValueError: If all replicas do not have equal numbers of input tensors. ValueError: If the number of inputs per replica does not match the number of formal parameters to `computation`. ValueError: If the static `inputs` dimensions don't match with the values given in `maximum_shapes`. ValueError: If the structure of inputs per replica does not match the structure of `maximum_shapes`." 11901,_postprocess_flat_outputs,tensorflow/tensorflow/python/tpu/tpu.py,1485,function,"Validates flat outputs, adds back device assignments and other attrs. Args: outputs: Output from `computation` inside `tpu.rewrite`. Returns: - Tensors extracted from outputs. - Operations extracted from outputs. - A pack template for use with nest.pack_sequence_as to pack the tensors." 11902,_postprocess_non_flat_outputs,tensorflow/tensorflow/python/tpu/tpu.py,1561,function,"Validates non-flat outputs, adds back device assignments and other attrs. Args: outputs: Output from `computation` inside `tpu.rewrite`. Returns: - Tensors extracted from outputs. - An empty Operations list because Operations are not allowed in non-flat outputs. - A pack template for use with nest.pack_sequence_as to pack the tensors." 11903,split_compile_and_shard,tensorflow/tensorflow/python/tpu/tpu.py,1609,function,"Shards `computation` for parallel execution. `inputs` must be a list of Tensors or None (equivalent to an empty list), each of which has a corresponding split axis (from `input_shard_axes`). Each input is split into `num_shards` pieces along the corresponding axis, and computation is applied to each shard in parallel. Tensors are broadcast to all shards if they are lexically captured by `computation`. e.g., x = tf.constant(7) def computation(): return x + 3 ... = shard(computation, ...) If `outputs_from_all_shards` is true, the outputs from all shards of `computation` are concatenated back together along their `output_shard_axes`. Otherwise, each output is taken from an arbitrary shard. Inputs and outputs of the computation must be at least rank-1 Tensors. Args: computation: A Python function that builds a computation to apply to each shard of the input. inputs: A list of input tensors or None (equivalent to an empty list). Each input tensor has a corresponding shard axis, given by `input_shard_axes`, which must have size divisible by `num_shards`. num_shards: The number of shards. input_shard_axes: A list of dimensions along which to shard `inputs`, or `None`. `None` means ""shard all inputs along dimension 0"". If not `None`, there must be one dimension per input. outputs_from_all_shards: Boolean or list of boolean. For each output, if `True`, outputs from all shards are concatenated along the corresponding `output_shard_axes` entry. Otherwise, each output is taken from an arbitrary shard. If the argument is a boolean, the argument's value is used for each output. output_shard_axes: A list of dimensions along which to concatenate the outputs of `computation`, or `None`. `None` means ""concatenate all outputs along dimension 0"". If not `None`, there must be one dimension per output. Ignored if `outputs_from_all_shards` is False. infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs of `computation`. device_assignment: If not `None`, a `DeviceAssignment` describing the mapping between logical cores in the computation and physical cores in the TPU topology. Uses a default device assignment if `None`.
The `DeviceAssignment` may be omitted if each shard of the computation uses only one core, and there is either only one shard, or the number of shards is equal to the number of cores in the TPU system. name: (Deprecated) Does nothing. xla_options: An instance of `tpu.XLAOptions` which indicates the options passed to the XLA compiler. Use `None` for default options. Returns: A tuple of (compile op, [output tensors]). Raises: ValueError: If num_shards <= 0 ValueError: If len(input_shard_axes) != len(inputs) ValueError: If len(output_shard_axes) != len(outputs from `computation`)" 11904,shard,tensorflow/tensorflow/python/tpu/tpu.py,1764,function,"Shards `computation` for parallel execution. `inputs` must be a list of Tensors or None (equivalent to an empty list), each of which has a corresponding split axis (from `input_shard_axes`). Each input is split into `num_shards` pieces along the corresponding axis, and computation is applied to each shard in parallel. Tensors are broadcast to all shards if they are lexically captured by `computation`. e.g., x = tf.constant(7) def computation(): return x + 3 ... = shard(computation, ...) TODO(phawkins): consider adding support for broadcasting Tensors passed as inputs. If `outputs_from_all_shards` is true, the outputs from all shards of `computation` are concatenated back together along their `output_shard_axes`. Otherwise, each output is taken from an arbitrary shard. Inputs and outputs of the computation must be at least rank-1 Tensors. Args: computation: A Python function that builds a computation to apply to each shard of the input. inputs: A list of input tensors or None (equivalent to an empty list). Each input tensor has a corresponding shard axis, given by `input_shard_axes`, which must have size divisible by `num_shards`. num_shards: The number of shards. input_shard_axes: A list of dimensions along which to shard `inputs`, or `None`. `None` means ""shard all inputs along dimension 0"". If not `None`, there must be one dimension per input. outputs_from_all_shards: Boolean or list of boolean. For each output, if `True`, outputs from all shards are concatenated along the corresponding `output_shard_axes` entry. Otherwise, each output is taken from an arbitrary shard. If the argument is a boolean, the argument's value is used for each output. output_shard_axes: A list of dimensions along which to concatenate the outputs of `computation`, or `None`. `None` means ""concatenate all outputs along dimension 0"". If not `None`, there must be one dimension per output. Ignored if `outputs_from_all_shards` is False. infeed_queue: If not `None`, the `InfeedQueue` to use to augment the inputs of `computation`. device_assignment: If not `None`, a `DeviceAssignment` describing the mapping between logical cores in the computation and physical cores in the TPU topology. Uses a default device assignment if `None`. The `DeviceAssignment` may be omitted if each shard of the computation uses only one core, and there is either only one shard, or the number of shards is equal to the number of cores in the TPU system. name: (Deprecated) Does nothing. xla_options: An instance of `tpu.XLAOptions` which indicates the options passed to the XLA compiler. Use `None` for default options. Returns: A list of output tensors.
Raises: ValueError: If num_shards <= 0 ValueError: If len(input_shard_axes) != len(inputs) ValueError: If len(output_shard_axes) != len(outputs from `computation`)" 11905,batch_parallel,tensorflow/tensorflow/python/tpu/tpu.py,1849,function,"Shards `computation` along the batch dimension for parallel execution. Convenience wrapper around shard(). `inputs` must be a list of Tensors or None (equivalent to an empty list). Each input is split into `num_shards` pieces along the 0-th dimension, and computation is applied to each shard in parallel. Tensors are broadcast to all shards if they are lexically captured by `computation`. e.g., x = tf.constant(7) def computation(): return x + 3 ... = shard(computation, ...) The outputs from all shards are concatenated back together along their 0-th dimension. Inputs and outputs of the computation must be at least rank-1 Tensors. Args: computation: A Python function that builds a computation to apply to each shard of the input. inputs: A list of input tensors or None (equivalent to an empty list). The 0-th dimension of each Tensor must have size divisible by `num_shards`. num_shards: The number of shards. infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple of arguments as inputs to `computation`. device_assignment: If not `None`, a `DeviceAssignment` describing the mapping between logical cores in the computation and physical cores in the TPU topology. Uses a default device assignment if `None`. The `DeviceAssignment` may be omitted if each shard of the computation uses only one core, and there is either only one shard, or the number of shards is equal to the number of cores in the TPU system. name: (Deprecated) Does nothing. xla_options: An instance of `tpu.XLAOptions` which indicates the options passed to the XLA compiler. Use `None` for default options. Returns: A list of output tensors. Raises: ValueError: If `num_shards <= 0`"
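A short sketch of the `batch_parallel` wrapper described above, splitting a batch of 8 across 2 shards (TF1-style graph mode; shapes are illustrative):

```python
import tensorflow as tf

def computation(x):
  # Per-shard computation; each shard sees a [4, 4] slice of the batch.
  return tf.reduce_sum(x, axis=1, keepdims=True)

x = tf.compat.v1.placeholder(tf.float32, [8, 4])
# Outputs from both shards are concatenated back along dimension 0.
[y] = tf.compat.v1.tpu.batch_parallel(computation, inputs=[x], num_shards=2)
```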
11906,rewrite,tensorflow/tensorflow/python/tpu/tpu.py,1910,function,"Rewrites `computation` for execution on a TPU system. Args: computation: A Python function that builds a computation to apply to the input. If the function takes n inputs, 'inputs' should be a list of n tensors. `computation` may return a list of operations and tensors. Tensors must come before operations in the returned list. The return value of `rewrite` is a list of tensors corresponding to the tensors from the output of `computation`. All `Operation`s constructed during `computation` will be executed when evaluating any of the returned output tensors, not just the ones returned. inputs: A list of input tensors or `None` (equivalent to an empty list). Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in an N-dimension list of scalar tensors rather than a single rank-N tensor. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple of arguments as inputs to `computation`. device_assignment: if not `None`, a `DeviceAssignment` describing the mapping between logical cores in the computation and physical cores in the TPU topology. May be omitted for a single-core computation, in which case the core attached to task 0, TPU device 0 is used. name: (Deprecated) Does nothing. xla_options: An instance of `tpu.XLAOptions` which indicates the options passed to the XLA compiler. Use `None` for default options. Returns: Same data structure as if computation(*inputs) is called directly with some exceptions for correctness. Exceptions include: 1) None output: a NoOp would be returned which control-depends on computation. 2) Single value output: A tuple containing the value would be returned. 3) Operation-only outputs: a NoOp would be returned which control-depends on computation. TODO(b/121383831): Investigate into removing these special cases." 11907,under_tpu_inference_context,tensorflow/tensorflow/python/tpu/tpu.py,1980,function,Check if it is currently under `_TPUInferenceContext`. 11908,_TPUInferenceContext,tensorflow/tensorflow/python/tpu/tpu.py,1997,class,"A `ControlFlowContext` for nodes inside a TPU inference computation. The primary role of `_TPUInferenceContext` is to indicate the mode of operation and possibly sanity check operators inside a tpu.rewrite_for_inference() computation." 11909,validate_inference_rewrite_for_variables,tensorflow/tensorflow/python/tpu/tpu.py,2037,function,"Validates whether rewrite_for_inference() 'worked' for variables. The rewrite_for_inference() method is supposed to append GuaranteeConstOps after ReadVariableOps, but this mechanism works only if you are using tf.compat.v1.get_variable() to create and access variables in your tpu computation. This validation method can be called immediately after calling tpu.rewrite_for_inference() to check whether GuaranteeConstOps were added to the graph. Typical usages: tpu.validate_inference_rewrite_for_variables( tf.compat.v1.get_default_graph()) tpu.validate_inference_rewrite_for_variables(sess.graph) Args: graph: The graph which needs to be validated. Raises: RuntimeError: if validation failed." 11910,rewrite_for_inference,tensorflow/tensorflow/python/tpu/tpu.py,2066,function,"Rewrites `computation` for inference on a TPU system. Other than 'rewriting' the computation to run on a TPU, if using variables in your computation, it moves the ReadVariableOps outside the TPU computation, and adds GuaranteeConst ops just after the ReadVariableOps. This mechanism works only if you are using tf.compat.v1.get_variable() to create and access variables in your tpu computation. You can validate whether this worked by calling the validate_inference_rewrite_for_variables() method immediately after this method, to check whether GuaranteeConstOps were added to the graph. Args: computation: A Python function that builds a computation to apply to the input. If the function takes n inputs, 'inputs' should be a list of n tensors. If the function returns m outputs, rewrite will return a list of m tensors. inputs: A list of input tensors or `None` (equivalent to an empty list). infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple of arguments as inputs to `computation`. device_assignment: if not `None`, a `DeviceAssignment` describing the mapping between logical cores in the computation and physical cores in the TPU topology. May be omitted for a single-core computation, in which case the core attached to task 0, TPU device 0 is used. name: The name of the operator. Returns: A list of output tensors."
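A minimal sketch of `tf.compat.v1.tpu.rewrite` as documented above; the model function is a stand-in:

```python
import tensorflow as tf

def computation(x):
  # Any TF1-style model function; a single matmul here.
  w = tf.compat.v1.get_variable('w', shape=[4, 2])
  return tf.matmul(x, w)

x = tf.compat.v1.placeholder(tf.float32, [8, 4])
# Returns the same structure as `computation`, with single values
# wrapped in a tuple (see the Returns section above).
outputs = tf.compat.v1.tpu.rewrite(computation, inputs=[x])
```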
11911,prune_unconnected_ops_from_xla,tensorflow/tensorflow/python/tpu/tpu.py,2135,function,"Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. Args: prune_graph: A tensorflow graph from which we wish to prune unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have no inputs and no consumers. These can often be left behind due to graph construction rewiring (for instance TF-Hub). While they never execute, they will cause XLA compilation to fail, so we strip them from XLA compilation by removing the tpu_replicate attribute." 11912,TableConfig,tensorflow/tensorflow/python/tpu/tpu_embedding.py,51,class,Embedding table configuration. 11913,FeatureConfig,tensorflow/tensorflow/python/tpu/tpu_embedding.py,144,class,Feature configuration. 11914,EnqueueData,tensorflow/tensorflow/python/tpu/tpu_embedding.py,179,class,Data to be enqueued through generate_enqueue_ops(). 11915,RaggedEnqueueData,tensorflow/tensorflow/python/tpu/tpu_embedding.py,221,class,RaggedTensor Data to be enqueued through generate_enqueue_ops(). 11916,get_enqueue_datas_list_from_sparse_tensors_list,tensorflow/tensorflow/python/tpu/tpu_embedding.py,263,function,"Convenience function for generate_enqueue_ops(). Args: sp_tensors_list: a list of dictionaries mapping from strings of feature names to SparseTensor. Each dictionary is for one TPU core. Dictionaries for the same host should be contiguous in the list. Returns: enqueue_datas_list: a list of dictionaries mapping from strings of feature names to EnqueueData. Each dictionary is for one TPU core. Dictionaries for the same host should be contiguous in the list." 11917,get_enqueue_datas_list_from_ragged_tensors_list,tensorflow/tensorflow/python/tpu/tpu_embedding.py,287,function,"Convenience function for generate_enqueue_ops(). Args: rg_tensors_list: a list of dictionaries mapping from strings of feature names to RaggedTensor. Each dictionary is for one TPU core. Dictionaries for the same host should be contiguous in the list. Returns: enqueue_datas_list: a list of dictionaries mapping from strings of feature names to RaggedEnqueueData. Each dictionary is for one TPU core. Dictionaries for the same host should be contiguous in the list." 11918,_OptimizationParameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,348,class,Parameters common to all optimizations. 11919,AdagradParameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,364,class,"Optimization parameters for Adagrad with TPU embeddings. Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the `optimization_parameters` argument to set the optimizer and its parameters. See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec` for more details. ``` estimator = tf.estimator.tpu.TPUEstimator( ... embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec( ... optimization_parameters=tf.tpu.experimental.AdagradParameters(0.1), ...)) ```" 11920,ProximalAdagradParameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,416,class,"Optimization parameters for ProximalAdagrad with TPU embeddings. Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the `optimization_parameters` argument to set the optimizer and its parameters. See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec` for more details." 11921,AdamParameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,474,class,"Optimization parameters for Adam with TPU embeddings. Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the `optimization_parameters` argument to set the optimizer and its parameters. See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec` for more details. ``` estimator = tf.estimator.tpu.TPUEstimator( ... embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec( ...
optimization_parameters=tf.tpu.experimental.AdamParameters(0.1), ...)) ```" 11922,FtrlParameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,551,class,"Optimization parameters for Ftrl with TPU embeddings. Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the `optimization_parameters` argument to set the optimizer and its parameters. See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec` for more details. ``` estimator = tf.estimator.tpu.TPUEstimator( ... embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec( ... optimization_parameters=tf.tpu.experimental.FtrlParameters(0.1), ...)) ```" 11923,ProximalYogiParameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,633,class,"Optimization parameters for Proximal Yogi with TPU embeddings. Implements the Yogi optimizer as described in [Adaptive Methods for Nonconvex Optimization](https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization). Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the `optimization_parameters` argument to set the optimizer and its parameters. See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec` for more details." 11924,StochasticGradientDescentParameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,711,class,"Optimization parameters for stochastic gradient descent for TPU embeddings. Pass this to `tf.estimator.tpu.experimental.EmbeddingConfigSpec` via the `optimization_parameters` argument to set the optimizer and its parameters. See the documentation for `tf.estimator.tpu.experimental.EmbeddingConfigSpec` for more details. ``` estimator = tf.estimator.tpu.TPUEstimator( ... embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec( ... optimization_parameters=( tf.tpu.experimental.StochasticGradientDescentParameters(0.1)))) ```" 11925,TPUEmbedding,tensorflow/tensorflow/python/tpu/tpu_embedding.py,757,class,"API for using TPU for embedding. Example: ``` table_config_user = tpu_embedding.TableConfig( vocabulary_size=4, dimension=2, initializer=initializer, combiner='mean') table_to_config_dict = {'video': table_config_video, 'user': table_config_user} feature_to_config_dict = {'watched': tpu_embedding.FeatureConfig('video'), 'favorited': tpu_embedding.FeatureConfig('video'), 'friends': tpu_embedding.FeatureConfig('user')} batch_size = 4 num_hosts = 1 optimization_parameters = tpu_embedding.AdagradParameters(1., 1.) mode = tpu_embedding.TRAINING embedding = tpu_embedding.TPUEmbedding( table_to_config_dict, feature_to_config_dict, batch_size, num_hosts, mode, optimization_parameters) batch_size_per_core = embedding.batch_size_per_core sparse_features_list = [] for host in hosts: with ops.device(host): for _ in range(embedding.num_cores_per_host): sparse_features = {} sparse_features['watched'] = sparse_tensor.SparseTensor(...) sparse_features['favorited'] = sparse_tensor.SparseTensor(...) sparse_features['friends'] = sparse_tensor.SparseTensor(...) 
sparse_features_list.append(sparse_features) enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list) embedding_variables_and_ops = embedding.create_variables_and_ops() def computation(): activations = embedding.get_activations() loss = compute_loss(activations) base_optimizer = gradient_descent.GradientDescentOptimizer( learning_rate=1) cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer( base_optimizer) train_op = cross_shard_optimizer.minimize(loss) gradients = ( tpu_embedding_gradient.get_gradients_through_compute_gradients( cross_shard_optimizer, loss, activations)) send_gradients_op = embedding.generate_send_gradients_op(gradients) with ops.control_dependencies([train_op, send_gradients_op]): loss = array_ops.identity(loss) loss = tpu.shard(computation, num_shards=embedding.num_cores) with self.test_session() as sess: sess.run(tpu.initialize_system(embedding_config= embedding.config_proto)) sess.run(variables.global_variables_initializer()) sess.run(embedding_variables_and_ops.load_ops()) sess.run(enqueue_ops) loss_val = sess.run(loss) ``` Example with weight decay: >>> def learning_rate_fn(global_step): ... return tf.compat.v1.train.polynomial_decay( ... learning_rate=5e-5, ... global_step=global_step, ... decay_steps=100000, ... end_learning_rate=0.0) >>> wordpiece_table_config = TableConfig( ... vocabulary_size=119547, ... dimension=256, ... learning_rate_fn=learning_rate_fn) >>> wordpiece_feature_config = FeatureConfig( ... table_id='bert/embeddings/word_embeddings', ... max_sequence_length=512) >>> optimization_parameters = AdamParameters( ... learning_rate=5e-5, ... epsilon=1e-6, ... weight_decay_factor=0.01, ... multiply_weight_decay_factor_by_learning_rate=True) >>> tpu_embedding = TPUEmbedding( ... table_to_config_dict={ ... 'bert/embeddings/word_embeddings': wordpiece_table_config, ... }, ... feature_to_config_dict={'input_ids': wordpiece_feature_config}, ... batch_size=128, ... mode=TRAINING, ... optimization_parameters=optimization_parameters, ... master='') >>> with tf.Graph().as_default(): ... init_tpu_op = tf.compat.v1.tpu.initialize_system( ... embedding_config=tpu_embedding.config_proto) ... tf.compat.v1.Session().run(init_tpu_op)" 11926,_validate_table_to_config_dict,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1559,function,Validate `table_to_config_dict`. 11927,_validate_feature_to_config_dict,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1567,function,Validate `feature_to_config_dict`. 11928,_validate_batch_size,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1587,function, 11929,_validate_optimization_parameters,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1594,function,"Validate global optimization_parameters and per table optimizers. If the global optimizer is `None`, all table optimizers should be non-`None`. Args: optimization_parameters: global optimizer provided in `TPUEmbedding` constructor. table_to_config_dict: A dictionary mapping from string of table name to `TableConfig`." 11930,_OptimizerHandler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1625,class,Interface class for handling optimizer specific logic. 11931,_AdagradHandler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1645,class,Handles Adagrad specific logic. 11932,_ProximalAdagradHandler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1717,class,Handles ProximalAdagrad specific logic. 11933,_AdamHandler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1794,class,Handles Adam specific logic.
11934,_FtrlHandler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1889,class,Handles Ftrl specific logic. 11935,_ProximalYogiHandler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,1987,class,Handles Proximal Yogi specific logic. 11936,_StochasticGradientDescentHandler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,2083,class,Handles stochastic gradient descent specific logic. 11937,_get_optimization_handler,tensorflow/tensorflow/python/tpu/tpu_embedding.py,2146,function,Gets the optimization handler given the parameter type. 11938,_create_ordered_dict,tensorflow/tensorflow/python/tpu/tpu_embedding.py,2163,function,Create an OrderedDict from Dict. 11939,_create_combiners,tensorflow/tensorflow/python/tpu/tpu_embedding.py,2168,function,"Create a per feature list of combiners, ordered by table." 11940,_create_table_to_features_and_num_features_dicts,tensorflow/tensorflow/python/tpu/tpu_embedding.py,2177,function,Create mapping from table to a list of its features. 11941,_create_device_fn,tensorflow/tensorflow/python/tpu/tpu_embedding.py,2203,function,Create device_fn() to use with _create_partitioned_variables(). 11942,_create_partitioned_variables,tensorflow/tensorflow/python/tpu/tpu_embedding.py,2227,function,Creates PartitionedVariables based on `num_hosts` for `table`. 11943,get_gradients_through_compute_gradients,tensorflow/tensorflow/python/tpu/tpu_embedding_gradient.py,32,function,"Compute gradients to send to TPU embedding. Args: optimizer: a subclass of optimizer.Optimizer, usually CrossShardOptimizer. Used to call compute_gradients(). loss: a Tensor to call optimizer.compute_gradients() on. activations: an OrderedDict mapping feature_name to Tensors of activations. Returns: An OrderedDict mapping from feature name Strings to Tensors of gradients of the loss wrt the activations of the features." 11944,create_dummy_table_variables,tensorflow/tensorflow/python/tpu/tpu_embedding_gradient.py,53,function,"Create dummy embedding table variables. The sole purpose of these dummy variables is to trigger gradient calculation wrt them so that the gradients wrt activation can be captured and later sent to TPU embedding. Args: tpu_embedding: TPUEmbedding, dummy table variables will be created for use with tpu_embedding. Returns: A tuple of dummy variables and their initializer. Raises: RuntimeError: if collection to store gradients already exists and is not empty." 11945,hook_dummy_table_variables_to_activations,tensorflow/tensorflow/python/tpu/tpu_embedding_gradient.py,103,function,"Have activations depend on dummy table variables for gradient intercept. Args: tpu_embedding: TPUEmbedding, activations and dummy_table_variables are from tpu_embedding. activations: An OrderedDict of feature name String to activation tensors. dummy_table_variables: An OrderedDict of table name String to dummy table variables. Returns: An OrderedDict of feature name String to activation tensors, which can be used just as the activations input." 11946,get_gradients_through_dummy_table_variables,tensorflow/tensorflow/python/tpu/tpu_embedding_gradient.py,129,function,"Get gradients wrt the activations of each feature. Args: tpu_embedding: TPUEmbedding, create dummy table variable to be used with tpu_embedding. Returns: An OrderedDict mapping feature name to gradient. Raises: ValueError: if some gradients are not defined." 11947,TPUShardedVariable,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,62,class,A ShardedVariable class for TPU.
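A sketch of how the dummy-table gradient helpers above are meant to compose, per their docstrings; these are internal modules, and `embedding` is assumed to be a configured `tpu_embedding.TPUEmbedding`:

```python
from tensorflow.python.tpu import tpu_embedding_gradient

# Dummy variables exist only to trigger gradient computation wrt them.
dummy_vars, dummy_init = tpu_embedding_gradient.create_dummy_table_variables(
    embedding)
activations = embedding.get_activations()
# Make activations depend on the dummy variables so gradients are captured.
activations = tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
    embedding, activations, dummy_vars)
# After building and minimizing the loss, recover per-feature gradients
# and send them to the TPU embedding engine.
gradients = tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
    embedding)
send_op = embedding.generate_send_gradients_op(gradients)
```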
11948,_add_key_attr,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,82,function, 11949,TPUEmbedding,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,87,class,"The TPUEmbedding mid level API. NOTE: When instantiated under a TPUStrategy, this class can only be created once per call to `tf.tpu.experimental.initialize_tpu_system`. If you wish to re-initialize the embedding engine you must re-initialize the TPU as well. Doing this will clear any variables from TPU, so ensure you have checkpointed before you do this. If further instances of the class are needed, set the `initialize_tpu_embedding` argument to `False`. This class can be used to support training large embeddings on TPU. When creating an instance of this class, you must specify the complete set of tables and features you expect to look up in those tables. See the documentation of `tf.tpu.experimental.embedding.TableConfig` and `tf.tpu.experimental.embedding.FeatureConfig` for more details on the complete set of options. We will cover the basic usage here. NOTE: multiple `FeatureConfig` objects can use the same `TableConfig` object, allowing different features to share the same table: ```python table_config_one = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) table_config_two = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) feature_config = { 'feature_one': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_one), 'feature_two': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_one), 'feature_three': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_two)} ``` There are two modes under which the `TPUEmbedding` class can be used. This depends on whether the class was created under a `TPUStrategy` scope or not. Under `TPUStrategy`, we allow access to the methods `enqueue`, `dequeue` and `apply_gradients`. We will show examples below of how to use these to train and evaluate your model. Under CPU, you only have access to the `embedding_tables` property, which allows access to the embedding tables so that you can use them to run model evaluation/prediction on CPU. First let's look at the `TPUStrategy` mode. Initial setup looks like: ```python strategy = tf.distribute.TPUStrategy(...) with strategy.scope(): embedding = tf.tpu.experimental.embedding.TPUEmbedding( feature_config=feature_config, batch_size=1024, optimizer=tf.tpu.experimental.embedding.SGD(0.1)) ``` When creating a distributed dataset that is to be passed to the enqueue operation, a special input option must be specified: ```python distributed_dataset = ( strategy.experimental_distribute_datasets_from_function( dataset_fn=..., options=tf.distribute.InputOptions( experimental_prefetch_to_device=False))) dataset_iterator = iter(distributed_dataset) ``` To use this API on TPU you should use a custom training loop. Below is an example of a training and evaluation step: ```python @tf.function def training_step(dataset_iterator, num_steps): def tpu_step(tpu_features): with tf.GradientTape() as tape: activations = embedding.dequeue() tape.watch(activations) model_output = model(activations) loss = ...
# some function of labels and model_output embedding_gradients = tape.gradient(loss, activations) embedding.apply_gradients(embedding_gradients) # Insert your model gradient and optimizer application here for _ in tf.range(num_steps): embedding_features, tpu_features = next(dataset_iterator) embedding.enqueue(embedding_features, training=True) strategy.run(tpu_step, args=(tpu_features, )) @tf.function def evaluation_step(dataset_iterator, num_steps): def tpu_step(tpu_features): activations = embedding.dequeue() model_output = model(activations) # Insert your evaluation code here. for _ in tf.range(num_steps): embedding_features, tpu_features = next(dataset_iterator) embedding.enqueue(embedding_features, training=False) strategy.run(tpu_step, args=(tpu_features, )) ``` NOTE: The calls to `enqueue` have `training` set to `True` when `embedding.apply_gradients` is used and set to `False` when `embedding.apply_gradients` is not present in the function. If you don't follow this pattern you may cause an error to be raised or the TPU may deadlock. In the above examples, we assume that the user has a dataset which returns a tuple where the first element of the tuple matches the structure of what was passed as the `feature_config` argument to the object initializer. Also we utilize `tf.range` to get a `tf.while_loop` in order to increase performance. When checkpointing your model, you should include your `tf.tpu.experimental.embedding.TPUEmbedding` object in the checkpoint. It is a trackable object and saving it will save the embedding tables and their optimizer slot variables: ```python checkpoint = tf.train.Checkpoint(model=model, embedding=embedding) checkpoint.save(...) ``` On CPU, only the `embedding_tables` property is usable. This will allow you to restore a checkpoint to the object and have access to the table variables: ```python model = model_fn(...) embedding = tf.tpu.experimental.embedding.TPUEmbedding( feature_config=feature_config, batch_size=1024, optimizer=tf.tpu.experimental.embedding.SGD(0.1)) checkpoint = tf.train.Checkpoint(model=model, embedding=embedding) checkpoint.restore(...) tables = embedding.embedding_tables ``` You can now use these tables in functions like `tf.nn.embedding_lookup` to perform your embedding lookups and pass the results to your model." 11950,TPUEmbeddingSaveable,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,1203,class,Save/Restore hook to Retrieve/Load TPUEmbedding variables. 11951,_ragged_embedding_lookup_with_reduce,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,1220,function,"Compute a ragged lookup followed by a reduce on axis 1. Args: table: The embedding table. ragged: A RaggedTensor of ids to look up. weights: A RaggedTensor of weights (or None). combiner: One of ""mean"", ""sum"", ""sqrtn"". Returns: A Tensor." 11952,cpu_embedding_lookup,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,1245,function,"Uses CPU embedding lookup for embedding ids in features. Args: inputs: a nested structure of Tensors, SparseTensors or RaggedTensors. weights: a nested structure of Tensors, SparseTensors or RaggedTensors or None for no weights. tables: a dict mapping TableConfig objects to Variables. feature_config: a nested structure of FeatureConfig objects with the same structure as inputs. Returns: A nested structure of Tensors with the same structure as inputs."
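A sketch of the CPU lookup path documented above; the internal-module imports, toy table/feature configuration, and id values are all assumptions for illustration:

```python
import tensorflow as tf
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils

table = tpu_embedding_v2_utils.TableConfig(vocabulary_size=8, dim=4)
feature_config = {'tokens': tpu_embedding_v2_utils.FeatureConfig(table=table)}
# `tables` maps each TableConfig to its backing variable, per the docstring.
tables = {table: tf.Variable(tf.random.uniform([8, 4]))}
ids = {'tokens': tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]],
                                 values=tf.constant([3, 1, 4], tf.int64),
                                 dense_shape=[2, 2])}
# Per-example embeddings are combined with the table's combiner ('mean').
activations = tpu_embedding_v2.cpu_embedding_lookup(
    ids, weights=None, tables=tables, feature_config=feature_config)
```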
11953,get_list_of_hosts,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,1302,function,"Returns a sorted list of CPU devices for the remote jobs. Args: strategy: A TPUStrategy object. Returns: A sorted list of device strings." 11954,extract_variable_info,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,1321,function,"Extracts the variable creation attributes from the kwargs. Args: kwargs: a dict of keyword arguments that were passed to a variable creator scope. Returns: A tuple of variable name, initialization function, shape, and dtype." 11955,make_sharded_variable_creator,tensorflow/tensorflow/python/tpu/tpu_embedding_v2.py,1355,function,"Makes a sharded variable creator given a list of hosts. Args: hosts: a list of tensorflow devices on which to shard the tensors. Returns: A variable creator function." 11956,TPUEmbeddingCorrectness,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_correctness_test.py,58,class, 11957,_compute_gradients_wrt_embedding_table,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_correctness_test.py,544,function,"Compute gradients wrt embedding_table. Args: batch_size: `int`, batch size. gradient_wrt_activation: `np.array` with shape `batch_size` by embedding `dimension`. embedding_table: `np.array` with shape `vocabulary_size` by embedding `dimension`. feature_indices: `indices` as used to construct `SparseTensor`. feature_values: `values` as used to construct `SparseTensor`. combiner: `String`, 'mean' or 'sum'. max_sequence_length: If non-zero, a sequence feature with the given length. Returns: Gradients wrt `embedding_table`, an `np.array` with shape `batch_size` by `vocabulary_size` by embedding `dimension`. Raises: ValueError: if `combiner` is not one of 'mean' or 'sum'." 11958,_unpack,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_correctness_test.py,593,function, 11959,_get_total_loss_tensor,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_correctness_test.py,599,function, 11960,_compute_loss,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_correctness_test.py,610,function, 11961,_get_variable,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_correctness_test.py,624,function, 11962,CPUEmbeddingTest,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_cpu_test.py,35,class, 11963,TPUEmbeddingCheckpointTest,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_test.py,69,class, 11964,TPUEmbeddingTest,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_test.py,316,class, 11965,_unpack,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_test.py,1216,function, 11966,_get_tmpdir,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_test.py,1222,function, 11967,_get_variable,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_test.py,1227,function, 11968,_Optimizer,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_utils.py,33,class,"Base class for all optimizers, with common parameters." 11969,SGD,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_utils.py,125,class,"Optimization parameters for stochastic gradient descent for TPU embeddings. Pass this to `tf.tpu.experimental.embedding.TPUEmbedding` via the `optimizer` argument to set the global optimizer and its parameters: ``` embedding = tf.tpu.experimental.embedding.TPUEmbedding( ... optimizer=tf.tpu.experimental.embedding.SGD(0.1)) ``` This can also be used in a `tf.tpu.experimental.embedding.TableConfig` as the optimizer parameter to set a table-specific optimizer. This will override the global embedding optimizer and its parameters defined above: ``` table_one = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=..., optimizer=tf.tpu.experimental.embedding.SGD(0.2)) table_two = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...)
feature_config = ( tf.tpu.experimental.embedding.FeatureConfig( table=table_one), tf.tpu.experimental.embedding.FeatureConfig( table=table_two)) embedding = tf.tpu.experimental.embedding.TPUEmbedding( feature_config=feature_config, batch_size=..., optimizer=tf.tpu.experimental.embedding.SGD(0.1)) ``` In the above example, the first feature will be looked up in a table that has a learning rate of 0.2 while the second feature will be looked up in a table that has a learning rate of 0.1. See 'tensorflow/core/protobuf/tpu/optimization_parameters.proto' for a complete description of these parameters and their impacts on the optimizer algorithm." 11970,Adagrad,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_utils.py,212,class,"Optimization parameters for Adagrad with TPU embeddings. Pass this to `tf.tpu.experimental.embedding.TPUEmbedding` via the `optimizer` argument to set the global optimizer and its parameters: ```python embedding = tf.tpu.experimental.embedding.TPUEmbedding( ... optimizer=tf.tpu.experimental.embedding.Adagrad(0.1)) ``` This can also be used in a `tf.tpu.experimental.embedding.TableConfig` as the optimizer parameter to set a table-specific optimizer. This will override the global embedding optimizer and its parameters defined above: ```python table_one = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=..., optimizer=tf.tpu.experimental.embedding.Adagrad(0.2)) table_two = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) feature_config = ( tf.tpu.experimental.embedding.FeatureConfig( table=table_one), tf.tpu.experimental.embedding.FeatureConfig( table=table_two)) embedding = tf.tpu.experimental.embedding.TPUEmbedding( feature_config=feature_config, batch_size=..., optimizer=tf.tpu.experimental.embedding.Adagrad(0.1)) ``` In the above example, the first feature will be looked up in a table that has a learning rate of 0.2 while the second feature will be looked up in a table that has a learning rate of 0.1. See 'tensorflow/core/protobuf/tpu/optimization_parameters.proto' for a complete description of these parameters and their impacts on the optimizer algorithm." 11971,Adam,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_utils.py,315,class,"Optimization parameters for Adam with TPU embeddings. Pass this to `tf.tpu.experimental.embedding.TPUEmbedding` via the `optimizer` argument to set the global optimizer and its parameters: NOTE: By default this optimizer is lazy, i.e. it will not apply the gradient update of zero to rows that were not looked up. You can change this behavior by setting `lazy_adam` to `False`. ```python embedding = tf.tpu.experimental.embedding.TPUEmbedding( ... optimizer=tf.tpu.experimental.embedding.Adam(0.1)) ``` This can also be used in a `tf.tpu.experimental.embedding.TableConfig` as the optimizer parameter to set a table-specific optimizer. This will override the global embedding optimizer and its parameters defined above: ```python table_one = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=..., optimizer=tf.tpu.experimental.embedding.Adam(0.2)) table_two = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) feature_config = ( tf.tpu.experimental.embedding.FeatureConfig( table=table_one), tf.tpu.experimental.embedding.FeatureConfig( table=table_two)) embedding = tf.tpu.experimental.embedding.TPUEmbedding( feature_config=feature_config, batch_size=...,
optimizer=tf.tpu.experimental.embedding.Adam(0.1)) ``` In the above example, the first feature will be looked up in a table that has a learning rate of 0.2 while the second feature will be looked up in a table that has a learning rate of 0.1. See 'tensorflow/core/protobuf/tpu/optimization_parameters.proto' for a complete description of these parameters and their impacts on the optimizer algorithm." 11972,TableConfig,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_utils.py,455,class,"Configuration data for one embedding table. This class holds the configuration data for a single embedding table. It is used as the `table` parameter of a `tf.tpu.experimental.embedding.FeatureConfig`. Multiple `tf.tpu.experimental.embedding.FeatureConfig` objects can use the same `tf.tpu.experimental.embedding.TableConfig` object. In this case a shared table will be created for those feature lookups. ```python table_config_one = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) table_config_two = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) feature_config = { 'feature_one': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_one), 'feature_two': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_one), 'feature_three': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_two)} embedding = tf.tpu.experimental.embedding.TPUEmbedding( feature_config=feature_config, batch_size=..., optimizer=tf.tpu.experimental.embedding.Adam(0.1)) ``` The above configuration has 2 tables, and three features. The first two features will be looked up in the first table and the third feature will be looked up in the second table." 11973,FeatureConfig,tensorflow/tensorflow/python/tpu/tpu_embedding_v2_utils.py,548,class,"Configuration data for one embedding feature. This class holds the configuration data for a single embedding feature. The main use is to assign features to `tf.tpu.experimental.embedding.TableConfig`s via the table parameter: ```python table_config_one = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) table_config_two = tf.tpu.experimental.embedding.TableConfig( vocabulary_size=..., dim=...) feature_config = { 'feature_one': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_one), 'feature_two': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_one), 'feature_three': tf.tpu.experimental.embedding.FeatureConfig( table=table_config_two)} embedding = tf.tpu.experimental.embedding.TPUEmbedding( feature_config=feature_config, batch_size=..., optimizer=tf.tpu.experimental.embedding.Adam(0.1)) ``` The above configuration has 2 tables, and three features. The first two features will be looked up in the first table and the third feature will be looked up in the second table. When feeding features into `embedding.enqueue` they can be `tf.Tensor`s, `tf.SparseTensor`s or `tf.RaggedTensor`s. When the argument `max_sequence_length` is 0, the default, you should expect an output of `embedding.dequeue` for this feature of shape `(batch_size, dim)`. If `max_sequence_length` is greater than 0, the feature is embedded as a sequence and padded up to the given length. The shape of the output for this feature will be `(batch_size, max_sequence_length, dim)`." 11974,partition_or_replicate_on_host,tensorflow/tensorflow/python/tpu/tpu_feed.py,40,function,"Partitions or replicates the input tensor. The ops inside this function are placed on the host side.
Args: tensor: The input tensor which will be partitioned or replicated. dims: A list of integers describing how to partition the input tensor. Returns: An iterator of `Tensor`s or a list of partitioned tensors." 11975,_tag_sharding_attribute_for_dequeued_tensor,tensorflow/tensorflow/python/tpu/tpu_feed.py,86,function,"Tags appropriate XLA sharding attribute to the dequeued tensor. The sharding attribute of the dequeued tensor will be a tuple. Args: tensor: The dequeued tensor on TPU. dims: A list of integers describing how the tensor is partitioned. Returns: The same tensor with the xla_sharding attribute." 11976,tag_sharding_attribute_for_dequeued_tensors,tensorflow/tensorflow/python/tpu/tpu_feed.py,110,function,"Tags appropriate XLA sharding attribute to the dequeued tensors. Args: dequeues: A list of dequeued tensors on TPU. dims: A list of integers describing how the tensor is partitioned. Returns: The same dequeues with appropriate xla_sharding attribute." 11977,InfeedQueue,tensorflow/tensorflow/python/tpu/tpu_feed.py,125,class,"A helper object to build a device infeed queue. The InfeedQueue builds the host-side and device-side Ops to enqueue and dequeue elements, respectively, and ensures that their types and shapes match." 11978,_PartitionedInfeedQueue,tensorflow/tensorflow/python/tpu/tpu_feed.py,733,class,"A helper object to build a device infeed queue with input partition. Args: number_of_tuple_elements: the number of Tensors fed atomically through the queue, must be present unless it can be inferred from other arguments. device_assignment: A TPU `DeviceAssignment` which is used to place all the partitions to different TPU infeed queues. host_id: The id of the host machine. input_partition_dims: A nested list/tuple of integers. Each inner list/tuple describes how to partition the corresponding input tensor. tuple_types: If not None, a list of types of the elements of the queue. tuple_shapes: If not None, a list of shapes of the elements of the queue. name: The name of the queue." 11979,TpuContext,tensorflow/tensorflow/python/tpu/tpu_function.py,26,class,A context object holding state about the TPU computation being built. 11980,tpu_shard_context,tensorflow/tensorflow/python/tpu/tpu_function.py,47,function, 11981,get_tpu_context,tensorflow/tensorflow/python/tpu/tpu_function.py,57,function, 11982,on_device_training_loop,tensorflow/tensorflow/python/tpu/tpu_function.py,64,function, 11983,InfeedTest,tensorflow/tensorflow/python/tpu/tpu_infeed_test.py,29,class, 11984,CrossShardOptimizer,tensorflow/tensorflow/python/tpu/tpu_optimizer.py,33,class,An optimizer that averages gradients across TPU shards. 11985,get_tpu_cluster_resolver,tensorflow/tensorflow/python/tpu/tpu_outside_compilation_test.py,48,function, 11986,get_tpu_strategy,tensorflow/tensorflow/python/tpu/tpu_outside_compilation_test.py,57,function, 11987,TpuOutsideCompilationTest,tensorflow/tensorflow/python/tpu/tpu_outside_compilation_test.py,64,class, 11988,ShardingPolicy,tensorflow/tensorflow/python/tpu/tpu_sharding.py,31,class,"An object used to hold the sharding policy for a Tensor." 11989,ShardingTest,tensorflow/tensorflow/python/tpu/tpu_sharding_test.py,28,class, 11990,initialize_tpu_system,tensorflow/tensorflow/python/tpu/tpu_strategy_util.py,41,function,"Initialize the TPU devices. Args: cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. Returns: The tf.tpu.Topology object for the topology of the TPU cluster.
Raises: RuntimeError: If running inside a tf.function. NotFoundError: If no TPU devices found in eager mode." 11991,shutdown_tpu_system,tensorflow/tensorflow/python/tpu/tpu_strategy_util.py,139,function,"Shuts down the TPU devices. This will clear all caches, even those that are maintained through sequential calls to tf.tpu.experimental.initialize_tpu_system, such as the compilation cache. Args: cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver, which provides information about the TPU cluster. Raises: RuntimeError: If no TPU devices found for eager execution or if run in a tf.function." 11992,TPUSystemMetadata,tensorflow/tensorflow/python/tpu/tpu_system_metadata.py,45,class,"Describes some metadata about the TPU system. Attributes: num_cores: integer. Total number of TPU cores in the TPU system. num_hosts: integer. Total number of hosts (TPU workers) in the TPU system. num_of_cores_per_host: integer. Number of TPU cores per host (TPU worker). topology: an instance of `tf.tpu.experimental.Topology`, which describes the physical topology of TPU system. devices: a tuple of strings, which describes all the TPU devices in the system." 11993,_query_tpu_system_metadata,tensorflow/tensorflow/python/tpu/tpu_system_metadata.py,72,function,Automatically detects the TPU system metadata in the system. 11994,_obtain_topology,tensorflow/tensorflow/python/tpu/tpu_system_metadata.py,171,function,Obtains TPU fabric topology. 11995,get_session_config_with_timeout,tensorflow/tensorflow/python/tpu/tpu_system_metadata.py,191,function,Returns a session config given a timeout and a cluster configuration. 11996,master_job,tensorflow/tensorflow/python/tpu/tpu_system_metadata.py,198,function,"Returns the canonical job name to use to place TPU computations on. Args: master: A `string` representing the TensorFlow master to use. cluster_def: A ClusterDef object describing the TPU cluster. Returns: A string containing the job name, or None if no job should be specified. Raises: ValueError: If the user needs to specify a tpu_job_name, because we are unable to infer the job name automatically, or if the user-specified job names are inappropriate." 11997,TPUContextTest,tensorflow/tensorflow/python/tpu/tpu_test.py,41,class, 11998,TPULayerRewriteTest,tensorflow/tensorflow/python/tpu/tpu_test.py,56,class, 11999,TPUGraphPruneTest,tensorflow/tensorflow/python/tpu/tpu_test.py,87,class, 12000,do_einsum,tensorflow/tensorflow/python/tpu/tpu_test.py,144,function, 12001,find_einsum,tensorflow/tensorflow/python/tpu/tpu_test.py,150,function, 12002,find_xla_einsum,tensorflow/tensorflow/python/tpu/tpu_test.py,158,function, 12003,TPUXlaEinsumTest,tensorflow/tensorflow/python/tpu/tpu_test.py,166,class, 12004,maybe_define_flags,tensorflow/tensorflow/python/tpu/tpu_test_wrapper.py,58,function,Defines any required flags that are missing. 12005,set_random_test_dir,tensorflow/tensorflow/python/tpu/tpu_test_wrapper.py,67,function,"Pick a random GCS directory under --test_dir_base, set as --model_dir." 12006,calculate_parent_python_path,tensorflow/tensorflow/python/tpu/tpu_test_wrapper.py,73,function,"Returns the absolute import path for the containing directory. Args: test_filepath: The filepath which Bazel invoked (ex: /filesystem/path/tensorflow/tensorflow/python/tpu/tpu_test) Returns: Absolute import path of parent (ex: tensorflow.python.tpu). Raises: ValueError: if bazel_repo_root does not appear within test_filepath."
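A minimal sketch of the initialization flow documented for `initialize_tpu_system`, using the public TF2 APIs; `'my-tpu'` is a hypothetical Cloud TPU name:

```python
import tensorflow as tf

resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='my-tpu')
tf.config.experimental_connect_to_cluster(resolver)
# Returns a tf.tpu.experimental.Topology describing the TPU fabric.
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
```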
12007,import_user_module,tensorflow/tensorflow/python/tpu/tpu_test_wrapper.py,100,function,"Imports the flag-specified user test code. This runs all top-level statements in the user module, specifically flag definitions. Returns: The user test module." 12008,_is_test_class,tensorflow/tensorflow/python/tpu/tpu_test_wrapper.py,113,function,"Check if an arbitrary object is a test class (not a test object!). Args: obj: An arbitrary object from within a module. Returns: True iff obj is a test class inheriting at some point from a class named ""TestCase"". This is because we write tests using different underlying test libraries." 12009,move_test_classes_into_scope,tensorflow/tensorflow/python/tpu/tpu_test_wrapper.py,131,function,"Add all test classes defined in wrapped module to our module. The test runner works by inspecting the main module for TestCase classes, so by adding a module-level reference to the TestCase we cause it to execute the wrapped TestCase. Args: wrapped_test_module: The user-provided test code to run." 12010,run_user_main,tensorflow/tensorflow/python/tpu/tpu_test_wrapper.py,146,function,"Runs the ""if __name__ == '__main__'"" at the bottom of a module. TensorFlow practice is to have a main `if` block at the bottom of the module, which might call an API compat function before calling test.main(). Since this is a statement, not a function, we can't cleanly reference it, but we can inspect it from the user module and run it in the context of that module so all imports and variables are available to it. Args: wrapped_test_module: The user-provided test code to run. Raises: NotImplementedError: If main block was not found in module. This should not be caught, as it is likely an error on the user's part -- absltest is all too happy to report a successful status (and zero tests executed) if a user forgets to end a test file with ""test.main()""." 12011,TPUTestWrapperTest,tensorflow/tensorflow/python/tpu/tpu_test_wrapper_test.py,31,class, 12012,_write_and_load_module,tensorflow/tensorflow/python/tpu/tpu_test_wrapper_test.py,202,function, 12013,while_loop,tensorflow/tensorflow/python/tpu/training_loop.py,30,function,"Builds a training loop for TPUs. The set of loop-carried tensors corresponds to `inputs`. Both `condition` and `body` take the current value of the loop-carried tensors. `body` additionally takes a tuple of infeed values from `infeed_queue` if `infeed_queue` is not None. `condition` must return a single boolean value that determines whether iteration continues. `body` must return an updated list of values for the loop-carried tensors. Args: condition: a Python function that builds the loop condition. body: a Python function that builds the loop body. inputs: a list of initial values passed into the training loop, or None (equivalent to an empty list). infeed_queue: if not None, the infeed queue from which to append a tuple of arguments as inputs to body. name: (Deprecated) Does nothing. Returns: The final values of the loop-carried tensors. Raises: TypeError: if body or condition has the wrong signature."
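A minimal sketch of the internal `training_loop.while_loop` API described above (not part of the public `tf.*` namespace; intended to run inside a TPU computation):

```python
from tensorflow.python.tpu import training_loop

def condition(i):
  # Iteration continues while this returns True.
  return i < 10

def body(i):
  # One loop step; returns the updated loop-carried tensors.
  return i + 1

# Builds the loop ops; `inputs` seeds the loop-carried tensors.
outputs = training_loop.while_loop(condition, body, inputs=[0])
```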
infeed_queue: if not None, the infeed queue from which to append a tuple of arguments as inputs to condition. name: (Deprecated) Does nothing. Returns: The final values of the loop-carried tensors. Raises: ValueError: if there is a type error." 12015,_utcnow,tensorflow/tensorflow/python/tpu/client/client.py,55,function,"A wrapper function around datetime.datetime.utcnow. This function is created for unit testing purposes. It's not easy to do StubOutWithMock with the datetime.datetime package. Returns: datetime.datetime" 12016,_environment_discovery_url,tensorflow/tensorflow/python/tpu/client/client.py,67,function, 12017,_request_compute_metadata,tensorflow/tensorflow/python/tpu/client/client.py,71,function, 12018,_environment_var_to_network_endpoints,tensorflow/tensorflow/python/tpu/client/client.py,79,function,Yields a dict with ip address and port. 12019,_get_tpu_name,tensorflow/tensorflow/python/tpu/client/client.py,96,function, 12020,_as_text,tensorflow/tensorflow/python/tpu/client/client.py,106,function, 12021,Client,tensorflow/tensorflow/python/tpu/client/client.py,112,class,"Client for working with the Cloud TPU API. This client is intended to be used for resolving a TPU name to IP addresses. It's recommended to use this library as a context manager to utilize all functionality." 12022,mock_utcnow,tensorflow/tensorflow/python/tpu/client/client_test.py,39,function, 12023,mock_request_compute_metadata,tensorflow/tensorflow/python/tpu/client/client_test.py,43,function, 12024,MockRequestClass,tensorflow/tensorflow/python/tpu/client/client_test.py,53,class, 12025,MockNodeClass,tensorflow/tensorflow/python/tpu/client/client_test.py,72,class, 12026,CloudTpuClientTest,tensorflow/tensorflow/python/tpu/client/client_test.py,81,class, 12027,_create_default_group_assignment,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,34,function, 12028,all_to_all,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,45,function,"Exchange data across TPU replicas. Args: x: The local tensor. concat_dimension: The dimension number to concatenate. split_dimension: The dimension number to split. split_count: The number of splits; this number must equal the sub-group size (group_assignment.get_shape()[1]). group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the replica ids in the ith subgroup. name: Optional op name. Returns: A `Tensor` which is concatenated by data from different replicas." 12029,_all_to_all_grad,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,79,function, 12030,cross_replica_sum,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,94,function,"Sum the input tensor across replicas according to group_assignment. Args: x: The local tensor to sum. group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the replica ids in the ith subgroup. name: Optional op name. Returns: A `Tensor` which is summed across replicas." 12031,collective_permute,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,113,function,"Permute the input tensor across replicas given source_target_pairs. For each source_target_pair, we send replica a's input to replica b. Each replica id must only appear once in the source column. Also it must only appear once in the target column. For a replica id not in the target column, this op returns a zero tensor with the same shape and dtype of the input x. For example, suppose there are 4 TPU instances: `[A, B, C, D]`.
Passing source_target_pairs=`[[0,1],[1,2],[2,3]]` gets the outputs: `[0, A, B, C]`. Args: x: The local tensor to be permuted. source_target_pairs: 2d int lists with shape [num_pairs, 2]. source_target_pairs[i][0] represents the source replica id and source_target_pairs[i][1] represents the target replica id. name: Optional op name. Returns: A `Tensor` which is permuted." 12032,_collective_permute_grad,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,140,function, 12033,_cross_replica_sum_grad,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,149,function, 12034,_embedding_activations_grad,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,166,function,Saves the gradient of embedding activations ops in a graph collection. 12035,infeed_dequeue,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,196,function,"A placeholder op for a value that will be fed into the computation. Args: dtype: A `tf.DType`. The type of elements in the tensor. shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. A tensor that will be provided using the infeed mechanism. Raises: TypeError: If `dtype` is not a supported infeed type." 12036,infeed_dequeue_tuple,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,221,function,"A placeholder op for values fed into the TPU simultaneously as a tuple. Args: dtypes: A list of `tf.DType`s that has length `>= 1`. The element types of each element in `outputs`. shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). The shapes of each tensor in `outputs`. name: A name for the operation (optional). Returns: A list of `Tensor` objects of type `dtypes`. A list of tensors that will be provided using the infeed mechanism. Raises: TypeError: If a type in `dtypes` is not a supported infeed type." 12037,send_tpu_embedding_gradients,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,248,function,"A placeholder op for feeding per-sample gradients to the embedding layer. Args: inputs: A TensorList of gradients with which to update embedding tables. This argument has the same length and shapes as the return value of RecvTPUEmbeddingActivations, but contains gradients of the model's loss with respect to the embedding activations. The embedding tables are updated from these gradients via the optimizers specified in the TPU embedding configuration given to tpu.initialize_system. config: Serialized TPUEmbeddingConfiguration proto. learning_rates: A TensorList of float32 scalars, one for each dynamic learning rate tag: see the comments in //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. Multiple tables can share the same dynamic learning rate tag as specified in the configuration. If the learning rates for all tables are constant, this list should be empty. name: A name for the operation (optional). Returns: A SendTPUEmbeddingGradients operation." 12038,enqueue_tpu_embedding_integer_batch,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,285,function,"A placeholder op for enqueueing embedding IDs to the TPU. Args: batch: A list of 1D tensors, one for each embedding table, containing the indices into the tables. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'train', 'backward_pass_only'}.
When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingIntegerBatch operation." 12039,enqueue_tpu_embedding_sparse_batch,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,320,function,"A placeholder op for enqueueing embedding IDs to the TPU. Args: sample_indices: A list of rank 1 Tensors specifying the training example and feature to which the corresponding embedding_indices and aggregation_weights values belong. sample_indices[i] must equal b * nf + f, where nf is the number of features from the corresponding table, f is in [0, nf), and b is in [0, batch size). Both int32 and int64 are allowed, and will be converted to int32 internally. embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. Both int32 and int64 are allowed and will be converted to int32 internally. aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per (training example, feature) -- aggregation weights. Both float32 and float64 are allowed and will be converted to float32 internally. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. combiners: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables (optional). mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'train', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingSparseBatch operation." 12040,enqueue_tpu_embedding_sparse_tensor_batch,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,377,function,"A placeholder op for enqueueing embedding IDs to the TPU. Args: sample_indices: A list of rank 2 Tensors specifying the training example to which the corresponding embedding_indices and aggregation_weights values belong. It corresponds to sp_ids.indices in embedding_lookup_sparse(). If the size of its first dimension is 0, we assume each embedding_indices belongs to a different sample. Both int32 and int64 are allowed and will be converted to int32 internally. embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. It corresponds to sp_ids.values in embedding_lookup_sparse(). Both int32 and int64 are allowed and will be converted to int32 internally. aggregation_weights: A list of rank 1 Tensors containing per training example aggregation weights. It corresponds to sp_weights.values in embedding_lookup_sparse(). If the size of its first dimension is 0, we assume all weights are 1. Both float32 and float64 are allowed and will be converted to float32 internally. table_ids: A list of integers specifying the identifier of the embedding table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the corresponding input. The ith input is looked up using table_ids[i]. The size of the table_ids list must be equal to that of sample_indices, embedding_indices and aggregation_weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. max_sequence_lengths: A list of integers, the size of which is equal to that of sample_indices. If equal to 0, the corresponding feature is considered to be a non-sequence feature. If greater than 0, the corresponding feature is a sequence feature with the given maximal length. If None, then we assume a list of all zeroes. combiners: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables (optional). mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'train', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingSparseTensorBatch operation." 12041,enqueue_tpu_embedding_ragged_tensor_batch,tensorflow/tensorflow/python/tpu/ops/tpu_ops.py,450,function,"A placeholder op for enqueueing embedding IDs to the TPU. Args: sample_splits: A list of rank 1 Tensors specifying the break points for splitting embedding_indices and aggregation_weights into rows. It corresponds to ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. Both int32 and int64 are allowed and will be converted to int32 internally. embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor. Both int32 and int64 are allowed and will be converted to int32 internally. aggregation_weights: A list of rank 1 Tensors containing per training example aggregation weights. It corresponds to the values field of a RaggedTensor with the same row_splits as ids in embedding_lookup(), when ids is a RaggedTensor. Both float32 and float64 are allowed and will be converted to float32 internally. table_ids: A list of integers specifying the identifier of the embedding table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the corresponding input. The ith input is looked up using table_ids[i]. The size of the table_ids list must be equal to that of sample_indices, embedding_indices and aggregation_weights. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. max_sequence_lengths: A list of integers, the size of which is equal to that of sample_indices. If equal to 0, the corresponding feature is considered to be a non-sequence feature. If greater than 0, the corresponding feature is a sequence feature with the given maximal length. If None, then we assume a list of all zeroes. combiners: A list of string scalars, one for each embedding table, specifying how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables (optional). mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration.
Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingRaggedTensorBatch operation." 12042,get_workers_list,tensorflow/tensorflow/python/tpu/profiler/capture_tpu_profile.py,85,function,"Returns a comma-separated list of TPU worker IP addresses. Gets cluster_spec from cluster_resolver. Uses the workers' task indices to obtain and return a list of IP addresses. Args: cluster_resolver: TensorFlow TPUClusterResolver instance. Returns: A string of comma-separated IP addresses. For example: '10.2.0.1,10.2.0.2,10.2.0.3,10.2.0.4' Raises: UnavailableError: cluster_resolver doesn't contain a valid cluster_spec." 12043,monitoring_helper,tensorflow/tensorflow/python/tpu/profiler/capture_tpu_profile.py,115,function,"Helper function to print monitoring results num_queries times. Args: service_addr: Address of the TPU profiler service. duration_ms: Duration of one monitoring sample in milliseconds. monitoring_level: An integer between 1 and 2. Level 2 is more verbose than level 1 and shows more metrics. num_queries: Number of monitoring samples to collect." 12044,run_main,tensorflow/tensorflow/python/tpu/profiler/capture_tpu_profile.py,135,function, 12045,main,tensorflow/tensorflow/python/tpu/profiler/capture_tpu_profile.py,139,function, 12046,ProfileAnalysisStub,tensorflow/tensorflow/python/tpu/profiler/profiler_analysis_pb2_grpc.py,28,class,"The ProfileAnalysis service provides an entry point for profiling TPUs and for serving profiled data to TensorBoard through gRPC." 12047,ProfileAnalysisServicer,tensorflow/tensorflow/python/tpu/profiler/profiler_analysis_pb2_grpc.py,65,class,"The ProfileAnalysis service provides an entry point for profiling TPUs and for serving profiled data to TensorBoard through gRPC." 12048,add_ProfileAnalysisServicer_to_server,tensorflow/tensorflow/python/tpu/profiler/profiler_analysis_pb2_grpc.py,96,function, 12049,AdadeltaOptimizer,tensorflow/tensorflow/python/training/adadelta.py,29,class,"Optimizer that implements the Adadelta algorithm. References: ADADELTA - An Adaptive Learning Rate Method: [Zeiler, 2012](http://arxiv.org/abs/1212.5701) ([pdf](http://arxiv.org/pdf/1212.5701v1.pdf))" 12050,AdadeltaOptimizerTest,tensorflow/tensorflow/python/training/adadelta_test.py,35,class, 12051,AdagradOptimizer,tensorflow/tensorflow/python/training/adagrad.py,32,class,"Optimizer that implements the Adagrad algorithm. References: Adaptive Subgradient Methods for Online Learning and Stochastic Optimization: [Duchi et al., 2011](http://jmlr.org/papers/v12/duchi11a.html) ([pdf](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf))" 12052,AdagradDAOptimizer,tensorflow/tensorflow/python/training/adagrad_da.py,30,class,"Adagrad Dual Averaging algorithm for sparse linear models. This optimizer takes care of regularization of unseen features in a mini-batch by updating them when they are seen with a closed-form update rule that is equivalent to having updated them on every mini-batch.
AdagradDA is typically used when there is a need for large sparsity in the trained model. This optimizer only guarantees sparsity for linear models. Be careful when using AdagradDA for deep networks as it will require careful initialization of the gradient accumulators for it to train. References: Adaptive Subgradient Methods for Online Learning and Stochastic Optimization: [Duchi et al., 2011](http://jmlr.org/papers/v12/duchi11a.html) ([pdf](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf))" 12053,AdagradDAOptimizerTest,tensorflow/tensorflow/python/training/adagrad_da_test.py,35,class, 12054,AdagradOptimizerTest,tensorflow/tensorflow/python/training/adagrad_test.py,37,class, 12055,AdamOptimizer,tensorflow/tensorflow/python/training/adam.py,32,class,"Optimizer that implements the Adam algorithm. References: Adam - A Method for Stochastic Optimization: [Kingma et al., 2015](https://arxiv.org/abs/1412.6980) ([pdf](https://arxiv.org/pdf/1412.6980.pdf))" 12056,adam_update_numpy,tensorflow/tensorflow/python/training/adam_test.py,37,function, 12057,AdamOptimizerTest,tensorflow/tensorflow/python/training/adam_test.py,55,class, 12058,basic_train_loop,tensorflow/tensorflow/python/training/basic_loops.py,25,function,"Basic loop to train a model. Calls `train_step_fn` in a loop to train a model. The function is called as: ```python train_step_fn(session, *args, **kwargs) ``` It is passed a `tf.compat.v1.Session` in addition to `args` and `kwargs`. The function typically runs one training step in the session. Args: supervisor: `tf.compat.v1.train.Supervisor` to run the training services. train_step_fn: Callable to execute one training step. Called repeatedly as `train_step_fn(session, *args, **kwargs)`. args: Optional positional arguments passed to `train_step_fn`. kwargs: Optional keyword arguments passed to `train_step_fn`. master: Master to use to create the training session. Defaults to `""""` which causes the session to be created in the local process." 12059,_test_dir,tensorflow/tensorflow/python/training/basic_loops_test.py,31,function, 12060,BasicTrainLoopTest,tensorflow/tensorflow/python/training/basic_loops_test.py,38,class, 12061,_HookTimer,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,54,class,"Base timer for determining when Hooks should trigger. Should not be instantiated directly." 12062,SecondOrStepTimer,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,91,class,"Timer that triggers at most once every N seconds or once every N steps. This symbol is also exported to v2 in tf.estimator namespace. See https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py" 12063,NeverTriggerTimer,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,158,class,Timer that never triggers. 12064,LoggingTensorHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,174,class,"Prints the given tensors every N local steps, every N seconds, or at end. The tensors will be printed to the log, with `INFO` severity. If you are not seeing the logs, you might want to add the following line after your imports: ```python tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) ``` Note that if `at_end` is True, `tensors` should not include any tensor whose evaluation produces a side effect such as consuming additional inputs."
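The v1 optimizers catalogued above (Adadelta, Adagrad, AdagradDA, Adam) all share one usage pattern: construct the optimizer, then call `minimize()` on a loss tensor. A minimal sketch, not taken from the indexed source, with a toy placeholder loss:

```python
# A minimal sketch of the shared v1 optimizer pattern; the model here
# (a single weight matrix) is a placeholder, not from the indexed source.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 3])
w = tf.get_variable("w", shape=[3, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

# Any of the indexed optimizers can be swapped in here, e.g.
# tf.train.AdagradOptimizer or tf.train.AdadeltaOptimizer.
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op, feed_dict={x: [[1.0, 2.0, 3.0]]})
```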
12065,get_or_create_steps_per_run_variable,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,278,function,"Gets or creates the steps_per_run variable. In Estimator, the user-provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The iterations of the loop are specified by this variable, which adjusts its value on the CPU after each device program execution and before the next execution. The purpose of using a variable, rather than a constant, is to allow Estimator to adapt the device training iterations according to the final steps specified by users. For example, if the user sets the steps_per_run as 4 and steps as 10 in Estimator.train(), the steps_per_run variable will have the following value before each training run. - 1st execution: steps_per_run = 4 - 2nd execution: steps_per_run = 4 - 3rd execution: steps_per_run = 2 As model_fn increases the global step once per train_op invocation, the global step is 10 after all executions, matching the steps=10 inputs passed in by users. Returns: A TF non-trainable resource variable. Raises: RuntimeError: If multiple steps_per_run variables were found." 12066,_MultiStepStopAtStepHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,325,class,Hook that requests stop at a specified step. 12067,StopAtStepHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,390,class,Hook that requests stop at a specified step. 12068,CheckpointSaverListener,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,446,class,"Interface for listeners that take action before or after checkpoint save. `CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is triggered, and provides callbacks at the following points: - before using the session - before each call to `Saver.save()` - after each call to `Saver.save()` - at the end of session To use a listener, implement a class and pass the listener to a `CheckpointSaverHook`, as in this example: ```python class ExampleCheckpointSaverListener(CheckpointSaverListener): def begin(self): # You can add ops to the graph here. print('Starting the session.') self.your_tensor = ... def before_save(self, session, global_step_value): print('About to write a checkpoint') def after_save(self, session, global_step_value): print('Done writing checkpoint.') if decided_to_stop_training(): return True def end(self, session, global_step_value): print('Done with the session.') ... listener = ExampleCheckpointSaverListener() saver_hook = tf.estimator.CheckpointSaverHook( checkpoint_dir, listeners=[listener]) with tf.compat.v1.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]): ... ``` A `CheckpointSaverListener` may simply take some action after every checkpoint save. It is also possible for the listener to use its own schedule to act less frequently, e.g. based on global_step_value. In this case, implementors should implement the `end()` method to handle actions related to the last checkpoint save. But the listener should not act twice if `after_save()` already handled this last checkpoint save. A `CheckpointSaverListener` can request training to be stopped, by returning True in `after_save`. Please note that, in a replicated distributed training setting, only `chief` should use this behavior. Otherwise each worker will do its own evaluation, which may be wasteful of resources." 12069,CheckpointSaverHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,513,class,Saves checkpoints every N steps or seconds.
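The listener example in the `CheckpointSaverListener` docstring above is partly pseudo-code. A minimal runnable sketch of the same wiring, not taken from the indexed source ('/tmp/ckpts' and the incrementing train op are placeholders):

```python
# A minimal sketch: a listener attached to CheckpointSaverHook, run under
# MonitoredTrainingSession with the default saver hook disabled.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

class PrintingListener(tf.train.CheckpointSaverListener):
    def after_save(self, session, global_step_value):
        # Returning True here would request that training stop.
        print("saved at step", global_step_value)

global_step = tf.train.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)  # placeholder training step

hook = tf.train.CheckpointSaverHook(
    checkpoint_dir="/tmp/ckpts", save_steps=10,
    listeners=[PrintingListener()])

with tf.train.MonitoredTrainingSession(
    checkpoint_dir="/tmp/ckpts", chief_only_hooks=[hook],
    save_checkpoint_secs=None, save_checkpoint_steps=None) as sess:
    for _ in range(30):
        sess.run(train_op)
```

Disabling `save_checkpoint_secs`/`save_checkpoint_steps` keeps `MonitoredTrainingSession` from installing its own default saver hook alongside the explicit one.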
12070,StepCounterHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,660,class,Hook that counts steps per second. 12071,NanLossDuringTrainingError,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,740,class, 12072,NanTensorHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,747,class,"Monitors the loss tensor and stops training if loss is NaN. Can either fail with an exception or just stop training." 12073,SummarySaverHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,779,class,Saves summaries every N steps. 12074,GlobalStepWaiterHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,888,class,"Delays execution until global step reaches `wait_until_step`. This hook delays execution until the global step reaches `wait_until_step`. It is used to gradually start workers in distributed settings. One example usage would be setting `wait_until_step=int(K*log(task_id+1))` assuming that task_id=0 is the chief." 12075,FinalOpsHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,937,class,A hook which evaluates `Tensors` at the end of a session. 12076,FeedFnHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,981,class,Runs `feed_fn` and sets the `feed_dict` accordingly. 12077,ProfilerHook,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,999,class,"Captures CPU/GPU profiling information every N steps or seconds. This produces files called ""timeline-<step>.json"", which are in Chrome Trace format. For more information see: https://github.com/catapult-project/catapult/blob/master/tracing/README.md" 12078,_as_graph_element,tensorflow/tensorflow/python/training/basic_session_run_hooks.py,1083,function,Retrieves Graph element. 12079,MockCheckpointSaverListener,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,59,class, 12080,SecondOrStepTimerTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,92,class, 12081,StopAtStepTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,144,class, 12082,LoggingTensorHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,218,class, 12083,CheckpointSaverHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,386,class, 12084,CheckpointSaverHookMultiStepTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,822,class, 12085,ResourceCheckpointSaverHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,915,class, 12086,StepCounterHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,958,class, 12087,SummarySaverHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,1166,class, 12088,GlobalStepWaiterHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,1331,class, 12089,FinalOpsHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,1383,class, 12090,ResourceSummarySaverHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,1447,class, 12091,FeedFnHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,1496,class, 12092,ProfilerHookTest,tensorflow/tensorflow/python/training/basic_session_run_hooks_test.py,1509,class, 12093,_evaluate,tensorflow/tensorflow/python/training/checkpoint_management.py,43,function,Returns the numpy value of a tensor.
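The checkpoint_management helpers catalogued from here onward (latest_checkpoint, get_checkpoint_state and friends) center on the 'checkpoint' state file. A minimal sketch, not taken from the indexed source ('/tmp/train' is a placeholder directory):

```python
# A minimal sketch of resolving checkpoint state from a directory.
import tensorflow as tf

ckpt_dir = "/tmp/train"

# latest_checkpoint resolves the 'checkpoint' state file to the newest
# checkpoint prefix, or returns None when the directory holds none.
path = tf.train.latest_checkpoint(ckpt_dir)
print("latest:", path)

# get_checkpoint_state exposes the underlying CheckpointState proto,
# including all not-yet-deleted checkpoint paths, oldest to newest.
state = tf.train.get_checkpoint_state(ckpt_dir)
if state is not None:
    print(state.model_checkpoint_path)
    print(list(state.all_model_checkpoint_paths))
```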
12094,_GetCheckpointFilename,tensorflow/tensorflow/python/training/checkpoint_management.py,50,function,"Returns a filename for storing the CheckpointState. Args: save_dir: The directory for saving and restoring checkpoints. latest_filename: Name of the file in 'save_dir' that is used to store the CheckpointState. Returns: The path of the file that contains the CheckpointState proto." 12095,generate_checkpoint_state_proto,tensorflow/tensorflow/python/training/checkpoint_management.py,67,function,"Generates a checkpoint state proto. Args: save_dir: Directory where the model was saved. model_checkpoint_path: The checkpoint file. all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted checkpoints, sorted from oldest to newest. If this is a non-empty list, the last element must be equal to model_checkpoint_path. These paths are also saved in the CheckpointState proto. all_model_checkpoint_timestamps: A list of floats, indicating the number of seconds since the Epoch when each checkpoint was generated. last_preserved_timestamp: A float, indicating the number of seconds since the Epoch when the last preserved checkpoint was written, e.g. due to a `keep_checkpoint_every_n_hours` parameter (see `tf.train.CheckpointManager` for an implementation). Returns: CheckpointState proto with model_checkpoint_path and all_model_checkpoint_paths updated to either absolute paths or relative paths to the current save_dir. Raises: ValueError: If `all_model_checkpoint_timestamps` was provided but its length does not match `all_model_checkpoint_paths`." 12096,update_checkpoint_state,tensorflow/tensorflow/python/training/checkpoint_management.py,136,function,"Updates the content of the 'checkpoint' file. This updates the checkpoint file containing a CheckpointState proto. Args: save_dir: Directory where the model was saved. model_checkpoint_path: The checkpoint file. all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted checkpoints, sorted from oldest to newest. If this is a non-empty list, the last element must be equal to model_checkpoint_path. These paths are also saved in the CheckpointState proto. latest_filename: Optional name of the checkpoint file. Defaults to 'checkpoint'. all_model_checkpoint_timestamps: Optional list of timestamps (floats, seconds since the Epoch) indicating when the checkpoints in `all_model_checkpoint_paths` were created. last_preserved_timestamp: A float, indicating the number of seconds since the Epoch when the last preserved checkpoint was written, e.g. due to a `keep_checkpoint_every_n_hours` parameter (see `tf.train.CheckpointManager` for an implementation). Raises: RuntimeError: If any of the model checkpoint paths conflict with the file containing CheckpointState." 12097,update_checkpoint_state_internal,tensorflow/tensorflow/python/training/checkpoint_management.py,177,function,"Updates the content of the 'checkpoint' file. This updates the checkpoint file containing a CheckpointState proto. Args: save_dir: Directory where the model was saved. model_checkpoint_path: The checkpoint file. all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted checkpoints, sorted from oldest to newest. If this is a non-empty list, the last element must be equal to model_checkpoint_path. These paths are also saved in the CheckpointState proto. latest_filename: Optional name of the checkpoint file. Defaults to 'checkpoint'. save_relative_paths: If `True`, will write relative paths to the checkpoint state file.
all_model_checkpoint_timestamps: Optional list of timestamps (floats, seconds since the Epoch) indicating when the checkpoints in `all_model_checkpoint_paths` were created. last_preserved_timestamp: A float, indicating the number of seconds since the Epoch when the last preserved checkpoint was written, e.g. due to a `keep_checkpoint_every_n_hours` parameter (see `tf.train.CheckpointManager` for an implementation). Raises: RuntimeError: If any of the model checkpoint paths conflict with the file containing CheckpointState." 12098,get_checkpoint_state,tensorflow/tensorflow/python/training/checkpoint_management.py,252,function,"Returns CheckpointState proto from the ""checkpoint"" file. If the ""checkpoint"" file contains a valid CheckpointState proto, returns it. Args: checkpoint_dir: The directory of checkpoints. latest_filename: Optional name of the checkpoint file. Defaults to 'checkpoint'. Returns: A CheckpointState if the state was available, None otherwise. Raises: ValueError: if the checkpoint read doesn't have model_checkpoint_path set." 12099,_prefix_to_checkpoint_path,tensorflow/tensorflow/python/training/checkpoint_management.py,308,function,"Returns the pathname of a checkpoint file, given the checkpoint prefix. For V1 checkpoint, simply returns the prefix itself (the data file). For V2, returns the pathname to the index file. Args: prefix: a string, the prefix of a checkpoint. format_version: the checkpoint format version that corresponds to the prefix. Returns: The pathname of a checkpoint file, taking into account the checkpoint format version." 12100,latest_checkpoint,tensorflow/tensorflow/python/training/checkpoint_management.py,328,function,"Finds the filename of the latest saved checkpoint file. Gets the checkpoint state given the provided checkpoint_dir and looks for a corresponding TensorFlow 2 (preferred) or TensorFlow 1.x checkpoint path. The latest_filename argument is only applicable if you are saving checkpoints using `v1.train.Saver.save`. See the [Training Checkpoints Guide](https://www.tensorflow.org/guide/checkpoint) for more details and examples. Args: checkpoint_dir: Directory where the variables were saved. latest_filename: Optional name for the protocol buffer file that contains the list of most recent checkpoint filenames. See the corresponding argument to `v1.train.Saver.save`. Returns: The full path to the latest checkpoint or `None` if no checkpoint was found." 12101,checkpoint_exists_internal,tensorflow/tensorflow/python/training/checkpoint_management.py,367,function,"Checks whether a V1 or V2 checkpoint exists with the specified prefix. This is an internal function to check if a checkpoint exists, since it takes into account the naming difference between V1 and V2 formats. Args: checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. Returns: A bool, true if a checkpoint referred to by `checkpoint_prefix` exists." 12102,checkpoint_exists,tensorflow/tensorflow/python/training/checkpoint_management.py,395,function,"Checks whether a V1 or V2 checkpoint exists with the specified prefix. This is the recommended way to check if a checkpoint exists, since it takes into account the naming difference between V1 and V2 formats. Args: checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking priority.
Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. Returns: A bool, true if a checkpoint referred to by `checkpoint_prefix` exists." 12103,get_checkpoint_mtimes,tensorflow/tensorflow/python/training/checkpoint_management.py,417,function,"Returns the mtimes (modification timestamps) of the checkpoints. Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files exist, collect their mtime. Both V2 and V1 checkpoints are considered, in that priority. This is the recommended way to get the mtimes, since it takes into account the naming difference between V1 and V2 formats. Note: If not all checkpoints exist, the length of the returned mtimes list will be smaller than the length of `checkpoint_prefixes` list, so mapping checkpoints to corresponding mtimes will not be possible. Args: checkpoint_prefixes: a list of checkpoint paths, typically the results of `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. Returns: A list of mtimes (in microseconds) of the found checkpoints." 12104,remove_checkpoint,tensorflow/tensorflow/python/training/checkpoint_management.py,463,function,"Removes a checkpoint given by `checkpoint_prefix`. Args: checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or V1/V2. checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to `SaverDef.V2`. meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'." 12105,_delete_file_if_exists,tensorflow/tensorflow/python/training/checkpoint_management.py,487,function,Deletes files matching `filespec`. 12106,meta_graph_filename,tensorflow/tensorflow/python/training/checkpoint_management.py,493,function,"Returns the meta graph filename. Args: checkpoint_filename: Name of the checkpoint file. meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'. Returns: MetaGraph file name." 12107,CheckpointManager,tensorflow/tensorflow/python/training/checkpoint_management.py,513,class,"Manages multiple checkpoints by keeping some and deleting unneeded ones. Example usage: ```python import tensorflow as tf checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) manager = tf.train.CheckpointManager( checkpoint, directory=""/tmp/model"", max_to_keep=5) status = checkpoint.restore(manager.latest_checkpoint) while True: # train manager.save() ``` `CheckpointManager` preserves its own state across instantiations (see the `__init__` documentation for details). Only one should be active in a particular directory at a time." 12108,LatestCheckpointWithRelativePaths,tensorflow/tensorflow/python/training/checkpoint_management_test.py,44,class, 12109,CheckpointStateTest,tensorflow/tensorflow/python/training/checkpoint_management_test.py,150,class, 12110,SaverUtilsTest,tensorflow/tensorflow/python/training/checkpoint_management_test.py,266,class, 12111,CheckpointManagerTest,tensorflow/tensorflow/python/training/checkpoint_management_test.py,325,class, 12112,_load_and_remap_matrix,tensorflow/tensorflow/python/training/checkpoint_ops.py,33,function,"Loads a 2-D (matrix) `Tensor` from checkpoint. Generates 1D-remappings for rows and columns using the `GenerateVocabRemapping` op, and initializes any anticipated values with the provided initializer. 
Then, uses the `LoadAndRemapMatrix` op to create a matrix that loads existing values from the checkpoint, while filling out ""missing"" values with the newly initialized values. See contrib/framework/ops/checkpoint_ops.cc for more information on the wrapped functionality (LoadAndRemapMatrix). This wrapper can be used to perform only row remapping or only col remapping. If only row remapping is desired, {new,old}_col_vocab_file should be `None`, and vice versa for column remapping. NOTE: This only supports div-partitioning the vocabulary on the 1st dimension (row axis) via `new_row_vocab_offset`. Args: ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from which the old matrix `Tensor` will be loaded. old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. new_row_vocab_offset: A 0-indexed integer representing what line to start reading at in the new row vocabulary. Used for partitioned variables. num_rows_to_load: Number of rows to load for the new vocabulary (note: to support variable partitioning and partial loading, this does not need to be the same as the number of entries in `new_row_vocab_file`). new_col_vocab_size: Number of columns to load - should be the same as the number of entries in `new_col_vocab_file`, since we don't support partitioning along the column axis. initializer: Callable initializer function that accepts a 1-D tensor as the arg to specify the shape of the returned tensor. Used to initialize missing values. old_row_vocab_size: The number of entries to consider in the old vocabulary. With the default value of -1, the entire old row vocabulary file will be used. Otherwise, only the first `old_row_vocab_size` entries will be considered for remapping. Must be smaller than the length of `old_row_vocab_file`. NOTE: we do not provide an equivalent `old_col_vocab_size` for classes. old_row_vocab_file: A scalar `Tensor` of type `string` containing the path to the old row vocabulary file. Can be None, which represents no remapping on the row axis. new_row_vocab_file: A scalar `Tensor` of type `string` containing the path to the new row vocabulary file. Can be None, which represents no remapping on the row axis - in which case, `new_row_vocab_offset` and `num_rows_to_load` work under the assumption that the new row vocab is the same as the old row vocab. old_col_vocab_file: A scalar `Tensor` of type `string` containing the path to the old column vocabulary file. Can be None, which represents no remapping on the column axis. new_col_vocab_file: A scalar `Tensor` of type `string` containing the path to the new column vocabulary file. Can be None, which represents no remapping on the column axis - in which case, `new_col_vocab_size` works under the assumption that the new col vocab is the same as the old col vocab. num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows to append. Must be >= 0. num_col_oov_buckets: `int` specifying the number of out-of-vocabulary columns to append. Must be >= 0. max_rows_in_memory: `int` specifying the maximum number of rows to load from the checkpoint at once. If less than or equal to 0, the entire matrix will be loaded into memory. Setting this arg trades increased disk reads for lower memory usage. Returns: A Tensor of shape `[num_rows_to_load + num_row_oov_buckets, new_col_vocab_size + num_col_oov_buckets]`, with values loaded from the specified tensor in the checkpoint, and any missing or OOV values initialized with the given `initializer`.
Raises: ValueError: If `num_row_oov_buckets` or `num_col_oov_buckets` < 0. ValueError: If either `old_row_vocab_file` or `new_row_vocab_file` is provided, while the other is not. Same for `old_col_vocab_file` and `new_col_vocab_file`. ValueError: If neither row vocabs nor col vocabs are provided." 12113,_load_and_remap_matrix_initializer,tensorflow/tensorflow/python/training/checkpoint_ops.py,206,function,"Returns a var initializer for loading and remapping a 2-D (matrix) tensor. The returned initializer loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint at `ckpt_path`. It will reorder the rows/columns according to the specified vocab files and append additional out-of-vocabulary rows/columns according to the number of OOV buckets. The format of the file at the `{old,new}_{row,col}_vocab_file` path should be a text file, with each line containing a single entity within the vocabulary. Let the function `line_of(f, ""x"")` return the 0-indexed line number of the entity ""x"" in file f, and the function `entity_at(f, i)` return the entity at line i of file f. Then, row i of the new output matrix will be taken from row `line_of(old_row_vocab_file, entity_at(new_row_vocab_file, i))` of the old matrix. If any entity in `new_row_vocab_file` is not found in `old_row_vocab_file`, that row is considered a ""missing"" row, and its values will be initialized using the `initializer` arg. The same logic also applies for the columns. For example, assuming that: * `old_row_vocab_file` contains ""mercury\nvenus\nmars"" * `new_row_vocab_file` contains ""venus\njupiter\nmercury"" * `old_col_vocab_file` contains ""good\nbetter\nbest"" * `new_col_vocab_file` contains ""good\nbest\nfantastic"" * `initializer` returns the natural numbers `[1, 2, 3, 4, ...]` * `w(i, j)` represents the value from row i, column j of the old matrix Then the new output matrix will look like: `[[w(1, 0), w(1, 2), 1], [2, 3, 4], [w(0, 0), w(0, 2), 5]]` If we further specify that: * `num_row_oov_buckets` == 2 * `num_col_oov_buckets` == 1 Then the new output matrix will look like: `[[w(1, 0), w(1, 2), 1, 12], [2, 3, 4, 13], [w(0, 0), w(0, 2), 5, 14], [6, 7, 8, 15], [9, 10, 11, 16]]` If `{old,new}_row_vocab_file` are None, we assume that the old and new row vocab files are the same, and no row remapping is done. If `{old,new}_col_vocab_file` are None, we assume that the old and new column vocab files are the same, and no column remapping is done. The returned initializer only supports div-partitioning along the row axis. It does not support partitioning along the column axis (as this is not common in practice) or mod-partitioning. NOTE: When this is used to warm-start variables, client code should use `tf.lookup.index_table_from_tensor()` like contrib/layers/python/layers/feature_column.py does, as opposed to `tf.feature_to_id()` - in order to ensure the underlying lookup tables are the same. Args: ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from which the old matrix `Tensor` will be loaded. old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. new_row_vocab_size: `int` specifying the number of entries in `new_row_vocab_file`. If no row remapping is needed (no row vocab provided), this should be equal to the number of rows to load from the old matrix (which can theoretically be smaller than the number of rows in the old matrix). new_col_vocab_size: `int` specifying the number of entries in `new_col_vocab_file`.
If no column remapping is needed (no column vocab provided), this should be equal to the number of columns in the old matrix. old_row_vocab_size: The number of entries to consider in the old vocabulary. With the default value of -1, the entire old row vocabulary file will be used. Otherwise, only the first `old_row_vocab_size` entries will be considered for remapping. Must be smaller than the length of `old_row_vocab_file`. NOTE: we do not provide an equivalent `old_col_vocab_size` for classes. old_row_vocab_file: A scalar `Tensor` of type `string` containing the path to the old row vocabulary file. Can be None, which represents no remapping on the row axis. new_row_vocab_file: A scalar `Tensor` of type `string` containing the path to the new row vocabulary file. Can be None, which represents no remapping on the row axis. old_col_vocab_file: A scalar `Tensor` of type `string` containing the path to the old column vocabulary file. Can be None, which represents no remapping on the column axis. new_col_vocab_file: A scalar `Tensor` of type `string` containing the path to the new column vocabulary file. Can be None, which represents no remapping on the column axis. num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows to append. Must be >= 0. num_col_oov_buckets: `int` specifying the number of out-of-vocabulary columns to append. Must be >= 0. initializer: Initializer function to initialize missing values. Accepts a 1-D tensor as the arg to specify the shape of the returned tensor. If `None`, defaults to using `zeros_initializer()`. max_rows_in_memory: `int` specifying the maximum number of rows to load from the checkpoint at once. If less than or equal to 0, the entire matrix will be loaded into memory. Setting this arg trades increased disk reads for lower memory usage. Returns: A variable initializer function that should be used to initialize a (potentially partitioned) `Variable` whose complete shape is `[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size + num_col_oov_buckets]`. Raises: TypeError: If `initializer` is specified but not callable." 12114,_load_embedding_initializer,tensorflow/tensorflow/python/training/checkpoint_ops.py,419,function,"Returns a variable initializer for loading pre-trained embeddings. Wrapper around `load_and_remap_matrix_initializer()` specialized for loading embedding weights and remapping according to the provided vocab files. See docs for `load_and_remap_matrix_initializer()` for more details. NOTE: Only for use with div-partitioned variables / vocabularies. Args: ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from which the old matrix `Tensor` will be loaded. embedding_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. new_vocab_size: Number of entries in the new vocab. embedding_dim: `int` specifying the dimension of the embedding vectors from the checkpoint. Must match the number of columns in the old embedding matrix. old_vocab_file: A scalar `Tensor` of type `string` containing the path to the old vocabulary file. new_vocab_file: A scalar `Tensor` of type `string` containing the path to the new vocabulary file. old_vocab_size: The number of entries to consider in the old vocabulary. With the default value of -1, the entire old row vocabulary file will be used. Otherwise, only the first `old_vocab_size` entries will be considered for remapping. Must be smaller than the length of `old_row_vocab_file`. num_oov_buckets: `int` specifying the number of out-of-vocabulary buckets to use.
Must be >= 0. initializer: Initializer function that accepts a 1-D tensor as the arg to specify the shape of the returned tensor. If `None`, defaults to using `truncated_normal_initializer()`. max_rows_in_memory: `int` specifying the maximum number of rows to load from the checkpoint at once. If less than or equal to 0, the entire matrix will be loaded into memory. Setting this arg trades increased disk reads for lower memory usage. Returns: A variable initializer function." 12115,LoadAndRemapWrappersTest,tensorflow/tensorflow/python/training/checkpoint_ops_test.py,40,class,Tests for the functionality of the Python wrappers. 12116,load_checkpoint,tensorflow/tensorflow/python/training/checkpoint_utils.py,46,function,"Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`. If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints, reader for the latest checkpoint is returned. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint file. Returns: `CheckpointReader` object. Raises: ValueError: If `ckpt_dir_or_file` resolves to a directory with no checkpoints." 12117,load_variable,tensorflow/tensorflow/python/training/checkpoint_utils.py,71,function,"Returns the tensor value of the given variable in the checkpoint. Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. name: Name of the variable to return. Returns: A numpy `ndarray` with a copy of the value of this variable." 12118,list_variables,tensorflow/tensorflow/python/training/checkpoint_utils.py,89,function,"Lists the checkpoint keys and shapes of variables in a checkpoint. Checkpoint keys are paths in a checkpoint graph. Example usage: ```python import tensorflow as tf import os ckpt_directory = ""/tmp/training_checkpoints/ckpt"" ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model) manager = tf.train.CheckpointManager(ckpt, ckpt_directory, max_to_keep=3) train_and_checkpoint(model, manager) tf.train.list_variables(manager.latest_checkpoint) ``` Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. Returns: List of tuples `(key, shape)`." 12119,wait_for_new_checkpoint,tensorflow/tensorflow/python/training/checkpoint_utils.py,121,function,"Waits until a new checkpoint file is found. Args: checkpoint_dir: The directory in which checkpoints are saved. last_checkpoint: The last checkpoint path used or `None` if we're expecting a checkpoint for the first time. seconds_to_sleep: The number of seconds to sleep for before looking for a new checkpoint. timeout: The maximum number of seconds to wait. If left as `None`, then the process will wait indefinitely. Returns: a new checkpoint path, or None if the timeout was reached." 12120,checkpoints_iterator,tensorflow/tensorflow/python/training/checkpoint_utils.py,153,function,"Continuously yield new checkpoint files as they appear. The iterator only checks for new checkpoints when control flow has been reverted to it. This means it can miss checkpoints if your code takes longer to run between iterations than `min_interval_secs` or the interval at which new checkpoints are written. The `timeout` argument is the maximum number of seconds to block waiting for a new checkpoint. It is used in combination with the `timeout_fn` as follows: * If the timeout expires and no `timeout_fn` was specified, the iterator stops yielding. * If a `timeout_fn` was specified, that function is called and if it returns a true boolean value the iterator stops yielding. 
* If the function returns a false boolean value then the iterator resumes the wait for new checkpoints. At this point the timeout logic applies again. This behavior gives control to callers on what to do if checkpoints do not come fast enough or stop being generated. For example, if callers have a way to detect that the training has stopped and know that no new checkpoints will be generated, they can provide a `timeout_fn` that returns `True` when the training has stopped. If they know that the training is still going on they return `False` instead. Args: checkpoint_dir: The directory in which checkpoints are saved. min_interval_secs: The minimum number of seconds between yielding checkpoints. timeout: The maximum number of seconds to wait between checkpoints. If left as `None`, then the process will wait indefinitely. timeout_fn: Optional function to call after a timeout. If the function returns True, then it means that no new checkpoints will be generated and the iterator will exit. The function is called with no arguments. Yields: String paths to latest checkpoint files as they arrive." 12121,init_from_checkpoint,tensorflow/tensorflow/python/training/checkpoint_utils.py,219,function,"Replaces `tf.Variable` initializers so they load from a checkpoint file. Values are not loaded immediately, but when the initializer is run (typically by running a `tf.compat.v1.global_variables_initializer` op). Note: This overrides default initialization ops of specified variables and redefines dtype. Assignment map supports the following syntax: * `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in current `scope_name` from `checkpoint_scope_name` with matching tensor names. * `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` - will initialize `scope_name/variable_name` variable from `checkpoint_scope_name/some_other_variable`. * `'scope_variable_name': variable` - will initialize given `tf.Variable` object with tensor 'scope_variable_name' from the checkpoint. * `'scope_variable_name': list(variable)` - will initialize list of partitioned variables with tensor 'scope_variable_name' from the checkpoint. * `'/': 'scope_name/'` - will load all variables in current `scope_name` from checkpoint's root (e.g. no scope). Supports loading into partitioned variables, which are represented as `'<variable>/part_<part #>'`. Example: ```python # Say, '/tmp/model.ckpt' has the following tensors: # -- name='old_scope_1/var1', shape=[20, 2] # -- name='old_scope_1/var2', shape=[50, 4] # -- name='old_scope_2/var3', shape=[100, 100] # Create new model's variables with tf.compat.v1.variable_scope('new_scope_1'): var1 = tf.compat.v1.get_variable('var1', shape=[20, 2], initializer=tf.compat.v1.zeros_initializer()) with tf.compat.v1.variable_scope('new_scope_2'): var2 = tf.compat.v1.get_variable('var2', shape=[50, 4], initializer=tf.compat.v1.zeros_initializer()) # Partition into 5 variables along the first axis. var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100], initializer=tf.compat.v1.zeros_initializer(), partitioner=lambda shape, dtype: [5, 1]) # Initialize all variables in `new_scope_1` from `old_scope_1`. init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'}) # Use names to specify which variables to initialize from checkpoint. init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/var1': 'new_scope_1/var1', 'old_scope_1/var2': 'new_scope_2/var2'}) # Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/var1': var1, 'old_scope_1/var2': var2}) # Initialize partitioned variables using variable's name init_from_checkpoint('/tmp/model.ckpt', {'old_scope_2/var3': 'new_scope_2/var3'}) # Or specify the list of tf.Variable objects. init_from_checkpoint('/tmp/model.ckpt', {'old_scope_2/var3': var3._get_variable_list()}) ``` Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. assignment_map: Dict, where keys are names of the variables in the checkpoint and values are current variables or names of current variables (in default graph). Raises: ValueError: If missing variables in current graph, or if missing checkpoints or tensors in checkpoints." 12122,_init_from_checkpoint,tensorflow/tensorflow/python/training/checkpoint_utils.py,309,function,See `init_from_checkpoint` for documentation. 12123,_get_checkpoint_filename,tensorflow/tensorflow/python/training/checkpoint_utils.py,396,function,Returns checkpoint filename given directory or specific checkpoint file. 12124,_set_checkpoint_initializer,tensorflow/tensorflow/python/training/checkpoint_utils.py,403,function,"Overrides given variable's initialization op. Sets variable initializer to assign op that initializes variable from tensor's value in the checkpoint. Args: variable: `tf.Variable` object. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. slice_spec: Slice specification for loading partitioned tensors. name: Name of the operation." 12125,_set_variable_or_list_initializer,tensorflow/tensorflow/python/training/checkpoint_utils.py,445,function,"Overrides initialization op of given variable or list of variables. Calls `_set_checkpoint_initializer` for each variable in the given list of variables. Args: variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects. ckpt_file: string, full path of the checkpoint. tensor_name: Name of the tensor to load from the checkpoint. Raises: ValueError: if all objects in `variable_or_list` are not partitions of the same large variable." 12126,_is_variable,tensorflow/tensorflow/python/training/checkpoint_utils.py,476,function, 12127,_collect_partitioned_variable,tensorflow/tensorflow/python/training/checkpoint_utils.py,481,function,Returns list of `tf.Variable` that comprise the partitioned variable. 12128,_create_checkpoints,tensorflow/tensorflow/python/training/checkpoint_utils_test.py,44,function, 12129,_create_partition_checkpoints,tensorflow/tensorflow/python/training/checkpoint_utils_test.py,63,function, 12130,CheckpointsTest,tensorflow/tensorflow/python/training/checkpoint_utils_test.py,84,class, 12131,CheckpointIteratorTest,tensorflow/tensorflow/python/training/checkpoint_utils_test.py,400,class, 12132,WaitForNewCheckpointTest,tensorflow/tensorflow/python/training/checkpoint_utils_test.py,474,class, 12133,Coordinator,tensorflow/tensorflow/python/training/coordinator.py,34,class,"A coordinator for threads. This class implements a simple mechanism to coordinate the termination of a set of threads. #### Usage: ```python # Create a coordinator. coord = Coordinator() # Start a number of threads, passing the coordinator to each of them. ...start thread 1...(coord, ...) ...start thread N...(coord, ...) # Wait for all the threads to terminate. coord.join(threads) ``` Any of the threads can call `coord.request_stop()` to ask for all the threads to stop. To cooperate with the requests, each thread must check for `coord.should_stop()` on a regular basis. 
`coord.should_stop()` returns `True` as soon as `coord.request_stop()` has been called. A typical thread running with a coordinator will do something like: ```python while not coord.should_stop(): ...do some work... ``` #### Exception handling: A thread can report an exception to the coordinator as part of the `request_stop()` call. The exception will be re-raised from the `coord.join()` call. Thread code: ```python try: while not coord.should_stop(): ...do some work... except Exception as e: coord.request_stop(e) ``` Main code: ```python try: ... coord = Coordinator() # Start a number of threads, passing the coordinator to each of them. ...start thread 1...(coord, ...) ...start thread N...(coord, ...) # Wait for all the threads to terminate. coord.join(threads) except Exception as e: ...exception that was passed to coord.request_stop() ``` To simplify the thread implementation, the Coordinator provides a context handler `stop_on_exception()` that automatically requests a stop if an exception is raised. Using the context handler the thread code above can be written as: ```python with coord.stop_on_exception(): while not coord.should_stop(): ...do some work... ``` #### Grace period for stopping: After a thread has called `coord.request_stop()`, the other threads have a fixed amount of time to stop; this is called the 'stop grace period' and defaults to 2 minutes. If any of the threads is still alive after the grace period expires, `coord.join()` raises a `RuntimeError` reporting the laggards. ```python try: ... coord = Coordinator() # Start a number of threads, passing the coordinator to each of them. ...start thread 1...(coord, ...) ...start thread N...(coord, ...) # Wait for all the threads to terminate, give them 10s grace period coord.join(threads, stop_grace_period_secs=10) except RuntimeError: ...one of the threads took more than 10s to stop after request_stop() ...was called. except Exception: ...exception that was passed to coord.request_stop() ```" 12134,LooperThread,tensorflow/tensorflow/python/training/coordinator.py,412,class,"A thread that runs code repeatedly, optionally on a timer. This thread class is intended to be used with a `Coordinator`. It repeatedly runs code specified either as `target` and `args` or by the `run_loop()` method. Before each run, the thread checks whether the coordinator has requested a stop; in that case, the looper thread terminates immediately. If the code being run raises an exception, that exception is reported to the coordinator and the thread terminates. The coordinator will then request all the other threads it coordinates to stop. You typically pass looper threads to the supervisor `Join()` method."
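The `Coordinator`/`LooperThread` pattern in the two entries above is easiest to see end to end in code. Below is a minimal sketch (not taken from the source tree; the `worker` function, thread count, and stop condition are illustrative) of threads cooperating through `should_stop()`/`request_stop()` and a graceful `join`:

```python
import threading
import tensorflow.compat.v1 as tf

def worker(coord, results, idx):
    # stop_on_exception() asks the coordinator to stop if this thread raises.
    with coord.stop_on_exception():
        while not coord.should_stop():
            results.append(idx)
            if len(results) >= 100:
                coord.request_stop()  # any thread may request a global stop

coord = tf.train.Coordinator()
results = []
threads = [threading.Thread(target=worker, args=(coord, results, i))
           for i in range(4)]
for t in threads:
    t.start()
# Raises RuntimeError if a thread outlives the stop grace period.
coord.join(threads, stop_grace_period_secs=10)
```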
12135,StopOnEvent,tensorflow/tensorflow/python/training/coordinator_test.py,30,function, 12136,RaiseOnEvent,tensorflow/tensorflow/python/training/coordinator_test.py,36,function, 12137,RaiseOnEventUsingContextHandler,tensorflow/tensorflow/python/training/coordinator_test.py,50,function, 12138,SleepABit,tensorflow/tensorflow/python/training/coordinator_test.py,58,function, 12139,WaitForThreadsToRegister,tensorflow/tensorflow/python/training/coordinator_test.py,64,function, 12140,CoordinatorTest,tensorflow/tensorflow/python/training/coordinator_test.py,72,class, 12141,_StopAt0,tensorflow/tensorflow/python/training/coordinator_test.py,337,function, 12142,LooperTest,tensorflow/tensorflow/python/training/coordinator_test.py,344,class, 12143,_RoundRobinStrategy,tensorflow/tensorflow/python/training/device_setter.py,40,class,"Returns the next ps task index for placement in round-robin order. This class is not to be used directly by users. See instead `replica_device_setter()` below." 12144,_ReplicaDeviceChooser,tensorflow/tensorflow/python/training/device_setter.py,71,class,"Class to choose devices for Ops in a replicated training setup. This class is not to be used directly by users. See instead `replica_device_setter()` below." 12145,replica_device_setter,tensorflow/tensorflow/python/training/device_setter.py,137,function,"Return a `device function` to use when building a Graph for replicas. Device functions are used in a `with tf.device(device_function):` statement to automatically assign devices to `Operation` objects as they are constructed. Device constraints are added from the inner-most context first, working outwards; the merging behavior adds constraints to fields that are yet unset by a more inner context. Currently the fields are (job, task, cpu/gpu). If `cluster` is `None`, and `ps_tasks` is 0, the returned function is a no-op. Otherwise, the value of `ps_tasks` is derived from `cluster`. By default, only Variable ops are placed on ps tasks, and the placement strategy is round-robin over all ps tasks. A custom `ps_strategy` may be used to do more intelligent placement, such as `tf.contrib.training.GreedyLoadBalancingStrategy`. For example, ```python # To build a cluster with two ps jobs on hosts ps0 and ps1, and 3 worker # jobs on hosts worker0, worker1 and worker2. cluster_spec = { ""ps"": [""ps0:2222"", ""ps1:2222""], ""worker"": [""worker0:2222"", ""worker1:2222"", ""worker2:2222""]} with tf.device(tf.compat.v1.train.replica_device_setter(cluster=cluster_spec)): # Build your graph v1 = tf.Variable(...) # assigned to /job:ps/task:0 v2 = tf.Variable(...) # assigned to /job:ps/task:1 v3 = tf.Variable(...) # assigned to /job:ps/task:0 # Run compute ``` Args: ps_tasks: Number of tasks in the `ps` job. Ignored if `cluster` is provided. ps_device: String. Device of the `ps` job. If empty, no `ps` job is used. Defaults to `ps`. worker_device: String. Device of the `worker` job. If empty, no `worker` job is used. merge_devices: `Boolean`. If `True`, device specifications are merged rather than overridden: a device field is only set if the existing constraint leaves it completely unset. cluster: `ClusterDef` proto or `ClusterSpec`. ps_ops: List of strings representing `Operation` types that need to be placed on `ps` devices. If `None`, defaults to `STANDARD_PS_OPS`. ps_strategy: A callable invoked for every ps `Operation` (i.e. matched by `ps_ops`), that takes the `Operation` and returns the ps task index to use. If `None`, defaults to a round-robin strategy across all `ps` devices.
Returns: A function to pass to `tf.device()`. Raises: TypeError: If `cluster` is not a dictionary or `ClusterDef` protocol buffer, or if `ps_strategy` is provided but is not callable." 12146,DeviceSetterTest,tensorflow/tensorflow/python/training/device_setter_test.py,30,class, 12147,_get_or_create_eval_step,tensorflow/tensorflow/python/training/evaluation.py,37,function,"Gets or creates the eval step `Tensor`. Returns: A `Tensor` representing a counter for the evaluation step. Raises: ValueError: If multiple `Tensors` have been added to the `tf.GraphKeys.EVAL_STEP` collection." 12148,_get_latest_eval_step_value,tensorflow/tensorflow/python/training/evaluation.py,64,function,"Gets the eval step `Tensor` value after running `update_ops`. Args: update_ops: A list of `Tensors` or a dictionary of names to `Tensors`, which are run before reading the eval step value. Returns: A `Tensor` representing the value for the evaluation step." 12149,_MultiStepStopAfterNEvalsHook,tensorflow/tensorflow/python/training/evaluation.py,81,class,Run hook used by the evaluation routines to run the `eval_ops` N times. 12150,_StopAfterNEvalsHook,tensorflow/tensorflow/python/training/evaluation.py,133,class,Run hook used by the evaluation routines to run the `eval_ops` N times. 12151,_evaluate_once,tensorflow/tensorflow/python/training/evaluation.py,172,function,"Evaluates the model at the given checkpoint path. During a single evaluation, the `eval_ops` is run until the session is interrupted or requested to finish. This is typically requested via a `tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running the requested number of times. Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of `Tensors` or a dictionary from names to `Tensors`. The `final_ops` is evaluated a single time after `eval_ops` has finished running and the fetched values of `final_ops` are returned. If `final_ops` is left as `None`, then `None` is returned. One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record summaries after the `eval_ops` have run. If `eval_ops` is `None`, the summaries run immediately after the model checkpoint has been restored. Note that `evaluate_once` creates a local variable used to track the number of evaluations run via `tf.contrib.training.get_or_create_eval_step`. Consequently, if a custom local init op is provided via a `scaffold`, the caller should ensure that the local init op also initializes the eval step. Args: checkpoint_path: The path to a checkpoint to use for evaluation. master: The BNS address of the TensorFlow master. scaffold: A `tf.compat.v1.train.Scaffold` instance for initializing variables and restoring variables. Note that `scaffold.init_fn` is used by the function to restore the checkpoint. If you supply a custom init_fn, then it must also take care of restoring the model from its checkpoint. eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to `Tensors`, which is run until the session is requested to stop, commonly done by a `tf.contrib.training.StopAfterNEvalsHook`. feed_dict: The feed dictionary to use when executing the `eval_ops`. final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to `Tensors`. final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`. hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside the evaluation loop. config: An instance of `tf.compat.v1.ConfigProto` that will be used to configure the `Session`.
If left as `None`, the default will be used. Returns: The fetched values of `final_ops` or `None` if `final_ops` is `None`." 12152,logistic_classifier,tensorflow/tensorflow/python/training/evaluation_test.py,47,function, 12153,local_variable,tensorflow/tensorflow/python/training/evaluation_test.py,51,function, 12154,EvaluateOnceTest,tensorflow/tensorflow/python/training/evaluation_test.py,60,class, 12155,FtrlOptimizer,tensorflow/tensorflow/python/training/ftrl.py,29,class,"Optimizer that implements the FTRL algorithm. This version has support for both online L2 (McMahan et al., 2013) and shrinkage-type L2, which is the addition of an L2 penalty to the loss function. References: Ad-click prediction: [McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200) ([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526))" 12156,FtrlOptimizerTest,tensorflow/tensorflow/python/training/ftrl_test.py,36,class, 12157,GradientDescentOptimizer,tensorflow/tensorflow/python/training/gradient_descent.py,30,class,"Optimizer that implements the gradient descent algorithm. " 12158,GradientDescentOptimizerTest,tensorflow/tensorflow/python/training/gradient_descent_test.py,36,class, 12159,match_filenames_once,tensorflow/tensorflow/python/training/input.py,62,function,"Save the list of files matching pattern, so it is only computed once. NOTE: The order of the files returned is deterministic. Args: pattern: A file pattern (glob), or 1D tensor of file patterns. name: A name for the operations (optional). Returns: A variable that is initialized to the list of files matching the pattern(s)." 12160,limit_epochs,tensorflow/tensorflow/python/training/input.py,85,function,"Returns tensor `num_epochs` times and then raises an `OutOfRange` error. Note: creates local counter `epochs`. Use `local_variables_initializer()` to initialize local variables. Args: tensor: Any `Tensor`. num_epochs: A positive integer (optional). If specified, limits the number of steps the output tensor may be evaluated. name: A name for the operations (optional). Returns: tensor or `OutOfRange`. Raises: ValueError: if `num_epochs` is invalid." 12161,input_producer,tensorflow/tensorflow/python/training/input.py,123,function,"Output the rows of `input_tensor` to a queue for an input pipeline. Note: if `num_epochs` is not `None`, this function creates local counter `epochs`. Use `local_variables_initializer()` to initialize local variables. Args: input_tensor: A tensor with the rows to produce. Must be at least one-dimensional. Must either have a fully-defined shape, or `element_shape` must be defined. element_shape: (Optional.) A `TensorShape` representing the shape of a row of `input_tensor`, if it cannot be inferred. num_epochs: (Optional.) An integer. If specified `input_producer` produces each row of `input_tensor` `num_epochs` times before generating an `OutOfRange` error. If not specified, `input_producer` can cycle through the rows of `input_tensor` an unlimited number of times. shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled within each epoch. seed: (Optional.) An integer. The seed to use if `shuffle` is true. capacity: (Optional.) The capacity of the queue to be used for buffering the input. shared_name: (Optional.) If set, this queue will be shared under the given name across multiple sessions. summary_name: (Optional.) 
If set, a scalar summary for the current queue size will be generated, using this name as part of the tag. name: (Optional.) A name for the queue. cancel_op: (Optional.) Cancel op for the queue. Returns: A queue with the output rows. A `QueueRunner` for the queue is added to the current `QUEUE_RUNNER` collection of the current graph. Raises: ValueError: If the shape of the input cannot be inferred from the arguments. RuntimeError: If called with eager execution enabled. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12162,string_input_producer,tensorflow/tensorflow/python/training/input.py,211,function,"Output strings (e.g. filenames) to a queue for an input pipeline. Note: if `num_epochs` is not `None`, this function creates local counter `epochs`. Use `local_variables_initializer()` to initialize local variables. Args: string_tensor: A 1-D string tensor with the strings to produce. num_epochs: An integer (optional). If specified, `string_input_producer` produces each string from `string_tensor` `num_epochs` times before generating an `OutOfRange` error. If not specified, `string_input_producer` can cycle through the strings in `string_tensor` an unlimited number of times. shuffle: Boolean. If true, the strings are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. All sessions open to the device which has this queue will be able to access it via the shared_name. Using this in a distributed setting means each name will only be seen by one of the sessions which has access to this operation. name: A name for the operations (optional). cancel_op: Cancel op for the queue (optional). Returns: A queue with the output strings. A `QueueRunner` for the Queue is added to the current `Graph`'s `QUEUE_RUNNER` collection. Raises: ValueError: If `string_tensor` is an empty Python list. At runtime, will fail with an assertion if `string_tensor` becomes an empty tensor. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12163,range_input_producer,tensorflow/tensorflow/python/training/input.py,285,function,"Produces the integers from 0 to limit-1 in a queue. Note: if `num_epochs` is not `None`, this function creates local counter `epochs`. Use `local_variables_initializer()` to initialize local variables. Args: limit: An int32 scalar tensor. num_epochs: An integer (optional). If specified, `range_input_producer` produces each integer `num_epochs` times before generating an OutOfRange error. If not specified, `range_input_producer` can cycle through the integers an unlimited number of times. shuffle: Boolean. If true, the integers are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: A name for the operations (optional). Returns: A Queue with the output integers. A `QueueRunner` for the Queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
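As an aside, the producer entries above all follow the same usage pattern. A minimal sketch, assuming two hypothetical CSV filenames and eager execution disabled, of driving `string_input_producer` with queue runners:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Hypothetical filenames; any list of strings works.
filenames = ['file0.csv', 'file1.csv']
queue = tf.train.string_input_producer(filenames, num_epochs=2, shuffle=True)
next_file = queue.dequeue()

with tf.Session() as sess:
    # num_epochs is backed by a local counter, so local variables need init.
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while True:
            print(sess.run(next_file))
    except tf.errors.OutOfRangeError:
        pass  # raised after num_epochs passes over the filenames
    finally:
        coord.request_stop()
        coord.join(threads)
```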
@compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12164,slice_input_producer,tensorflow/tensorflow/python/training/input.py,328,function,"Produces a slice of each `Tensor` in `tensor_list`. Implemented using a Queue -- a `QueueRunner` for the Queue is added to the current `Graph`'s `QUEUE_RUNNER` collection. Args: tensor_list: A list of `Tensor` objects. Every `Tensor` in `tensor_list` must have the same size in the first dimension. num_epochs: An integer (optional). If specified, `slice_input_producer` produces each slice `num_epochs` times before generating an `OutOfRange` error. If not specified, `slice_input_producer` can cycle through the slices an unlimited number of times. shuffle: Boolean. If true, the slices are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: A name for the operations (optional). Returns: A list of tensors, one for each element of `tensor_list`. If the tensor in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output tensor will have shape `[a, b, ..., z]`. Raises: ValueError: if `slice_input_producer` produces nothing from `tensor_list`. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12165,_flatten,tensorflow/tensorflow/python/training/input.py,382,function, 12166,_SparseMetaData,tensorflow/tensorflow/python/training/input.py,386,class,"Stores information about a `Tensor`: whether it is sparse, its map_op, and its rank." 12167,_as_tensor_list,tensorflow/tensorflow/python/training/input.py,445,function, 12168,_as_tensor_list_list,tensorflow/tensorflow/python/training/input.py,452,function, 12169,_as_original_type,tensorflow/tensorflow/python/training/input.py,466,function, 12170,_store_sparse_tensors,tensorflow/tensorflow/python/training/input.py,478,function,"Store SparseTensors for feeding into batch, etc. If `shared_map_ops` is provided, the underlying `SparseTensorsMap` objects are reused (shared). This argument is useful for, e.g., `batch_join` where multiple enqueue operations write to the same Queue component, and another (dequeue) thread reads from that same location and must then restore the associated `SparseTensor` objects. In this case, the sparse restore must have a single `SparseTensorMap` from which to read out the handles; so a single `SparseTensorMap` must be shared for storing across the multiple enqueue operations. This sharing is performed by calling `_store_sparse_tensors` the first time with `shared_map_ops=None`, and then in subsequent times with this value set to the list of `Operation` objects created in the first call. Args: tensor_list: List of `Tensor` and `SparseTensor` objects. enqueue_many: Python `Boolean`. keep_input: Must be a scalar bool Tensor (not a Python bool). If False, don't store. shared_map_ops: (optional) List of `Operation` objects from a previous call to `_store_sparse_tensors`. If not `None`, the op types should be one of `AddSparseToTensorsMap` or `AddManySparseToTensorsMap` in the locations corresponding to `SparseTensors` in `tensor_list`.
Returns: A tuple `(stored_list, sparse_info_list)` where `stored_list` is a list of `Tensor` objects (same length as `tensor_list`) and `sparse_info_list` is a list of the same length containing `_SparseMetaData` objects." 12171,_store_sparse_tensors_join,tensorflow/tensorflow/python/training/input.py,580,function,"Store SparseTensors for feeding into batch_join, etc." 12172,_restore_sparse_tensors,tensorflow/tensorflow/python/training/input.py,600,function,"Restore SparseTensors after dequeue in batch, batch_join, etc." 12173,_validate,tensorflow/tensorflow/python/training/input.py,630,function, 12174,_validate_join,tensorflow/tensorflow/python/training/input.py,637,function, 12175,_validate_keep_input,tensorflow/tensorflow/python/training/input.py,645,function,Validate `keep_input` argument to conditional batching functions. 12176,_dtypes,tensorflow/tensorflow/python/training/input.py,659,function, 12177,_merge_shapes,tensorflow/tensorflow/python/training/input.py,670,function, 12178,_shapes,tensorflow/tensorflow/python/training/input.py,681,function,"Calculate and merge the shapes of incoming tensors. Args: tensor_list_list: List of tensor lists. shapes: List of shape tuples corresponding to tensors within the lists. enqueue_many: Boolean describing whether shapes will be enqueued as batches or individual entries. Returns: A list of shapes aggregating shape inference info from `tensor_list_list`, or returning `shapes` if it is not `None`. Raises: ValueError: If any of the inferred shapes in `tensor_list_list` lacks a well-defined rank." 12179,_select_which_to_enqueue,tensorflow/tensorflow/python/training/input.py,712,function,Select which examples to enqueue based on vector `keep_input`. 12180,_enqueue_join,tensorflow/tensorflow/python/training/input.py,721,function,Enqueue `tensor_list_list` in `queue`. 12181,_enqueue,tensorflow/tensorflow/python/training/input.py,738,function,Enqueue `tensor_list` in `queue`. 12182,_which_queue,tensorflow/tensorflow/python/training/input.py,755,function, 12183,_batch,tensorflow/tensorflow/python/training/input.py,760,function,Helper function for `batch` and `maybe_batch`. 12184,_batch_join,tensorflow/tensorflow/python/training/input.py,800,function,Helper function for `batch_join` and `maybe_batch_join`. 12185,_shuffle_batch,tensorflow/tensorflow/python/training/input.py,835,function,Helper function for `shuffle_batch` and `maybe_shuffle_batch`. 12186,_shuffle_batch_join,tensorflow/tensorflow/python/training/input.py,879,function,Helper function for `shuffle_batch_join` and `maybe_shuffle_batch_join`. 12187,batch,tensorflow/tensorflow/python/training/input.py,929,function,"Creates batches of tensors in `tensors`. The argument `tensors` can be a list or a dictionary of tensors. The value returned by the function will be of the same type as `tensors`. This function is implemented using a queue. A `QueueRunner` for the queue is added to the current `Graph`'s `QUEUE_RUNNER` collection. If `enqueue_many` is `False`, `tensors` is assumed to represent a single example. An input tensor with shape `[x, y, z]` will be output as a tensor with shape `[batch_size, x, y, z]`. If `enqueue_many` is `True`, `tensors` is assumed to represent a batch of examples, where the first dimension is indexed by example, and all members of `tensors` should have the same size in the first dimension. If an input tensor has shape `[*, x, y, z]`, the output will have shape `[batch_size, x, y, z]`. The `capacity` argument controls how long the prefetching is allowed to grow the queues.
The returned operation is a dequeue operation and will throw `tf.errors.OutOfRangeError` if the input queue is exhausted. If this operation is feeding another input queue, its queue runner will catch this exception; however, if this operation is used in your main thread, you are responsible for catching this yourself. *N.B.:* If `dynamic_pad` is `False`, you must ensure that either (i) the `shapes` argument is passed, or (ii) all of the tensors in `tensors` have fully-defined shapes. `ValueError` will be raised if neither of these conditions holds. If `dynamic_pad` is `True`, it is sufficient that the *rank* of the tensors is known, but individual dimensions may have shape `None`. In this case, for each enqueue the dimensions with value `None` may have a variable length; upon dequeue, the output tensors will be padded on the right to the maximum shape of the tensors in the current minibatch. For numbers, this padding takes value 0. For strings, this padding is the empty string. See `PaddingFIFOQueue` for more info. If `allow_smaller_final_batch` is `True`, a smaller batch value than `batch_size` is returned when the queue is closed and there are not enough elements to fill the batch, otherwise the pending elements are discarded. In addition, all output tensors' static shapes, as accessed via the `shape` property, will have a first `Dimension` value of `None`, and operations that depend on fixed batch_size will fail. Args: tensors: The list or dictionary of tensors to enqueue. batch_size: The new batch size pulled from the queue. num_threads: The number of threads enqueuing `tensors`. The batching will be nondeterministic if `num_threads > 1`. capacity: An integer. The maximum number of elements in the queue. enqueue_many: Whether each tensor in `tensors` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensors`. dynamic_pad: Boolean. Allow variable dimensions in input shapes. The given dimensions are padded upon dequeue so that tensors within a batch have the same shapes. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (Optional). If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same types as `tensors` (except if the input is a list of one element, then it returns a tensor, not a list). Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensors`. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12188,maybe_batch,tensorflow/tensorflow/python/training/input.py,1028,function,"Conditionally creates batches of tensors based on `keep_input`. See docstring in `batch` for more details. Args: tensors: The list or dictionary of tensors to enqueue. keep_input: A `bool` Tensor. This tensor controls whether the input is added to the queue or not. If it is a scalar and evaluates `True`, then `tensors` are all added to the queue. If it is a vector and `enqueue_many` is `True`, then each example is added to the queue only if the corresponding value in `keep_input` is `True`. This tensor essentially acts as a filtering mechanism. batch_size: The new batch size pulled from the queue.
num_threads: The number of threads enqueuing `tensors`. The batching will be nondeterministic if `num_threads > 1`. capacity: An integer. The maximum number of elements in the queue. enqueue_many: Whether each tensor in `tensors` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensors`. dynamic_pad: Boolean. Allow variable dimensions in input shapes. The given dimensions are padded upon dequeue so that tensors within a batch have the same shapes. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (Optional). If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same types as `tensors`. Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensors`." 12189,batch_join,tensorflow/tensorflow/python/training/input.py,1085,function,"Runs a list of tensors to fill a queue to create batches of examples. The `tensors_list` argument is a list of tuples of tensors, or a list of dictionaries of tensors. Each element in the list is treated similarly to the `tensors` argument of `tf.compat.v1.train.batch()`. WARNING: This function is nondeterministic, since it starts a separate thread for each tensor. It enqueues a different list of tensors in different threads. Implemented using a queue -- a `QueueRunner` for the queue is added to the current `Graph`'s `QUEUE_RUNNER` collection. `len(tensors_list)` threads will be started, with thread `i` enqueuing the tensors from `tensors_list[i]`. `tensors_list[i1][j]` must match `tensors_list[i2][j]` in type and shape, except in the first dimension if `enqueue_many` is true. If `enqueue_many` is `False`, each `tensors_list[i]` is assumed to represent a single example. An input tensor `x` will be output as a tensor with shape `[batch_size] + x.shape`. If `enqueue_many` is `True`, `tensors_list[i]` is assumed to represent a batch of examples, where the first dimension is indexed by example, and all members of `tensors_list[i]` should have the same size in the first dimension. The slices of any input tensor `x` are treated as examples, and the output tensors will have shape `[batch_size] + x.shape[1:]`. The `capacity` argument controls how long the prefetching is allowed to grow the queues. The returned operation is a dequeue operation and will throw `tf.errors.OutOfRangeError` if the input queue is exhausted. If this operation is feeding another input queue, its queue runner will catch this exception; however, if this operation is used in your main thread, you are responsible for catching this yourself. *N.B.:* If `dynamic_pad` is `False`, you must ensure that either (i) the `shapes` argument is passed, or (ii) all of the tensors in `tensors_list` have fully-defined shapes. `ValueError` will be raised if neither of these conditions holds. If `dynamic_pad` is `True`, it is sufficient that the *rank* of the tensors is known, but individual dimensions may have value `None`. In this case, for each enqueue the dimensions with value `None` may have a variable length; upon dequeue, the output tensors will be padded on the right to the maximum shape of the tensors in the current minibatch. For numbers, this padding takes value 0. For strings, this padding is the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than `batch_size` is returned when the queue is closed and there are not enough elements to fill the batch, otherwise the pending elements are discarded. In addition, all output tensors' static shapes, as accessed via the `shape` property, will have a first `Dimension` value of `None`, and operations that depend on fixed batch_size will fail. Args: tensors_list: A list of tuples or dictionaries of tensors to enqueue. batch_size: An integer. The new batch size pulled from the queue. capacity: An integer. The maximum number of elements in the queue. enqueue_many: Whether each tensor in `tensor_list_list` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensor_list_list[i]`. dynamic_pad: Boolean. Allow variable dimensions in input shapes. The given dimensions are padded upon dequeue so that tensors within a batch have the same shapes. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (Optional) If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same number and types as `tensors_list[i]`. Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensor_list_list`. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12190,maybe_batch_join,tensorflow/tensorflow/python/training/input.py,1195,function,"Runs a list of tensors to conditionally fill a queue to create batches. See docstring in `batch_join` for more details. Args: tensors_list: A list of tuples or dictionaries of tensors to enqueue. keep_input: A `bool` Tensor. This tensor controls whether the input is added to the queue or not. If it is a scalar and evaluates `True`, then `tensors` are all added to the queue. If it is a vector and `enqueue_many` is `True`, then each example is added to the queue only if the corresponding value in `keep_input` is `True`. This tensor essentially acts as a filtering mechanism. batch_size: An integer. The new batch size pulled from the queue. capacity: An integer. The maximum number of elements in the queue. enqueue_many: Whether each tensor in `tensor_list_list` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensor_list_list[i]`. dynamic_pad: Boolean. Allow variable dimensions in input shapes. The given dimensions are padded upon dequeue so that tensors within a batch have the same shapes. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (Optional) If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same number and types as `tensors_list[i]`. Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensor_list_list`." 12191,shuffle_batch,tensorflow/tensorflow/python/training/input.py,1251,function,"Creates batches by randomly shuffling tensors.
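Before the full `shuffle_batch` docstring below, a minimal sketch of the simpler `batch` entry above (the per-example tensors are hypothetical stand-ins for real reader ops):

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Hypothetical per-example tensors; in practice these come from a reader op.
single_image = tf.random.uniform([28, 28, 1])
single_label = tf.constant(7, dtype=tf.int64)

# Assembles [32, 28, 28, 1] images and [32] labels behind a FIFO queue;
# a QueueRunner is added to the QUEUE_RUNNER collection automatically.
image_batch, label_batch = tf.train.batch(
    [single_image, single_label], batch_size=32, num_threads=2, capacity=256)
```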
This function adds the following to the current `Graph`: * A shuffling queue into which tensors from `tensors` are enqueued. * A `dequeue_many` operation to create batches from the queue. * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors from `tensors`. If `enqueue_many` is `False`, `tensors` is assumed to represent a single example. An input tensor with shape `[x, y, z]` will be output as a tensor with shape `[batch_size, x, y, z]`. If `enqueue_many` is `True`, `tensors` is assumed to represent a batch of examples, where the first dimension is indexed by example, and all members of `tensors` should have the same size in the first dimension. If an input tensor has shape `[*, x, y, z]`, the output will have shape `[batch_size, x, y, z]`. The `capacity` argument controls how long the prefetching is allowed to grow the queues. The returned operation is a dequeue operation and will throw `tf.errors.OutOfRangeError` if the input queue is exhausted. If this operation is feeding another input queue, its queue runner will catch this exception; however, if this operation is used in your main thread, you are responsible for catching this yourself. For example: ```python # Creates batches of 32 images and 32 labels. image_batch, label_batch = tf.compat.v1.train.shuffle_batch( [single_image, single_label], batch_size=32, num_threads=4, capacity=50000, min_after_dequeue=10000) ``` *N.B.:* You must ensure that either (i) the `shapes` argument is passed, or (ii) all of the tensors in `tensors` have fully-defined shapes. `ValueError` will be raised if neither of these conditions holds. If `allow_smaller_final_batch` is `True`, a smaller batch value than `batch_size` is returned when the queue is closed and there are not enough elements to fill the batch, otherwise the pending elements are discarded. In addition, all output tensors' static shapes, as accessed via the `shape` property, will have a first `Dimension` value of `None`, and operations that depend on fixed batch_size will fail. Args: tensors: The list or dictionary of tensors to enqueue. batch_size: The new batch size pulled from the queue. capacity: An integer. The maximum number of elements in the queue. min_after_dequeue: Minimum number of elements in the queue after a dequeue, used to ensure a level of mixing of elements. num_threads: The number of threads enqueuing `tensor_list`. seed: Seed for the random shuffling within the queue. enqueue_many: Whether each tensor in `tensor_list` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensor_list`. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (Optional) If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same types as `tensors`. Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensors`. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12192,maybe_shuffle_batch,tensorflow/tensorflow/python/training/input.py,1355,function,"Creates batches by randomly shuffling conditionally-enqueued tensors. See docstring in `shuffle_batch` for more details.
Args: tensors: The list or dictionary of tensors to enqueue. batch_size: The new batch size pulled from the queue. capacity: An integer. The maximum number of elements in the queue. min_after_dequeue: Minimum number of elements in the queue after a dequeue, used to ensure a level of mixing of elements. keep_input: A `bool` Tensor. This tensor controls whether the input is added to the queue or not. If it is a scalar and evaluates `True`, then `tensors` are all added to the queue. If it is a vector and `enqueue_many` is `True`, then each example is added to the queue only if the corresponding value in `keep_input` is `True`. This tensor essentially acts as a filtering mechanism. num_threads: The number of threads enqueuing `tensor_list`. seed: Seed for the random shuffling within the queue. enqueue_many: Whether each tensor in `tensor_list` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensor_list`. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (Optional) If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same types as `tensors`. Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensors`. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12193,shuffle_batch_join,tensorflow/tensorflow/python/training/input.py,1419,function,"Create batches by randomly shuffling tensors. The `tensors_list` argument is a list of tuples of tensors, or a list of dictionaries of tensors. Each element in the list is treated similarly to the `tensors` argument of `tf.compat.v1.train.shuffle_batch()`. This version enqueues a different list of tensors in different threads. It adds the following to the current `Graph`: * A shuffling queue into which tensors from `tensors_list` are enqueued. * A `dequeue_many` operation to create batches from the queue. * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors from `tensors_list`. `len(tensors_list)` threads will be started, with thread `i` enqueuing the tensors from `tensors_list[i]`. `tensors_list[i1][j]` must match `tensors_list[i2][j]` in type and shape, except in the first dimension if `enqueue_many` is true. If `enqueue_many` is `False`, each `tensors_list[i]` is assumed to represent a single example. An input tensor with shape `[x, y, z]` will be output as a tensor with shape `[batch_size, x, y, z]`. If `enqueue_many` is `True`, `tensors_list[i]` is assumed to represent a batch of examples, where the first dimension is indexed by example, and all members of `tensors_list[i]` should have the same size in the first dimension. If an input tensor has shape `[*, x, y, z]`, the output will have shape `[batch_size, x, y, z]`. The `capacity` argument controls how long the prefetching is allowed to grow the queues. The returned operation is a dequeue operation and will throw `tf.errors.OutOfRangeError` if the input queue is exhausted. If this operation is feeding another input queue, its queue runner will catch this exception; however, if this operation is used in your main thread, you are responsible for catching this yourself.
If `allow_smaller_final_batch` is `True`, a smaller batch value than `batch_size` is returned when the queue is closed and there are not enough elements to fill the batch, otherwise the pending elements are discarded. In addition, all output tensors' static shapes, as accessed via the `shape` property, will have a first `Dimension` value of `None`, and operations that depend on fixed batch_size will fail. Args: tensors_list: A list of tuples or dictionaries of tensors to enqueue. batch_size: An integer. The new batch size pulled from the queue. capacity: An integer. The maximum number of elements in the queue. min_after_dequeue: Minimum number of elements in the queue after a dequeue, used to ensure a level of mixing of elements. seed: Seed for the random shuffling within the queue. enqueue_many: Whether each tensor in `tensor_list_list` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensors_list[i]`. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same number and types as `tensors_list[i]`. Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensors_list`. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution. @end_compatibility" 12194,maybe_shuffle_batch_join,tensorflow/tensorflow/python/training/input.py,1517,function,"Create batches by randomly shuffling conditionally-enqueued tensors. See docstring in `shuffle_batch_join` for more details. Args: tensors_list: A list of tuples or dictionaries of tensors to enqueue. batch_size: An integer. The new batch size pulled from the queue. capacity: An integer. The maximum number of elements in the queue. min_after_dequeue: Minimum number of elements in the queue after a dequeue, used to ensure a level of mixing of elements. keep_input: A `bool` Tensor. This tensor controls whether the input is added to the queue or not. If it is a scalar and evaluates `True`, then `tensors` are all added to the queue. If it is a vector and `enqueue_many` is `True`, then each example is added to the queue only if the corresponding value in `keep_input` is `True`. This tensor essentially acts as a filtering mechanism. seed: Seed for the random shuffling within the queue. enqueue_many: Whether each tensor in `tensor_list_list` is a single example. shapes: (Optional) The shapes for each example. Defaults to the inferred shapes for `tensors_list[i]`. allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final batch to be smaller if there are insufficient items left in the queue. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: (Optional) A name for the operations. Returns: A list or dictionary of tensors with the same number and types as `tensors_list[i]`. Raises: ValueError: If the `shapes` are not specified, and cannot be inferred from the elements of `tensors_list`. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility" 12195,MatchFilenamesOnceTest,tensorflow/tensorflow/python/training/input_test.py,46,class, 12196,LimitEpochsTest,tensorflow/tensorflow/python/training/input_test.py,73,class, 12197,InputProducerTest,tensorflow/tensorflow/python/training/input_test.py,95,class, 12198,StringInputProducerTest,tensorflow/tensorflow/python/training/input_test.py,153,class, 12199,RangeInputProducerTest,tensorflow/tensorflow/python/training/input_test.py,269,class, 12200,SliceInputProducerTest,tensorflow/tensorflow/python/training/input_test.py,341,class, 12201,DictHelperTest,tensorflow/tensorflow/python/training/input_test.py,425,class, 12202,BatchTest,tensorflow/tensorflow/python/training/input_test.py,449,class, 12203,BatchJoinTest,tensorflow/tensorflow/python/training/input_test.py,1000,class, 12204,ShuffleBatchTest,tensorflow/tensorflow/python/training/input_test.py,1624,class, 12205,ShuffleBatchJoinTest,tensorflow/tensorflow/python/training/input_test.py,2044,class, 12206,CreateLocalClusterTest,tensorflow/tensorflow/python/training/localhost_cluster_performance_test.py,36,class, 12207,CreateLocalClusterBenchmark,tensorflow/tensorflow/python/training/localhost_cluster_performance_test.py,60,class, 12208,PartitionedVariablesBenchmark,tensorflow/tensorflow/python/training/localhost_cluster_performance_test.py,80,class, 12209,MomentumOptimizer,tensorflow/tensorflow/python/training/momentum.py,29,class,"Optimizer that implements the Momentum algorithm. Computes (if `use_nesterov = False`): ``` accumulation = momentum * accumulation + gradient variable -= learning_rate * accumulation ``` Note that in the dense version of this algorithm, `accumulation` is updated and applied regardless of a gradient's value, whereas the sparse version (when the gradient is an `IndexedSlices`, typically because of `tf.gather` or an embedding) only updates variable slices and corresponding `accumulation` terms when that part of the variable was used in the forward pass." 12210,MomentumOptimizerTest,tensorflow/tensorflow/python/training/momentum_test.py,38,class, 12211,Scaffold,tensorflow/tensorflow/python/training/monitored_session.py,59,class,"Structure to create or gather pieces commonly needed to train a model. When you build a model for training you usually need ops to initialize variables, a `Saver` to checkpoint them, an op to collect summaries for the visualizer, and so on. Various libraries built on top of the core TensorFlow library take care of creating some or all of these pieces and storing them in well known collections in the graph. The `Scaffold` class helps pick these pieces from the graph collections, creating and adding them to the collections if needed. If you call the scaffold constructor without any arguments, it will pick pieces from the collections, creating default ones if needed when `scaffold.finalize()` is called. You can pass arguments to the constructor to provide your own pieces. Pieces that you pass to the constructor are not added to the graph collections. The following pieces are directly accessible as attributes of the `Scaffold` object: * `saver`: A `tf.compat.v1.train.Saver` object taking care of saving the variables. Picked from and stored into the `SAVERS` collection in the graph by default. * `init_op`: An op to run to initialize the variables. Picked from and stored into the `INIT_OP` collection in the graph by default. * `ready_op`: An op to verify that the variables are initialized. Picked from and stored into the `READY_OP` collection in the graph by default. 
* `ready_for_local_init_op`: An op to verify that global state has been initialized and it is alright to run `local_init_op`. Picked from and stored into the `READY_FOR_LOCAL_INIT_OP` collection in the graph by default. This is needed when the initialization of local variables depends on the values of global variables. * `local_init_op`: An op to initialize the local variables. Picked from and stored into the `LOCAL_INIT_OP` collection in the graph by default. * `summary_op`: An op to run and merge the summaries in the graph. Picked from and stored into the `SUMMARY_OP` collection in the graph by default. You can also pass the following additional pieces to the constructor: * `init_feed_dict`: A session feed dictionary that should be used when running the init op. * `init_fn`: A callable to run after the init op to perform additional initializations. The callable will be called as `init_fn(scaffold, session)`." 12212,_create_monitored_session_with_worker_context,tensorflow/tensorflow/python/training/monitored_session.py,321,function, 12213,MonitoredTrainingSession,tensorflow/tensorflow/python/training/monitored_session.py,434,function,"Creates a `MonitoredSession` for training. For a chief, this utility sets a proper session initializer/restorer. It also creates hooks related to checkpoint and summary saving. For workers, this utility sets a proper session creator that waits for the chief to initialize/restore. Please check `tf.compat.v1.train.MonitoredSession` for more information. Args: master: `String` the TensorFlow master to use. is_chief: If `True`, it will take care of initialization and recovery of the underlying TensorFlow session. If `False`, it will wait on a chief to initialize or recover the TensorFlow session. checkpoint_dir: A string. Optional path to a directory where to restore variables. scaffold: A `Scaffold` used for gathering or building supportive ops. If not specified, a default one is created. It's used to finalize the graph. hooks: Optional list of `SessionRunHook` objects. chief_only_hooks: List of `SessionRunHook` objects. Activate these hooks if `is_chief==True`, ignore otherwise. save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved using a default checkpoint saver. If both `save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then the default checkpoint saver isn't used. If both are provided, then only `save_checkpoint_secs` is used. Default 600. save_summaries_steps: The frequency, in number of global steps, that the summaries are written to disk using a default summary saver. If both `save_summaries_steps` and `save_summaries_secs` are set to `None`, then the default summary saver isn't used. Default 100. save_summaries_secs: The frequency, in secs, that the summaries are written to disk using a default summary saver. If both `save_summaries_steps` and `save_summaries_secs` are set to `None`, then the default summary saver isn't used. Default not enabled. config: An instance of `tf.compat.v1.ConfigProto` proto used to configure the session. It's the `config` argument of the `tf.compat.v1.Session` constructor. stop_grace_period_secs: Number of seconds given to threads to stop after `close()` has been called. log_step_count_steps: The frequency, in number of global steps, that the global step/sec is logged. max_wait_secs: Maximum time workers should wait for the session to become available.
This should be kept relatively short to help detect incorrect code, but sometimes may need to be increased if the chief takes a while to start up. save_checkpoint_steps: The frequency, in number of global steps, that a checkpoint is saved using a default checkpoint saver. If both `save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then the default checkpoint saver isn't used. If both are provided, then only `save_checkpoint_secs` is used. Default not enabled. summary_dir: A string. Optional path to a directory where to save summaries. If None, checkpoint_dir is used instead. save_graph_def: Whether to save the GraphDef and MetaGraphDef to `checkpoint_dir`. The GraphDef is saved after the session is created as `graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as `model.ckpt-*.meta`. Returns: A `MonitoredSession` object." 12214,SessionCreator,tensorflow/tensorflow/python/training/monitored_session.py,609,class,A factory for tf.Session. 12215,ChiefSessionCreator,tensorflow/tensorflow/python/training/monitored_session.py,619,class,Creates a tf.compat.v1.Session for a chief. 12216,WorkerSessionCreator,tensorflow/tensorflow/python/training/monitored_session.py,673,class,Creates a tf.compat.v1.Session for a worker. 12217,_MonitoredSession,tensorflow/tensorflow/python/training/monitored_session.py,715,class,See `MonitoredSession` or `SingularMonitoredSession`. 12218,MonitoredSession,tensorflow/tensorflow/python/training/monitored_session.py,954,class,"Session-like object that handles initialization, recovery and hooks. Example usage: ```python saver_hook = CheckpointSaverHook(...) summary_hook = SummarySaverHook(...) with MonitoredSession(session_creator=ChiefSessionCreator(...), hooks=[saver_hook, summary_hook]) as sess: while not sess.should_stop(): sess.run(train_op) ``` Initialization: At creation time the monitored session does the following things in the given order: * calls `hook.begin()` for each given hook * finalizes the graph via `scaffold.finalize()` * creates the session * initializes the model via initialization ops provided by `Scaffold` * restores variables if a checkpoint exists * launches queue runners * calls `hook.after_create_session()` Run: When `run()` is called, the monitored session does the following things: * calls `hook.before_run()` * calls TensorFlow `session.run()` with merged fetches and feed_dict * calls `hook.after_run()` * returns the result of `session.run()` requested by the user * if `AbortedError` or `UnavailableError` occurs, it recovers or reinitializes the session before executing the run() call again Exit: At the `close()`, the monitored session does the following things in order: * calls `hook.end()` * closes the queue runners and the session * suppresses the `OutOfRange` error, which indicates that all inputs have been processed, if the monitored_session is used as a context How to set `tf.compat.v1.Session` arguments: * In most cases you can set session arguments as follows: ```python MonitoredSession( session_creator=ChiefSessionCreator(master=..., config=...)) ``` * In a distributed setting for a non-chief worker, you can use the following: ```python MonitoredSession( session_creator=WorkerSessionCreator(master=..., config=...)) ``` See `MonitoredTrainingSession` for an example usage based on chief or worker. Note: This is not a `tf.compat.v1.Session`. For example, it cannot do the following: * it cannot be set as the default session. * it cannot be sent to saver.save. * it cannot be sent to tf.train.start_queue_runners.
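A minimal sketch of the training-loop shape these docstrings describe (the checkpoint directory and the stand-in `train_op` are hypothetical):

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

global_step = tf.train.get_or_create_global_step()
# Stand-in train_op; a real one would come from an optimizer's minimize().
train_op = tf.assign_add(global_step, 1)

hooks = [tf.train.StopAtStepHook(last_step=100)]
with tf.train.MonitoredTrainingSession(
        checkpoint_dir='/tmp/ckpts',  # hypothetical path
        hooks=hooks) as sess:
    while not sess.should_stop():
        sess.run(train_op)
```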
Args: session_creator: A factory object to create sessions. Typically a `ChiefSessionCreator` which is the default one. hooks: An iterable of `SessionRunHook` objects. Returns: A MonitoredSession object." 12219,SingularMonitoredSession,tensorflow/tensorflow/python/training/monitored_session.py,1042,class,"Session-like object that handles initialization, restoring, and hooks. Please note that this utility is not recommended for distributed settings. For distributed settings, please use `tf.compat.v1.train.MonitoredSession`. The differences between `MonitoredSession` and `SingularMonitoredSession` are: * `MonitoredSession` handles `AbortedError` and `UnavailableError` for distributed settings, but `SingularMonitoredSession` does not. * `MonitoredSession` can be created in `chief` or `worker` modes. `SingularMonitoredSession` is always created as `chief`. * You can access the raw `tf.compat.v1.Session` object used by `SingularMonitoredSession`, whereas in MonitoredSession the raw session is private. This can be used: - To `run` without hooks. - To save and restore. * All other functionality is identical. Example usage: ```python saver_hook = CheckpointSaverHook(...) summary_hook = SummarySaverHook(...) with SingularMonitoredSession(hooks=[saver_hook, summary_hook]) as sess: while not sess.should_stop(): sess.run(train_op) ``` Initialization: At creation time the hooked session does the following things in the given order: * calls `hook.begin()` for each given hook * finalizes the graph via `scaffold.finalize()` * creates the session * initializes the model via initialization ops provided by `Scaffold` * restores variables if a checkpoint exists * launches queue runners Run: When `run()` is called, the hooked session does the following things: * calls `hook.before_run()` * calls TensorFlow `session.run()` with merged fetches and feed_dict * calls `hook.after_run()` * returns the result of `session.run()` requested by the user Exit: At the `close()`, the hooked session does the following things in order: * calls `hook.end()` * closes the queue runners and the session * suppresses the `OutOfRange` error, which indicates that all inputs have been processed, if the `SingularMonitoredSession` is used as a context." 12220,_WrappedSession,tensorflow/tensorflow/python/training/monitored_session.py,1135,class,"Wrapper around a `tf.compat.v1.Session`. This wrapper is used as a base class for various session wrappers that provide additional functionality such as monitoring, coordination, and recovery. In addition to the methods exported by `SessionInterface` the wrapper provides a method to check for stop and never raises exceptions from calls to `close()`." 12221,_RecoverableSession,tensorflow/tensorflow/python/training/monitored_session.py,1210,class,"A wrapped session that recreates a session upon certain kinds of errors. The constructor is passed a SessionCreator object, not a session. Calls to `run()` are delegated to the wrapped session. If a call raises the exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the wrapped session is closed, and a new one is created by calling the factory again." 12222,_CoordinatedSession,tensorflow/tensorflow/python/training/monitored_session.py,1319,class,"A wrapped session that works with a `tf.Coordinator`. Calls to `run()` are delegated to the wrapped session. If a call raises an exception, the exception is reported to the coordinator. In addition, after each call to `run()` this session asks the coordinator if the session should stop.
In that case it will join all the threads registered with the coordinator before returning. If the coordinator was requested to stop with an exception, that exception will be re-raised from the call to `run()`." 12223,_HookedSession,tensorflow/tensorflow/python/training/monitored_session.py,1387,class,"A _WrappedSession that calls hooks during calls to run(). The list of hooks to call is passed in the constructor. Before each call to `run()` the session calls the `before_run()` method of the hooks, which can return additional ops or tensors to run. These are added to the arguments of the call to `run()`. When the `run()` call finishes, the session calls the `after_run()` methods of the hooks, passing the values returned by the `run()` call corresponding to the ops and tensors that each hook requested. If any call to the hooks requests a stop via the run_context, the session will be marked as needing to stop and its `should_stop()` method will now return `True`." 12224,latest_summaries,tensorflow/tensorflow/python/training/monitored_session_test.py,57,function,Parse summary events from latest event file in base_dir. 12225,ScaffoldTest,tensorflow/tensorflow/python/training/monitored_session_test.py,64,class,Scaffold tests. 12226,_test_dir,tensorflow/tensorflow/python/training/monitored_session_test.py,238,function,"Create an empty dir to use for tests. Args: temp_dir: Tmp directory path. test_name: Name of the test. Returns: Absolute path to the test directory." 12227,FakeHook,tensorflow/tensorflow/python/training/monitored_session_test.py,257,class, 12228,MonitoredTrainingSessionTest,tensorflow/tensorflow/python/training/monitored_session_test.py,287,class,Tests MonitoredTrainingSession. 12229,MockExtended,tensorflow/tensorflow/python/training/monitored_session_test.py,433,class, 12230,MockStrategy,tensorflow/tensorflow/python/training/monitored_session_test.py,443,class, 12231,MonitoredTrainingSessionWithDistributeCoordinatorTest,tensorflow/tensorflow/python/training/monitored_session_test.py,454,class,Test distribute coordinator controls summary saving and checkpointing. 12232,StopAtNSession,tensorflow/tensorflow/python/training/monitored_session_test.py,564,class,A wrapped session that stops at the N-th call to _check_stop. 12233,WrappedSessionTest,tensorflow/tensorflow/python/training/monitored_session_test.py,578,class,_WrappedSession tests. 12234,busy_wait_for_coord_stop,tensorflow/tensorflow/python/training/monitored_session_test.py,636,function, 12235,CoordinatedSessionTest,tensorflow/tensorflow/python/training/monitored_session_test.py,641,class,_CoordinatedSession tests. 12236,AbortAtNSession,tensorflow/tensorflow/python/training/monitored_session_test.py,770,class,A mock session that aborts at the N-th run call. 12237,StopCoordinatorWithException,tensorflow/tensorflow/python/training/monitored_session_test.py,787,class,With this hook Coordinator throws an exception after N-runs. 12238,FailTrainingAfterCoordinatorStopped,tensorflow/tensorflow/python/training/monitored_session_test.py,832,class,With this hook training encounters an exception after N-runs. 12239,CountingSessionCreator,tensorflow/tensorflow/python/training/monitored_session_test.py,861,class,A creator that counts the number of created sessions. 12240,RecoverableSessionTest,tensorflow/tensorflow/python/training/monitored_session_test.py,880,class,_RecoverableSession tests.
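The `MonitoredSession` machinery indexed above (session creators plus the recovery, coordination, and hook wrappers) is normally reached through `tf.compat.v1.train.MonitoredTrainingSession`. A minimal sketch; the checkpoint directory and the ten-step limit are illustrative, not from the indexed source:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A trivial graph with a global step, so the default checkpoint saver has
# something to record.
global_step = tf.train.get_or_create_global_step()
train_op = tf.assign_add(global_step, 1)

# MonitoredTrainingSession builds a ChiefSessionCreator and stacks the
# recovery, coordination, and hook wrappers catalogued above around the raw
# session; hooks run around every call to run().
with tf.train.MonitoredTrainingSession(
    checkpoint_dir="/tmp/mts_example",  # illustrative path
    hooks=[tf.train.StopAtStepHook(last_step=10)]) as sess:
  while not sess.should_stop():
    sess.run(train_op)
```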
12241,FakeSession,tensorflow/tensorflow/python/training/monitored_session_test.py,1252,class, 12242,HookedSessionTest,tensorflow/tensorflow/python/training/monitored_session_test.py,1264,class,Tests of _HookedSession. 12243,RaiseOnceAtCountN,tensorflow/tensorflow/python/training/monitored_session_test.py,1419,class,Hook that raises an Exception at step N. 12244,RunOptionsMetadataHook,tensorflow/tensorflow/python/training/monitored_session_test.py,1436,class,A hook that observes & optionally modifies RunOptions and RunMetadata. 12245,MonitoredSessionTest,tensorflow/tensorflow/python/training/monitored_session_test.py,1467,class,MonitoredSession tests. 12246,SingularMonitoredSessionTest,tensorflow/tensorflow/python/training/monitored_session_test.py,2221,class,Tests SingularMonitoredSession. 12247,assign_moving_average,tensorflow/tensorflow/python/training/moving_averages.py,36,function,"Compute the moving average of a variable. The moving average of 'variable' updated with 'value' is: variable * decay + value * (1 - decay) The returned Operation sets 'variable' to the newly computed moving average, by performing this subtraction: variable -= (1 - decay) * (variable - value) Since variables that are initialized to a `0` value will be `0` biased, `zero_debias` optionally enables scaling by the mathematically correct debiasing factor of 1 - decay ** num_updates See Section 3 of (Kingma et al., 2015) for more details. The names of the debias shadow variables, by default, include both the scope they were created in and the scope of the variables they debias. They are also given a uniquifying-suffix. E.g.: ``` with tf.compat.v1.variable_scope('scope1'): with tf.compat.v1.variable_scope('scope2'): var = tf.compat.v1.get_variable('foo') update_1 = tf.assign_moving_average(var, 0.0, 1.0) update_2 = tf.assign_moving_average(var, 0.0, 0.9) # var.name: 'scope1/scope2/foo' # shadow var names: 'scope1/scope2/scope1/scope2/foo/biased' # 'scope1/scope2/scope1/scope2/foo/biased_1' ``` Args: variable: A Variable. value: A tensor with the same shape as 'variable'. decay: A float Tensor or float value. The moving average decay. zero_debias: A python bool. If true, assume the variable is 0-initialized and unbias it, as in (Kingma et al., 2015). See docstring in `_zero_debias` for more details. name: Optional name of the returned operation. Returns: A tensor which if evaluated will compute and return the new moving average. References: Adam - A Method for Stochastic Optimization: [Kingma et al., 2015](https://arxiv.org/abs/1412.6980) ([pdf](https://arxiv.org/pdf/1412.6980.pdf))" 12248,weighted_moving_average,tensorflow/tensorflow/python/training/moving_averages.py,117,function,"Compute the weighted moving average of `value`. Conceptually, the weighted moving average is: `moving_average(value * weight) / moving_average(weight)`, where a moving average updates by the rule `new_value = decay * old_value + (1 - decay) * update` Internally, this Op keeps moving average variables of both `value * weight` and `weight`. Args: value: A numeric `Tensor`. decay: A float `Tensor` or float value. The moving average decay. weight: `Tensor` that keeps the current value of a weight. Shape should be able to multiply `value`. truediv: Boolean, if `True`, dividing by `moving_average(weight)` is floating point division. If `False`, use division implied by dtypes. collections: List of graph collections keys to add the internal variables `value * weight` and `weight` to. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. 
name: Optional name of the returned operation. Defaults to ""WeightedMovingAvg"". Returns: An Operation that updates and returns the weighted moving average." 12249,_update,tensorflow/tensorflow/python/training/moving_averages.py,181,function,Applies updates depending on the context. 12250,_zero_debias,tensorflow/tensorflow/python/training/moving_averages.py,195,function,"Compute the delta required for a debiased Variable. All exponential moving averages initialized with Tensors are initialized to 0, and therefore are biased to 0. Variables initialized to 0 and used as EMAs are similarly biased. This function creates the debiased update amount according to a scale factor, as in (Kingma et al., 2015). To demonstrate the bias that results from 0-initialization, take an EMA that was initialized to `0` with decay `b`. After `t` timesteps of seeing the constant `c`, the variable will have the following value: ``` EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ... = c*(1 - b^t) ``` To have the true value `c`, we would divide by the scale factor `1 - b^t`. In order to perform debiasing, we use two shadow variables. One keeps track of the biased estimate, and the other keeps track of the number of updates that have occurred. Args: strategy: `Strategy` used to create and update variables. unbiased_var: A Variable representing the current value of the unbiased EMA. value: A Tensor representing the most recent value. decay: A Tensor representing `1-decay` for the EMA. Returns: The amount that the unbiased variable should be updated. Computing this tensor will also update the shadow variables appropriately. References: Adam - A Method for Stochastic Optimization: [Kingma et al., 2015](https://arxiv.org/abs/1412.6980) ([pdf](https://arxiv.org/pdf/1412.6980.pdf))" 12251,ExponentialMovingAverage,tensorflow/tensorflow/python/training/moving_averages.py,285,class,"Maintains moving averages of variables by employing an exponential decay. When training a model, it is often beneficial to maintain moving averages of the trained parameters. Evaluations that use averaged parameters sometimes produce significantly better results than the final trained values. The `apply()` method adds shadow copies of trained variables and adds ops that maintain a moving average of the trained variables in their shadow copies. It is used when building the training model. The ops that maintain moving averages are typically run after each training step. The `average()` and `average_name()` methods give access to the shadow variables and their names. They are useful when building an evaluation model, or when restoring a model from a checkpoint file. They help use the moving averages in place of the last trained values for evaluations. The moving averages are computed using exponential decay. You specify the decay value when creating the `ExponentialMovingAverage` object. The shadow variables are initialized with the same initial values as the trained variables. When you run the ops to maintain the moving averages, each shadow variable is updated with the formula: `shadow_variable -= (1 - decay) * (shadow_variable - variable)` This is mathematically equivalent to the classic formula below, but the use of an `assign_sub` op (the `""-=""` in the formula) allows concurrent lockless updates to the variables: `shadow_variable = decay * shadow_variable + (1 - decay) * variable` Reasonable values for `decay` are close to 1.0, typically in the multiple-nines range: 0.999, 0.9999, etc.
Example usage when creating a training model: ```python # Create variables. var0 = tf.Variable(...) var1 = tf.Variable(...) # ... use the variables to build a training model... ... # Create an op that applies the optimizer. This is what we usually # would use as a training op. opt_op = opt.minimize(my_loss, [var0, var1]) # Create an ExponentialMovingAverage object ema = tf.train.ExponentialMovingAverage(decay=0.9999) with tf.control_dependencies([opt_op]): # Create the shadow variables, and add ops to maintain moving averages # of var0 and var1. This also creates an op that will update the moving # averages after each training step. This is what we will use in place # of the usual training op. training_op = ema.apply([var0, var1]) ...train the model by running training_op... ``` There are two ways to use the moving averages for evaluations: * Build a model that uses the shadow variables instead of the variables. For this, use the `average()` method which returns the shadow variable for a given variable. * Build a model normally but load the checkpoint files to evaluate by using the shadow variable names. For this use the `average_name()` method. See the `tf.compat.v1.train.Saver` for more information on restoring saved variables. Example of restoring the shadow variable values: ```python # Create a Saver that loads variables from their saved shadow values. shadow_var0_name = ema.average_name(var0) shadow_var1_name = ema.average_name(var1) saver = tf.compat.v1.train.Saver({shadow_var0_name: var0, shadow_var1_name: var1}) saver.restore(...checkpoint filename...) # var0 and var1 now hold the moving average values ```" 12252,MovingAveragesTest,tensorflow/tensorflow/python/training/moving_averages_test.py,35,class, 12253,_Repeat,tensorflow/tensorflow/python/training/moving_averages_test.py,159,function, 12254,ExponentialMovingAverageTest,tensorflow/tensorflow/python/training/moving_averages_test.py,165,class, 12255,get_filtered_grad_fn,tensorflow/tensorflow/python/training/optimizer.py,48,function, 12256,_deduplicate_indexed_slices,tensorflow/tensorflow/python/training/optimizer.py,64,function,"Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index." 12257,_var_key,tensorflow/tensorflow/python/training/optimizer.py,83,function, 12258,_OptimizableVariable,tensorflow/tensorflow/python/training/optimizer.py,91,class,Interface for abstracting over variables in the optimizers. 12259,_RefVariableProcessor,tensorflow/tensorflow/python/training/optimizer.py,105,class,Processor for Variable. 12260,_DenseReadResourceVariableProcessor,tensorflow/tensorflow/python/training/optimizer.py,135,class,Processor for dense ResourceVariables. 12261,_DenseResourceVariableProcessor,tensorflow/tensorflow/python/training/optimizer.py,154,class,Processor for dense ResourceVariables. 12262,_TensorProcessor,tensorflow/tensorflow/python/training/optimizer.py,179,class,"Processor for ordinary Tensors. Even though a Tensor can't really be updated, sometimes it is useful to compute the gradients with respect to a Tensor using the optimizer. Updating the Tensor is, of course, unsupported." 
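Putting the `ExponentialMovingAverage` docstring above into a self-contained form, here is a minimal sketch; the decay of 0.9 and the five-step loop are illustrative:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

var = tf.Variable(0.0)
update_var = tf.assign_add(var, 1.0)

# apply() creates the shadow variable and returns the op that maintains it.
ema = tf.train.ExponentialMovingAverage(decay=0.9)
maintain_op = ema.apply([var])

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(5):
    sess.run(update_var)
    sess.run(maintain_op)
  # average() returns the shadow variable holding the decayed average.
  print(sess.run([var, ema.average(var)]))
```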
12263,_get_processor,tensorflow/tensorflow/python/training/optimizer.py,197,function,The processor of v. 12264,Optimizer,tensorflow/tensorflow/python/training/optimizer.py,217,class,"Base class for optimizers. This class defines the API to add Ops to train a model. You never use this class directly, but instead instantiate one of its subclasses such as `GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`. ### Usage ```python # Create an optimizer with the desired parameters. opt = GradientDescentOptimizer(learning_rate=0.1) # Add Ops to the graph to minimize a cost by updating a list of variables. # ""cost"" is a Tensor, and the list of variables contains tf.Variable # objects. opt_op = opt.minimize(cost, var_list=<list of variables>) ``` In the training program you will just have to run the returned Op. ```python # Execute opt_op to do one step of training: opt_op.run() ``` ### Processing gradients before applying them. Calling `minimize()` takes care of both computing the gradients and applying them to the variables. If you want to process the gradients before applying them you can instead use the optimizer in three steps: 1. Compute the gradients with `compute_gradients()`. 2. Process the gradients as you wish. 3. Apply the processed gradients with `apply_gradients()`. Example: ```python # Create an optimizer. opt = GradientDescentOptimizer(learning_rate=0.1) # Compute the gradients for a list of variables. grads_and_vars = opt.compute_gradients(loss, <list of variables>) # grads_and_vars is a list of tuples (gradient, variable). Do whatever you # need to the 'gradient' part, for example cap them, etc. capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars] # Ask the optimizer to apply the capped gradients. opt.apply_gradients(capped_grads_and_vars) ``` ### Gating Gradients Both `minimize()` and `compute_gradients()` accept a `gate_gradients` argument that controls the degree of parallelism during the application of the gradients. The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`. `GATE_NONE`: Compute and apply gradients in parallel. This provides the maximum parallelism in execution, at the cost of some non-reproducibility in the results. For example, the two gradients of `matmul` depend on the input values: With `GATE_NONE` one of the gradients could be applied to one of the inputs _before_ the other gradient is computed, resulting in non-reproducible results. `GATE_OP`: For each Op, make sure all gradients are computed before they are used. This prevents race conditions for Ops that generate gradients for multiple inputs where the gradients depend on the inputs. `GATE_GRAPH`: Make sure all gradients for all variables are computed before any one of them is used. This provides the least parallelism but can be useful if you want to process all gradients before applying any of them. ### Slots Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`, allocate and manage additional variables associated with the variables to train. These are called Slots. Slots have names and you can ask the optimizer for the names of the slots that it uses. Once you have a slot name you can ask the optimizer for the variable it created to hold the slot value. This can be useful if you want to log or debug a training algorithm, report stats about the slots, etc."
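The three-step `compute_gradients()` / transform / `apply_gradients()` pattern described in the `Optimizer` docstring above can be exercised end to end. A minimal sketch; the quadratic loss and the clipping bounds are illustrative:

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

w = tf.Variable([3.0])
loss = tf.reduce_sum(tf.square(w))  # gradient w.r.t. w is 2 * w

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)

# 1. Compute, 2. transform (cap each gradient), 3. apply.
grads_and_vars = opt.compute_gradients(loss, var_list=[w])
capped = [(tf.clip_by_value(g, -1.0, 1.0), v) for g, v in grads_and_vars]
train_op = opt.apply_gradients(capped)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(w))  # each element moved by at most learning_rate * 1.0
```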
12265,OptimizerTest,tensorflow/tensorflow/python/training/optimizer_test.py,35,class, 12266,ProximalAdagradOptimizer,tensorflow/tensorflow/python/training/proximal_adagrad.py,30,class,"Optimizer that implements the Proximal Adagrad algorithm. References: Adaptive Subgradient Methods for Online Learning and Stochastic Optimization: [Duchi et al., 2011](http://jmlr.org/papers/v12/duchi11a.html) ([pdf](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)) Efficient Learning using Forward-Backward Splitting: [Duchi et al., 2009](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting) ([pdf](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf))" 12267,ProximalAdagradOptimizerTest,tensorflow/tensorflow/python/training/proximal_adagrad_test.py,35,class, 12268,ProximalGradientDescentOptimizer,tensorflow/tensorflow/python/training/proximal_gradient_descent.py,31,class,"Optimizer that implements the proximal gradient descent algorithm. References: Efficient Learning using Forward-Backward Splitting: [Duchi et al., 2009](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting) ([pdf](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf))" 12269,ProximalGradientDescentOptimizerTest,tensorflow/tensorflow/python/training/proximal_gradient_descent_test.py,35,class, 12270,error_translator,tensorflow/tensorflow/python/training/py_checkpoint_reader.py,27,function,Translate the tensor_slice_reader.cc errors. 12271,get_variable_to_dtype_map,tensorflow/tensorflow/python/training/py_checkpoint_reader.py,51,function, 12272,has_tensor,tensorflow/tensorflow/python/training/py_checkpoint_reader.py,60,function, 12273,get_tensor,tensorflow/tensorflow/python/training/py_checkpoint_reader.py,66,function,Get the tensor from the Checkpoint object. 12274,NewCheckpointReader,tensorflow/tensorflow/python/training/py_checkpoint_reader.py,85,function,"A function that returns a CheckPointReader. Args: filepattern: The filename. Returns: A CheckpointReader object." 12275,do_quantize_training_on_graphdef,tensorflow/tensorflow/python/training/quantize_training.py,31,function,"A general quantization scheme is being developed in `tf.contrib.quantize`. Consider using that instead, though since it is in the tf.contrib namespace, it is not subject to backward compatibility guarantees. Args: input_graph: A `GraphDef`. num_bits: The number of bits for quantize training. Returns: The graph with quantize training done." 12276,PywrapQuantizeTrainingTest,tensorflow/tensorflow/python/training/quantize_training_test.py,35,class, 12277,QueueRunner,tensorflow/tensorflow/python/training/queue_runner_impl.py,38,class,"Holds a list of enqueue operations for a queue, each to be run in a thread. Queues are a convenient TensorFlow mechanism to compute tensors asynchronously using multiple threads. For example, in the canonical 'Input Reader' setup one set of threads generates filenames in a queue; a second set of threads reads records from the files, processes them, and enqueues tensors on a second queue; a third set of threads dequeues these input records to construct batches and runs them through training operations. There are several delicate issues when running multiple threads that way: closing the queues in sequence as the input is exhausted, correctly catching and reporting exceptions, etc. The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.
@compatibility(eager) QueueRunners are not compatible with eager execution. Instead, please use `tf.data` to get data into your model. @end_compatibility" 12278,add_queue_runner,tensorflow/tensorflow/python/training/queue_runner_impl.py,396,function,"Adds a `QueueRunner` to a collection in the graph. When building a complex model that uses many queues it is often difficult to gather all the queue runners that need to be run. This convenience function allows you to add a queue runner to a well known collection in the graph. The companion method `start_queue_runners()` can be used to start threads for all the collected queue runners. Args: qr: A `QueueRunner`. collection: A `GraphKey` specifying the graph collection to add the queue runner to. Defaults to `GraphKeys.QUEUE_RUNNERS`." 12279,start_queue_runners,tensorflow/tensorflow/python/training/queue_runner_impl.py,417,function,"Starts all queue runners collected in the graph. This is a companion method to `add_queue_runner()`. It just starts threads for all queue runners collected in the graph. It returns the list of all threads. Args: sess: `Session` used to run the queue ops. Defaults to the default session. coord: Optional `Coordinator` for coordinating the started threads. daemon: Whether the threads should be marked as `daemons`, meaning they don't block program exit. start: Set to `False` to only create the threads, not start them. collection: A `GraphKey` specifying the graph collection to get the queue runners from. Defaults to `GraphKeys.QUEUE_RUNNERS`. Raises: ValueError: if `sess` is None and there isn't any default session. TypeError: if `sess` is not a `tf.compat.v1.Session` object. Returns: A list of threads. Raises: RuntimeError: If called with eager execution enabled. ValueError: If called without a default `tf.compat.v1.Session` registered. @compatibility(eager) Not compatible with eager execution. To ingest data under eager execution, use the `tf.data` API instead. @end_compatibility" 12280,QueueRunnerTest,tensorflow/tensorflow/python/training/queue_runner_test.py,43,class, 12281,RMSPropOptimizer,tensorflow/tensorflow/python/training/rmsprop.py,54,class,"Optimizer that implements the RMSProp algorithm (Tieleman et al. 2012). References: Coursera slide 29: Hinton, 2012 ([pdf](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf))" 12282,RMSPropOptimizerTest,tensorflow/tensorflow/python/training/rmsprop_test.py,59,class, 12283,BaseSaverBuilder,tensorflow/tensorflow/python/training/saver.py,70,class,"Base class for Savers. Can be extended to create different Ops." 12284,BulkSaverBuilder,tensorflow/tensorflow/python/training/saver.py,567,class,SaverBuilder with support for bulk restoring multiple saveables. 12285,_get_saver_or_default,tensorflow/tensorflow/python/training/saver.py,586,function,"Returns the saver from SAVERS collection, or creates a default one. This method is used by other members of the training module, such as `Scaffold`, or `CheckpointSaverHook`. Returns: `Saver`. Raises: RuntimeError: If the SAVERS collection already has more than one item." 12286,Saver,tensorflow/tensorflow/python/training/saver.py,614,class,"Saves and restores variables. See [Variables](https://tensorflow.org/guide/variables) for an overview of variables, saving and restoring. The `Saver` class adds ops to save and restore variables to and from *checkpoints*. It also provides convenience methods to run these ops. Checkpoints are binary files in a proprietary format which map variable names to tensor values.
The best way to examine the contents of a checkpoint is to load it using a `Saver`. Savers can automatically number checkpoint filenames with a provided counter. This lets you keep multiple checkpoints at different steps while training a model. For example you can number the checkpoint filenames with the training step number. To avoid filling up disks, savers manage checkpoint files automatically. For example, they can keep only the N most recent files, or one checkpoint for every N hours of training. You number checkpoint filenames by passing a value to the optional `global_step` argument to `save()`: ```python saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0' ... saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000' ``` Additionally, optional arguments to the `Saver()` constructor let you control the proliferation of checkpoint files on disk: * `max_to_keep` indicates the maximum number of recent checkpoint files to keep. As new files are created, older files are deleted. If None or 0, no checkpoints are deleted from the filesystem but only the last one is kept in the `checkpoint` file. Defaults to 5 (that is, the 5 most recent checkpoint files are kept). * `keep_checkpoint_every_n_hours`: In addition to keeping the most recent `max_to_keep` checkpoint files, you might want to keep one checkpoint file for every N hours of training. This can be useful if you want to later analyze how a model progressed during a long training session. For example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep one checkpoint file for every 2 hours of training. The default value of 10,000 hours effectively disables the feature. Note that you still have to call the `save()` method to save the model. Passing these arguments to the constructor will not save variables automatically for you. A training program that saves regularly looks like: ```python ... # Create a saver. saver = tf.compat.v1.train.Saver(...variables...) # Launch the graph and train, saving the model every 1,000 steps. sess = tf.compat.v1.Session() for step in xrange(1000000): sess.run(..training_op..) if step % 1000 == 0: # Append the step number to the checkpoint name: saver.save(sess, 'my-model', global_step=step) ``` In addition to checkpoint files, savers keep a protocol buffer on disk with the list of recent checkpoints. This is used to manage numbered checkpoint files and by `latest_checkpoint()`, which makes it easy to discover the path to the most recent checkpoint. That protocol buffer is stored in a file named 'checkpoint' next to the checkpoint files. If you create several savers, you can specify a different filename for the protocol buffer file in the call to `save()`." 12287,import_meta_graph,tensorflow/tensorflow/python/training/saver.py,1351,function,"Recreates a Graph saved in a `MetaGraphDef` proto. This function takes a `MetaGraphDef` protocol buffer as input. If the argument is a file containing a `MetaGraphDef` protocol buffer, it constructs a protocol buffer from the file content. The function then adds all the nodes from the `graph_def` field to the current graph, recreates all the collections, and returns a saver constructed from the `saver_def` field. In combination with `export_meta_graph()`, this function can be used to * Serialize a graph along with other Python objects such as `QueueRunner`, `Variable` into a `MetaGraphDef`. * Restart training from a saved graph and checkpoints. * Run inference from a saved graph and checkpoints. ```Python ...
# Create a saver. saver = tf.compat.v1.train.Saver(...variables...) # Remember the training_op we want to run by adding it to a collection. tf.compat.v1.add_to_collection('train_op', train_op) sess = tf.compat.v1.Session() for step in xrange(1000000): sess.run(train_op) if step % 1000 == 0: # Saves checkpoint, which by default also exports a meta_graph # named 'my-model-global_step.meta'. saver.save(sess, 'my-model', global_step=step) ``` Later we can continue training from this saved `meta_graph` without building the model from scratch. ```Python with tf.Session() as sess: new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta') new_saver.restore(sess, 'my-save-dir/my-model-10000') # tf.get_collection() returns a list. In this example we only want # the first one. train_op = tf.get_collection('train_op')[0] for step in xrange(1000000): sess.run(train_op) ``` NOTE: Restarting training from saved `meta_graph` only works if the device assignments have not changed. Example: Variables, placeholders, and independent operations can also be stored, as shown in the following example. ```Python # Saving contents and operations. v1 = tf.placeholder(tf.float32, name=""v1"") v2 = tf.placeholder(tf.float32, name=""v2"") v3 = tf.math.multiply(v1, v2) vx = tf.Variable(10.0, name=""vx"") v4 = tf.add(v3, vx, name=""v4"") saver = tf.train.Saver([vx]) sess = tf.Session() sess.run(tf.global_variables_initializer()) sess.run(vx.assign(tf.add(vx, vx))) result = sess.run(v4, feed_dict={v1:12.0, v2:3.3}) print(result) saver.save(sess, ""./model_ex1"") ``` Later this model can be restored and contents loaded. ```Python # Restoring variables and running operations. saver = tf.train.import_meta_graph(""./model_ex1.meta"") sess = tf.Session() saver.restore(sess, ""./model_ex1"") result = sess.run(""v4:0"", feed_dict={""v1:0"": 12.0, ""v2:0"": 3.3}) print(result) ``` Args: meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including the path) containing a `MetaGraphDef`. clear_devices: Whether or not to clear the device field for an `Operation` or `Tensor` during import. import_scope: Optional `string`. Name scope to add. Only used when initializing from protocol buffer. **kwargs: Optional keyed arguments. Returns: A saver constructed from `saver_def` in `MetaGraphDef` or None. A None value is returned if no variables exist in the `MetaGraphDef` (i.e., there are no variables to restore). Raises: RuntimeError: If called with eager execution enabled. @compatibility(eager) Exporting/importing meta graphs is not supported. No graph exists when eager execution is enabled. @end_compatibility" 12288,_import_meta_graph_with_return_elements,tensorflow/tensorflow/python/training/saver.py,1465,function,"Import MetaGraph, and return both a saver and returned elements." 12289,_create_saver_from_imported_meta_graph,tensorflow/tensorflow/python/training/saver.py,1493,function,Return a saver for restoring variable values to an imported MetaGraph. 12290,export_meta_graph,tensorflow/tensorflow/python/training/saver.py,1518,function,"Returns `MetaGraphDef` proto. Optionally writes it to filename. This function exports the graph, saver, and collection objects into `MetaGraphDef` protocol buffer with the intention of it being imported at a later time or location to restart training, run inference, or be a subgraph. Args: filename: Optional filename including the path for writing the generated `MetaGraphDef` protocol buffer. meta_info_def: `MetaInfoDef` protocol buffer. graph_def: `GraphDef` protocol buffer. 
saver_def: `SaverDef` protocol buffer. collection_list: List of string keys to collect. as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto. graph: The `Graph` to export. If `None`, use the default graph. export_scope: Optional `string`. Name scope under which to extract the subgraph. The scope name will be stripped from the node definitions for easy import later into new name scopes. If `None`, the whole graph is exported. graph_def and export_scope cannot both be specified. clear_devices: Whether or not to clear the device field for an `Operation` or `Tensor` during export. clear_extraneous_savers: Remove any Saver-related information from the graph (both Save/Restore ops and SaverDefs) that are not associated with the provided SaverDef. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. For a detailed guide, see [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes). save_debug_info: If `True`, save the GraphDebugInfo to a separate file, which is in the same directory as filename and with `_debug` added before the file extension. **kwargs: Optional keyed arguments. Returns: A `MetaGraphDef` proto. Raises: ValueError: When the `GraphDef` is larger than 2GB. RuntimeError: If called with eager execution enabled. @compatibility(eager) Exporting/importing meta graphs is not supported unless both `graph_def` and `graph` are provided. No graph exists when eager execution is enabled. @end_compatibility" 12291,_wrap_restore_error_with_msg,tensorflow/tensorflow/python/training/saver.py,1602,function, 12292,object_graph_key_mapping,tensorflow/tensorflow/python/training/saver.py,1617,function,"Return name to key mappings from the checkpoint. Args: checkpoint_path: string, path to object-based checkpoint Returns: Dictionary mapping tensor names to checkpoint keys." 12293,saver_from_object_based_checkpoint,tensorflow/tensorflow/python/training/saver.py,1637,function,"Return a `Saver` which reads from an object-based checkpoint. This function validates that all variables in the variables list are remapped in the object-based checkpoint (or `names_to_keys` dict if provided). A saver will be created with the list of remapped variables. The `cached_saver` argument allows the user to pass in a previously created saver, so multiple `saver.restore()` calls don't pollute the graph when graph building. This assumes that keys are consistent, meaning that the 1) `checkpoint_path` checkpoint, and 2) checkpoint used to create the `cached_saver` are the same type of object-based checkpoint. If this argument is set, this function will simply validate that all variables have been remapped by the checkpoint at `checkpoint_path`. Note that in general, `tf.train.Checkpoint` should be used to restore/save an object-based checkpoint. Args: checkpoint_path: string, path to object-based checkpoint var_list: list of `Variables` that appear in the checkpoint. If `None`, `var_list` will be set to all saveable objects. builder: a `BaseSaverBuilder` instance. If `None`, a new `BulkSaverBuilder` will be created. names_to_keys: dict mapping string tensor names to checkpoint keys. If `None`, this dict will be generated from the checkpoint file. cached_saver: Cached `Saver` object with remapped variables. Returns: `Saver` with remapped variables for reading from an object-based checkpoint.
Raises: ValueError: if the checkpoint provided is not an object-based checkpoint. NotFoundError: If one of the variables in `var_list` can not be found in the checkpoint. This could mean the checkpoint or `names_to_keys` mapping is missing the variable." 12294,SaverLargePartitionedVariableTest,tensorflow/tensorflow/python/training/saver_large_partitioned_variable_test.py,34,class, 12295,SaverLargeVariableTest,tensorflow/tensorflow/python/training/saver_large_variable_test.py,34,class, 12296,SaverTest,tensorflow/tensorflow/python/training/saver_test.py,78,class, 12297,SaveRestoreShardedTest,tensorflow/tensorflow/python/training/saver_test.py,852,class, 12298,SaveRestoreShardedTestV2,tensorflow/tensorflow/python/training/saver_test.py,1107,class, 12299,MaxToKeepTest,tensorflow/tensorflow/python/training/saver_test.py,1241,class, 12300,RecoverLastCheckpointsTest,tensorflow/tensorflow/python/training/saver_test.py,1607,class, 12301,KeepCheckpointEveryNHoursTest,tensorflow/tensorflow/python/training/saver_test.py,1668,class, 12302,SaveRestoreWithVariableNameMap,tensorflow/tensorflow/python/training/saver_test.py,1727,class, 12303,MetaGraphTest,tensorflow/tensorflow/python/training/saver_test.py,1803,class, 12304,CheckpointReaderTest,tensorflow/tensorflow/python/training/saver_test.py,2585,class, 12305,CheckpointReaderForV2Test,tensorflow/tensorflow/python/training/saver_test.py,2637,class, 12306,WriteGraphTest,tensorflow/tensorflow/python/training/saver_test.py,2641,class, 12307,ScopedGraphTest,tensorflow/tensorflow/python/training/saver_test.py,2670,class, 12308,_OwnsAVariableSimple,tensorflow/tensorflow/python/training/saver_test.py,2977,class,A Trackable object which can be saved using a tf.train.Saver. 12309,_MirroringSaveable,tensorflow/tensorflow/python/training/saver_test.py,2993,class, 12310,_OwnsMirroredVariables,tensorflow/tensorflow/python/training/saver_test.py,3010,class,A Trackable object which returns a more complex SaveableObject. 12311,TrackableCompatibilityTests,tensorflow/tensorflow/python/training/saver_test.py,3033,class, 12312,CheckpointedOp,tensorflow/tensorflow/python/training/saver_test_utils.py,28,class,"Op with a custom checkpointing implementation. Defined as part of the test because the MutableHashTable Python code is currently in contrib." 12313,_make_server_def,tensorflow/tensorflow/python/training/server_lib.py,31,function,"Creates a `tf.train.ServerDef` protocol buffer. Args: server_or_cluster_def: A `tf.train.ServerDef` or `tf.train.ClusterDef` protocol buffer, or a `tf.train.ClusterSpec` object, describing the server to be defined and/or the cluster of which it is a member. job_name: (Optional.) Specifies the name of the job of which the server is a member. Defaults to the value in `server_or_cluster_def`, if specified. task_index: (Optional.) Specifies the task index of the server in its job. Defaults to the value in `server_or_cluster_def`, if specified. Otherwise defaults to 0 if the server's job has only one task. protocol: (Optional.) Specifies the protocol to be used by the server. Acceptable values include `""grpc"", ""grpc+verbs""`. Defaults to the value in `server_or_cluster_def`, if specified. Otherwise defaults to `""grpc""`. config: (Optional.) A `tf.compat.v1.ConfigProto` that specifies default configuration options for all sessions that run on this server. Returns: A `tf.train.ServerDef`. Raises: TypeError: If the arguments do not have the appropriate type. ValueError: If an argument is not specified and cannot be inferred."
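A minimal save-and-restore round trip with the `Saver` documented above; the variable, its value, and the temporary directory are illustrative:

```python
import os
import tempfile

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

v = tf.get_variable("v", initializer=42.0)
saver = tf.train.Saver()
ckpt_dir = tempfile.mkdtemp()

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Writes model.ckpt-0.* plus the 'checkpoint' protocol buffer file.
  path = saver.save(sess, os.path.join(ckpt_dir, "model.ckpt"), global_step=0)

with tf.Session() as sess:
  saver.restore(sess, path)  # no initializer needed after a restore
  print(sess.run(v))
```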
12314,Server,tensorflow/tensorflow/python/training/server_lib.py,100,class,"An in-process TensorFlow server, for use in distributed training. A `tf.distribute.Server` instance encapsulates a set of devices and a `tf.compat.v1.Session` target that can participate in distributed training. A server belongs to a cluster (specified by a `tf.train.ClusterSpec`), and corresponds to a particular task in a named job. The server can communicate with any other server in the same cluster." 12315,ClusterSpec,tensorflow/tensorflow/python/training/server_lib.py,247,class,"Represents a cluster as a set of ""tasks"", organized into ""jobs"". A `tf.train.ClusterSpec` represents the set of processes that participate in a distributed TensorFlow computation. Every `tf.distribute.Server` is constructed in a particular cluster. To create a cluster with two jobs and five tasks, you specify the mapping from job names to lists of network addresses (typically hostname-port pairs). ```python cluster = tf.train.ClusterSpec({""worker"": [""worker0.example.com:2222"", ""worker1.example.com:2222"", ""worker2.example.com:2222""], ""ps"": [""ps0.example.com:2222"", ""ps1.example.com:2222""]}) ``` Each job may also be specified as a sparse mapping from task indices to network addresses. This enables a server to be configured without needing to know the identity of (for example) all other worker tasks: ```python cluster = tf.train.ClusterSpec({""worker"": {1: ""worker1.example.com:2222""}, ""ps"": [""ps0.example.com:2222"", ""ps1.example.com:2222""]}) ```" 12316,ClusterDeviceFilters,tensorflow/tensorflow/python/training/server_lib.py,500,class,"Represents a collection of device filters for the remote workers in cluster. NOTE: this is an experimental API and subject to changes. Set device filters for selective jobs and tasks. For each remote worker, the device filters are a list of strings. When any filters are present, the remote worker will ignore all devices which do not match any of its filters. Each filter can be partially specified, e.g. ""/job:ps"", ""/job:worker/replica:3"", etc. Note that a device is always visible to the worker it is located on. For example, to set the device filters for a parameter server cluster: ```python cdf = tf.config.experimental.ClusterDeviceFilters() for i in range(num_workers): cdf.set_device_filters('worker', i, ['/job:ps']) for i in range(num_ps): cdf.set_device_filters('ps', i, ['/job:worker']) tf.config.experimental_connect_to_cluster(cluster_def, cluster_device_filters=cdf) ``` The device filters can be partially specified. For remote tasks that do not have device filters specified, all devices will be visible to them."
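The `ClusterSpec`/`Server` pairing described above can be sketched as follows; the job layout and localhost ports are illustrative, and each task in a real cluster would start its own server:

```python
import tensorflow.compat.v1 as tf

# Map job names to task addresses, as in the ClusterSpec docstring.
cluster = tf.train.ClusterSpec({
    "worker": ["localhost:2222", "localhost:2223"],
    "ps": ["localhost:2224"],
})

# Start the in-process server for worker task 0; its target can be passed
# to a tf.compat.v1.Session to participate in the cluster.
server = tf.distribute.Server(cluster, job_name="worker", task_index=0)
print(server.target)  # e.g. grpc://localhost:2222
```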
12317,MultipleContainersTest,tensorflow/tensorflow/python/training/server_lib_multiple_containers_test.py,30,class, 12318,SameVariablesClearContainerTest,tensorflow/tensorflow/python/training/server_lib_same_variables_clear_container_test.py,29,class, 12319,SameVariablesClearTest,tensorflow/tensorflow/python/training/server_lib_same_variables_clear_test.py,30,class, 12320,SameVariablesNoClearTest,tensorflow/tensorflow/python/training/server_lib_same_variables_no_clear_test.py,30,class, 12321,SparseJobTest,tensorflow/tensorflow/python/training/server_lib_sparse_job_test.py,29,class, 12322,GrpcServerTest,tensorflow/tensorflow/python/training/server_lib_test.py,42,class, 12323,ServerDefTest,tensorflow/tensorflow/python/training/server_lib_test.py,335,class, 12324,ClusterSpecTest,tensorflow/tensorflow/python/training/server_lib_test.py,423,class, 12325,_maybe_name,tensorflow/tensorflow/python/training/session_manager.py,33,function,"Returns object name if it has one, or a message otherwise. This is useful for names that appear in error messages. Args: obj: Object to get the name of. Returns: name, ""None"", or a ""no name"" message." 12326,SessionManager,tensorflow/tensorflow/python/training/session_manager.py,51,class,"Training helper that restores from checkpoint and creates session. This class is a small wrapper that takes care of session creation and checkpoint recovery. It also provides functions to facilitate coordination among multiple training threads or processes. * Checkpointing trained variables as the training progresses. * Initializing variables on startup, restoring them from the most recent checkpoint after a crash, or waiting for checkpoints to become available. ### Usage: ```python with tf.Graph().as_default(): ...add operations to the graph... # Create a SessionManager that will checkpoint the model in '/tmp/mydir'. sm = SessionManager() sess = sm.prepare_session(master, init_op, saver, checkpoint_dir) # Use the session to train the graph. while True: sess.run() ``` `prepare_session()` initializes or restores a model. It requires `init_op` and `saver` as arguments. A second process could wait for the model to be ready by doing the following: ```python with tf.Graph().as_default(): ...add operations to the graph... # Create a SessionManager that will wait for the model to become ready. sm = SessionManager() sess = sm.wait_for_session(master) # Use the session to train the graph. while True: sess.run() ``` `wait_for_session()` waits for a model to be initialized by other processes." 12327,_ready,tensorflow/tensorflow/python/training/session_manager.py,515,function,"Checks if the model is ready or not, as determined by op. Args: op: An op, either _ready_op or _ready_for_local_init_op, which defines the readiness of the model. sess: A `Session`. msg: A message to log as a warning if not ready Returns: A tuple (is_ready, msg), where is_ready is True if ready and False otherwise, and msg is `None` if the model is ready, a `String` with the reason why it is not ready otherwise." 12328,_CountDownTimer,tensorflow/tensorflow/python/training/session_manager.py,554,class, 12329,SessionManagerTest,tensorflow/tensorflow/python/training/session_manager_test.py,41,class, 12330,ObsoleteSessionManagerTest,tensorflow/tensorflow/python/training/session_manager_test.py,677,class, 12331,SessionRunHook,tensorflow/tensorflow/python/training/session_run_hook.py,98,class,Hook to extend calls to MonitoredSession.run().
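A custom `SessionRunHook` typically implements `before_run()` to request extra fetches and `after_run()` to consume them. A minimal sketch; `LossLoggingHook` and its logging interval are hypothetical names used for illustration:

```python
import tensorflow.compat.v1 as tf


class LossLoggingHook(tf.train.SessionRunHook):
  """Illustrative hook: fetches a loss tensor on each run() and logs it."""

  def __init__(self, loss_op, every_n=10):
    self._loss_op = loss_op
    self._every_n = every_n
    self._step = 0

  def before_run(self, run_context):
    # Ask the monitored session to fetch the loss alongside the caller's
    # own fetches; the value comes back in after_run().
    return tf.train.SessionRunArgs(self._loss_op)

  def after_run(self, run_context, run_values):
    self._step += 1
    if self._step % self._every_n == 0:
      print("step %d, loss %f" % (self._step, run_values.results))
```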
12332,SessionRunArgs,tensorflow/tensorflow/python/training/session_run_hook.py,190,class,"Represents arguments to be added to a `Session.run()` call. Args: fetches: Exactly like the 'fetches' argument to Session.Run(). Can be a single tensor or op, a list of 'fetches' or a dictionary of fetches. For example: fetches = global_step_tensor fetches = [train_op, summary_op, global_step_tensor] fetches = {'step': global_step_tensor, 'summ': summary_op} Note that this can recurse as expected: fetches = {'step': global_step_tensor, 'ops': [train_op, check_nan_op]} feed_dict: Exactly like the `feed_dict` argument to `Session.Run()` options: Exactly like the `options` argument to `Session.run()`, i.e., a config_pb2.RunOptions proto." 12333,SessionRunContext,tensorflow/tensorflow/python/training/session_run_hook.py,215,class,"Provides information about the `session.run()` call being made. Provides information about original request to `Session.Run()` function. SessionRunHook objects can stop the loop by calling `request_stop()` of `run_context`. In the future we may use this object to add more information about run without changing the Hook API." 12334,SessionRunValues,tensorflow/tensorflow/python/training/session_run_hook.py,267,class,"Contains the results of `Session.run()`. In the future we may use this object to add more information about result of run without changing the Hook API. Args: results: The return values from `Session.run()` corresponding to the fetches attribute returned in the RunArgs. Note that this has the same shape as the RunArgs fetches. For example: fetches = global_step_tensor => results = nparray(int) fetches = [train_op, summary_op, global_step_tensor] => results = [None, nparray(string), nparray(int)] fetches = {'step': global_step_tensor, 'summ': summary_op} => results = {'step': nparray(int), 'summ': nparray(string)} options: `RunOptions` from the `Session.run()` call. run_metadata: `RunMetadata` from the `Session.run()` call." 12335,_create_slot_var,tensorflow/tensorflow/python/training/slot_creator.py,50,function,Helper function for creating a slot variable. 12336,create_slot,tensorflow/tensorflow/python/training/slot_creator.py,104,function,"Create a slot initialized to the given value. The type of the slot is determined by the given value. Args: primary: The primary `Variable` or `Tensor`. val: A `Tensor` specifying the initial value of the slot. name: Name to use for the slot variable. colocate_with_primary: Boolean. If True the slot is located on the same device as `primary`. Returns: A `Variable` object." 12337,create_slot_with_initializer,tensorflow/tensorflow/python/training/slot_creator.py,138,function,"Creates a slot initialized using an `Initializer`. The type of the slot is determined by the given value. Args: primary: The primary `Variable` or `Tensor`. initializer: An `Initializer`. The initial value of the slot. shape: Shape of the initial value of the slot. dtype: Type of the value of the slot. name: Name to use for the slot variable. colocate_with_primary: Boolean. If True the slot is located on the same device as `primary`. Returns: A `Variable` object." 12338,create_zeros_slot,tensorflow/tensorflow/python/training/slot_creator.py,177,function,"Create a slot initialized to 0 with same shape as the primary object. Args: primary: The primary `Variable` or `Tensor`. name: Name to use for the slot variable. dtype: Type of the slot variable. Defaults to the type of `primary`. colocate_with_primary: Boolean. 
If True the slot is located on the same device as `primary`. Returns: A `Variable` object." 12339,SlotCreatorTest,tensorflow/tensorflow/python/training/slot_creator_test.py,33,class, 12340,SummaryWriter,tensorflow/tensorflow/python/training/summary_io.py,29,class, 12341,Supervisor,tensorflow/tensorflow/python/training/supervisor.py,44,class,"A training helper that checkpoints models and computes summaries. This class is deprecated. Please use `tf.compat.v1.train.MonitoredTrainingSession` instead. The Supervisor is a small wrapper around a `Coordinator`, a `Saver`, and a `SessionManager` that takes care of common needs of TensorFlow training programs. #### Use for a single program ```python with tf.Graph().as_default(): ...add operations to the graph... # Create a Supervisor that will checkpoint the model in '/tmp/mydir'. sv = Supervisor(logdir='/tmp/mydir') # Get a TensorFlow session managed by the supervisor. with sv.managed_session(FLAGS.master) as sess: # Use the session to train the graph. while not sv.should_stop(): sess.run() ``` Within the `with sv.managed_session()` block all variables in the graph have been initialized. In addition, a few services have been started to checkpoint the model and add summaries to the event log. If the program crashes and is restarted, the managed session automatically reinitializes variables from the most recent checkpoint. The supervisor is notified of any exception raised by one of the services. After an exception is raised, `should_stop()` returns `True`. In that case the training loop should also stop. This is why the training loop has to check for `sv.should_stop()`. Exceptions that indicate that the training inputs have been exhausted, `tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True` but are not re-raised from the `with` block: they indicate a normal termination. #### Use for multiple replicas To train with replicas you deploy the same program in a `Cluster`. One of the tasks must be identified as the *chief*: the task that handles initialization, checkpoints, summaries, and recovery. The other tasks depend on the *chief* for these services. The only change you have to make to the single program code is to indicate if the program is running as the *chief*. ```python # Choose a task as the chief. This could be based on server_def.task_index, # or job_def.name, or job_def.tasks. It's entirely up to the end user. # But there can be only one *chief*. is_chief = (server_def.task_index == 0) server = tf.distribute.Server(server_def) with tf.Graph().as_default(): ...add operations to the graph... # Create a Supervisor that uses log directory on a shared file system. # Indicate if you are the 'chief' sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief) # Get a Session in a TensorFlow server on the cluster. with sv.managed_session(server.target) as sess: # Use the session to train the graph. while not sv.should_stop(): sess.run() ``` In the *chief* task, the `Supervisor` works exactly as in the first example above. In the other tasks `sv.managed_session()` waits for the Model to have been initialized before returning a session to the training code. The non-chief tasks depend on the chief task for initializing the model. If one of the tasks crashes and restarts, `managed_session()` checks if the Model is initialized. If yes, it just creates a session and returns it to the training code that proceeds normally.
If the model needs to be initialized, the chief task takes care of reinitializing it; the other tasks just wait for the model to have been initialized. NOTE: This modified program still works fine as a single program. The single program marks itself as the chief. #### What `master` string to use Whether you are running on your machine or in the cluster you can use the following values for the --master flag: * Specifying `''` requests an in-process session that does not use RPC. * Specifying `'local'` requests a session that uses the RPC-based ""Master interface"" to run TensorFlow programs. See `tf.train.Server.create_local_server` for details. * Specifying `'grpc://hostname:port'` requests a session that uses the RPC interface to a specific host, and also allows the in-process master to access remote tensorflow workers. Often, it is appropriate to pass `server.target` (for some `tf.distribute.Server` named `server`). #### Advanced use ##### Launching additional services `managed_session()` launches the Checkpoint and Summary services (threads). If you need more services to run you can simply launch them in the block controlled by `managed_session()`. Example: Start a thread to print losses. We want this thread to run every 60 seconds, so we launch it with `sv.loop()`. ```python ... sv = Supervisor(logdir='/tmp/mydir') with sv.managed_session(FLAGS.master) as sess: sv.loop(60, print_loss, (sess, )) while not sv.should_stop(): sess.run(my_train_op) ``` ##### Launching fewer services `managed_session()` launches the ""summary"" and ""checkpoint"" threads which use either the optional `summary_op` and `saver` passed to the constructor, or default ones created automatically by the supervisor. If you want to run your own summary and checkpointing logic, disable these services by passing `None` to the `summary_op` and `saver` parameters. Example: Create summaries manually every 100 steps in the chief. ```python # Create a Supervisor with no automatic summaries. sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None) # As summary_op was None, managed_session() does not start the # summary thread. with sv.managed_session(FLAGS.master) as sess: for step in xrange(1000000): if sv.should_stop(): break if is_chief and step % 100 == 0: # Create the summary every 100 chief steps. sv.summary_computed(sess, sess.run(my_summary_op)) else: # Train normally sess.run(my_train_op) ``` ##### Custom model initialization `managed_session()` only supports initializing the model by running an `init_op` or restoring from the latest checkpoint. If you have special initialization needs, see how to specify a `local_init_op` when creating the supervisor. You can also use the `SessionManager` directly to create a session and check if it could be initialized automatically." 12342,SVSummaryThread,tensorflow/tensorflow/python/training/supervisor.py,1028,class,A thread to save summaries on a timer. 12343,SVStepCounterThread,tensorflow/tensorflow/python/training/supervisor.py,1054,class,Threads to count steps and measure their duration. 12344,SVTimerCheckpointThread,tensorflow/tensorflow/python/training/supervisor.py,1102,class,A thread to checkpoint on a timer. 12345,_summary_iterator,tensorflow/tensorflow/python/training/supervisor_test.py,56,function,"Reads events from test_dir/events. Args: test_dir: Name of the test directory.
Returns: A summary_iterator" 12346,SupervisorTest,tensorflow/tensorflow/python/training/supervisor_test.py,69,class, 12347,SyncReplicasOptimizer,tensorflow/tensorflow/python/training/sync_replicas_optimizer.py,45,class,"Class to synchronize, aggregate gradients and pass them to the optimizer. This class is deprecated. For synchronous training, please use [Distribution Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute). In a typical asynchronous training environment, it's common to have some stale gradients. For example, with a N-replica asynchronous training, gradients will be applied to the variables N times independently. Depending on each replica's training speed, some gradients might be calculated from copies of the variable from several steps back (N-1 steps on average). This optimizer avoids stale gradients by collecting gradients from all replicas, averaging them, then applying them to the variables in one shot, after which replicas can fetch the new variables and continue. The following accumulators/queue are created: * N `gradient accumulators`, one per variable to train. Gradients are pushed to them and the chief worker will wait until enough gradients are collected and then average them before applying to variables. The accumulator will drop all stale gradients (more details in the accumulator op). * 1 `token` queue where the optimizer pushes the new global_step value after all variables are updated. The following local variable is created: * `sync_rep_local_step`, one per replica. Compared against the global_step in each accumulator to check for staleness of the gradients. The optimizer adds nodes to the graph to collect gradients and pause the trainers until variables are updated. For the Parameter Server job: 1. An accumulator is created for each variable, and each replica pushes the gradients into the accumulators instead of directly applying them to the variables. 2. Each accumulator averages once enough gradients (replicas_to_aggregate) have been accumulated. 3. Apply the averaged gradients to the variables. 4. Only after all variables have been updated, increment the global step. 5. Only after step 4, pushes `global_step` in the `token_queue`, once for each worker replica. The workers can now fetch the global step, use it to update its local_step variable and start the next batch. Please note that some workers can consume multiple minibatches, while some may not consume even one. This is because each worker fetches minibatches as long as a token exists. If one worker is stuck for some reason and does not consume a token, another worker can use it. For the replicas: 1. Start a step: fetch variables and compute gradients. 2. Once the gradients have been computed, push them into gradient accumulators. Each accumulator will check the staleness and drop the stale ones. 3. After pushing all the gradients, dequeue an updated value of global_step from the token queue and record that step to its local_step variable. Note that this is effectively a barrier. 4. Start the next batch. ### Usage ```python # Create any optimizer to update the variables, say a simple SGD: opt = GradientDescentOptimizer(learning_rate=0.1) # Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each # step the optimizer collects 50 gradients before applying to variables. # Note that if you want to have 2 backup replicas, you can change # total_num_replicas=52 and make sure this number matches how many physical # replicas you started in your job.
opt = tf.compat.v1.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=50, total_num_replicas=50) # Some models have startup_delays to help stabilize the model but when using # sync_replicas training, set it to 0. # Now you can call `minimize()` or `compute_gradients()` and # `apply_gradients()` normally training_op = opt.minimize(total_loss, global_step=self.global_step) # You can create the hook which handles initialization and queues. sync_replicas_hook = opt.make_session_run_hook(is_chief) ``` In the training program, every worker will run the train_op as if not synchronized. ```python with training.MonitoredTrainingSession( master=workers[worker_id].target, is_chief=is_chief, hooks=[sync_replicas_hook]) as mon_sess: while not mon_sess.should_stop(): mon_sess.run(training_op) ``` To use SyncReplicasOptimizer with an `Estimator`, you need to pass sync_replicas_hook when calling fit. ```python my_estimator = DNNClassifier(..., optimizer=opt) my_estimator.fit(..., hooks=[sync_replicas_hook]) ```" 12348,_SyncReplicasOptimizerHook,tensorflow/tensorflow/python/training/sync_replicas_optimizer.py,465,class,A SessionRunHook handles ops related to SyncReplicasOptimizer. 12349,get_workers,tensorflow/tensorflow/python/training/sync_replicas_optimizer_test.py,35,function, 12350,SyncReplicasOptimizerTest,tensorflow/tensorflow/python/training/sync_replicas_optimizer_test.py,87,class, 12351,SyncReplicasOptimizerHookTest,tensorflow/tensorflow/python/training/sync_replicas_optimizer_test.py,262,class, 12352,get_verbosity,tensorflow/tensorflow/python/training/tensorboard_logging.py,78,function, 12353,set_verbosity,tensorflow/tensorflow/python/training/tensorboard_logging.py,82,function, 12354,_check_verbosity,tensorflow/tensorflow/python/training/tensorboard_logging.py,88,function, 12355,set_summary_writer,tensorflow/tensorflow/python/training/tensorboard_logging.py,94,function,"Sets the summary writer that events will be logged to. Calling any logging methods inside this module without calling this method will fail. If you don't want to log, call `set_summary_writer(None)`. Args: summary_writer: Either a SummaryWriter or None. None will cause messages not to be logged to any SummaryWriter, but they will still be passed to the platform logging module." 12356,_clear_summary_writer,tensorflow/tensorflow/python/training/tensorboard_logging.py,109,function,"Makes all subsequent log invocations error. This is only used for testing. If you want to disable TensorBoard logging, call `set_summary_writer(None)` instead." 12357,log,tensorflow/tensorflow/python/training/tensorboard_logging.py,119,function,"Conditionally logs `message % args` at the level `level`. Note that tensorboard_logging verbosity and logging verbosity are separate; the message will always be passed through to the logging module regardless of whether it passes the tensorboard_logging verbosity check. Args: level: The verbosity level to use. Must be one of tensorboard_logging.{DEBUG, INFO, WARN, ERROR, FATAL}. message: The message template to use. *args: Arguments to interpolate to the message template, if any. Raises: ValueError: If `level` is not a valid logging level. RuntimeError: If the `SummaryWriter` to use has not been set."
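The `SyncReplicasOptimizer` entry above shows the wrapper-plus-hook pattern only in fragments. Below is a minimal, self-contained sketch of the same pattern under simplifying assumptions: a single process with `replicas_to_aggregate=1` (so the token queue never blocks) and an arbitrary one-variable quadratic loss standing in for a real model.

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Stand-in model: minimize (w - 3)^2 with plain SGD.
w = tf.Variable(1.0)
loss = tf.square(w - 3.0)
global_step = tf.train.get_or_create_global_step()

# Wrap the base optimizer. With a single replica, the accumulators fire
# after every step, so this sketch runs in one process.
opt = tf.train.GradientDescentOptimizer(0.1)
opt = tf.train.SyncReplicasOptimizer(
    opt, replicas_to_aggregate=1, total_num_replicas=1)
train_op = opt.minimize(loss, global_step=global_step)

# The hook handles token-queue and local-step initialization.
sync_hook = opt.make_session_run_hook(is_chief=True)

with tf.train.MonitoredTrainingSession(hooks=[sync_hook]) as sess:
    for _ in range(10):
        sess.run(train_op)
```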
12358,debug,tensorflow/tensorflow/python/training/tensorboard_logging.py,152,function, 12359,info,tensorflow/tensorflow/python/training/tensorboard_logging.py,156,function, 12360,warn,tensorflow/tensorflow/python/training/tensorboard_logging.py,160,function, 12361,error,tensorflow/tensorflow/python/training/tensorboard_logging.py,164,function, 12362,fatal,tensorflow/tensorflow/python/training/tensorboard_logging.py,168,function, 12363,EventLoggingTest,tensorflow/tensorflow/python/training/tensorboard_logging_test.py,37,class, 12364,TrainingOpsTest,tensorflow/tensorflow/python/training/training_ops_test.py,36,class, 12365,global_step,tensorflow/tensorflow/python/training/training_util.py,41,function,"Small helper to get the global step. ```python # Create a variable to hold the global_step. global_step_tensor = tf.Variable(10, trainable=False, name='global_step') # Create a session. sess = tf.compat.v1.Session() # Initialize the variable sess.run(global_step_tensor.initializer) # Get the variable value. print('global_step: %s' % tf.compat.v1.train.global_step(sess, global_step_tensor)) global_step: 10 ``` Args: sess: A TensorFlow `Session` object. global_step_tensor: `Tensor` or the `name` of the operation that contains the global step. Returns: The global step value." 12366,get_global_step,tensorflow/tensorflow/python/training/training_util.py,72,function,"Get the global step tensor. The global step tensor must be an integer variable. We first try to find it in the collection `GLOBAL_STEP`, or by name `global_step:0`. Args: graph: The graph to find the global step in. If missing, use default graph. Returns: The global step variable, or `None` if none was found. Raises: TypeError: If the global step tensor has a non-integer type, or if it is not a `Variable`." 12367,create_global_step,tensorflow/tensorflow/python/training/training_util.py,107,function,"Create global step tensor in graph. Args: graph: The graph in which to create the global step tensor. If missing, use default graph. Returns: Global step tensor. Raises: ValueError: if global step tensor is already defined." 12368,get_or_create_global_step,tensorflow/tensorflow/python/training/training_util.py,148,function,"Returns and creates (if necessary) the global step tensor. Args: graph: The graph in which to create the global step tensor. If missing, use default graph. Returns: The global step tensor." 12369,assert_global_step,tensorflow/tensorflow/python/training/training_util.py,166,function,"Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`. Args: global_step_tensor: `Tensor` to test." 12370,_get_global_step_read,tensorflow/tensorflow/python/training/training_util.py,188,function,"Gets global step read tensor in graph. Args: graph: The graph in which to create the global step read tensor. If missing, use default graph. Returns: Global step read tensor. Raises: RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY." 12371,_get_or_create_global_step_read,tensorflow/tensorflow/python/training/training_util.py,212,function,"Gets or creates global step read tensor in graph. Args: graph: The graph in which to create the global step read tensor. If missing, use default graph. Returns: Global step read tensor if there is a global_step_tensor, else None."
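To make the training_util helpers above concrete, here is a small graph-mode sketch (the values are arbitrary): `get_or_create_global_step` is idempotent within a graph, and `tf.compat.v1.train.global_step` reads the current value through a session, as in the docstring above.

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

with tf.Graph().as_default():
    step = tf.train.get_or_create_global_step()          # created on first call
    assert step is tf.train.get_or_create_global_step()  # reused afterwards

    increment = tf.assign_add(step, 1)
    with tf.Session() as sess:
        sess.run(step.initializer)
        sess.run(increment)
        # Read the value through the session.
        print(tf.train.global_step(sess, step))  # -> 1
```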
12372,_increment_global_step,tensorflow/tensorflow/python/training/training_util.py,242,function, 12373,GlobalStepTest,tensorflow/tensorflow/python/training/training_util_test.py,29,class, 12374,GlobalStepReadTest,tensorflow/tensorflow/python/training/training_util_test.py,96,class, 12375,VocabInfo,tensorflow/tensorflow/python/training/warm_starting_util.py,39,class,"Vocabulary information for warm-starting. See `tf.estimator.WarmStartSettings` for examples of using VocabInfo to warm-start. Args: new_vocab: [Required] A path to the new vocabulary file (used with the model to be trained). new_vocab_size: [Required] An integer indicating how many entries of the new vocabulary will be used in training. num_oov_buckets: [Required] An integer indicating how many OOV buckets are associated with the vocabulary. old_vocab: [Required] A path to the old vocabulary file (used with the checkpoint to be warm-started from). old_vocab_size: [Optional] An integer indicating how many entries of the old vocabulary were used in the creation of the checkpoint. If not provided, the entire old vocabulary will be used. backup_initializer: [Optional] A variable initializer used for variables corresponding to new vocabulary entries and OOV. If not provided, these entries will be zero-initialized. axis: [Optional] Denotes what axis the vocabulary corresponds to. The default, 0, corresponds to the most common use case (embeddings or linear weights for binary classification / regression). An axis of 1 could be used for warm-starting output layers with class vocabularies. Returns: A `VocabInfo` which represents the vocabulary information for warm-starting. Raises: ValueError: `axis` is neither 0 nor 1. Example Usage: ```python embeddings_vocab_info = tf.VocabInfo( new_vocab='embeddings_vocab', new_vocab_size=100, num_oov_buckets=1, old_vocab='pretrained_embeddings_vocab', old_vocab_size=10000, backup_initializer=tf.compat.v1.truncated_normal_initializer( mean=0.0, stddev=(1 / math.sqrt(embedding_dim))), axis=0) softmax_output_layer_kernel_vocab_info = tf.VocabInfo( new_vocab='class_vocab', new_vocab_size=5, num_oov_buckets=0, # No OOV for classes. old_vocab='old_class_vocab', old_vocab_size=8, backup_initializer=tf.compat.v1.glorot_uniform_initializer(), axis=1) softmax_output_layer_bias_vocab_info = tf.VocabInfo( new_vocab='class_vocab', new_vocab_size=5, num_oov_buckets=0, # No OOV for classes. old_vocab='old_class_vocab', old_vocab_size=8, backup_initializer=tf.compat.v1.zeros_initializer(), axis=0) #Currently, only axis=0 and axis=1 are supported. ``` " 12376,_infer_var_name,tensorflow/tensorflow/python/training/warm_starting_util.py,138,function,"Returns name of the `var`. Args: var: A list. The list can contain either of the following: (i) A single `Variable` (ii) A single `ResourceVariable` (iii) Multiple `Variable` objects which must be slices of the same larger variable. (iv) A single `PartitionedVariable` Returns: Name of the `var`" 12377,_get_var_info,tensorflow/tensorflow/python/training/warm_starting_util.py,159,function,"Helper method for standardizing Variable and naming. Args: var: Current graph's variable that needs to be warm-started (initialized). Can be either of the following: (i) `Variable` (ii) `ResourceVariable` (iii) list of `Variable`: The list must contain slices of the same larger variable. (iv) `PartitionedVariable` prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If None, we lookup tensor with same name as given `var`. Returns: A tuple of the Tensor name and var."
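To make the `VocabInfo` fields above concrete, here is a hedged sketch of how such an object is typically handed to the warm-starting utility (`warm_start`, entry 12381 below). All paths, sizes, the regex, and the variable name are hypothetical placeholders, and the call assumes the current graph already defines a matching embedding variable.

```python
import tensorflow.compat.v1 as tf

# All file paths, sizes, and names below are hypothetical.
vocab_info = tf.train.VocabInfo(
    new_vocab='/tmp/new_vocab.txt',   # vocab for the model being trained
    new_vocab_size=100,
    num_oov_buckets=1,
    old_vocab='/tmp/old_vocab.txt',   # vocab used by the old checkpoint
    old_vocab_size=50,
    backup_initializer=tf.zeros_initializer(),  # for new/OOV entries
    axis=0)

tf.train.warm_start(
    ckpt_to_initialize_from='/tmp/prev_model',
    vars_to_warm_start='.*embedding.*',
    var_name_to_vocab_info={
        'input_layer/words_embedding/embedding_weights': vocab_info})
```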
12378,_warm_start_var_with_vocab,tensorflow/tensorflow/python/training/warm_starting_util.py,195,function,"Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`. Use this method when the `var` is backed by a vocabulary. This method stitches the given `var` such that values corresponding to individual features in the vocabulary remain consistent irrespective of the changing order of the features between old and new vocabularies. Args: var: Current graph's variable that needs to be warm-started (initialized). Can be either of the following: (i) `Variable` (ii) `ResourceVariable` (iii) list of `Variable`: The list must contain slices of the same larger variable. (iv) `PartitionedVariable` current_vocab_path: Path to the vocab file used for the given `var`. current_vocab_size: An `int` specifying the number of entries in the current vocab. prev_ckpt: A string specifying the directory with checkpoint file(s) or path to checkpoint. The given checkpoint must have tensor with name `prev_tensor_name` (if not None) or tensor with name same as given `var`. prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`. previous_vocab_size: If provided, will constrain previous vocab to the first `previous_vocab_size` entries. -1 means use the entire previous vocab. current_oov_buckets: An `int` specifying the number of out-of-vocabulary buckets used for given `var`. prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If None, we lookup tensor with same name as given `var`. initializer: Variable initializer to be used for missing entries. If None, missing entries will be zero-initialized. axis: Axis of the variable that the provided vocabulary corresponds to. Raises: ValueError: If required args are not provided." 12379,_get_grouped_variables,tensorflow/tensorflow/python/training/warm_starting_util.py,317,function,"Collects and groups (possibly partitioned) variables into a dictionary. The variables can be provided explicitly through vars_to_warm_start, or they are retrieved from collections (see below). Args: vars_to_warm_start: One of the following: - A regular expression (string) that captures which variables to warm-start (see tf.compat.v1.get_collection). This expression will only consider variables in the TRAINABLE_VARIABLES collection. - A list of strings, each representing a full variable name to warm-start. These will consider variables in GLOBAL_VARIABLES collection. - A list of Variables to warm-start. - `None`, in which case all variables in TRAINABLE_VARIABLES will be used. Returns: A dictionary mapping variable names (strings) to lists of Variables. Raises: ValueError: If vars_to_warm_start is not a string, `None`, a list of `Variables`, or a list of strings." 12380,_get_object_checkpoint_renames,tensorflow/tensorflow/python/training/warm_starting_util.py,373,function,"Returns a dictionary mapping variable names to checkpoint keys. The warm-starting utility expects variable names to match with the variable names in the checkpoint. For object-based checkpoints, the variable names and names in the checkpoint are different. Thus, for object-based checkpoints, this function is used to obtain the map from variable names to checkpoint keys. Args: path: path to checkpoint directory or file. variable_names: list of variable names to load from the checkpoint. Returns: If the checkpoint is object-based, this function returns a map from variable names to their corresponding checkpoint keys. If the checkpoint is name-based, this returns an empty dict.
Raises: ValueError: If the object-based checkpoint is missing variables." 12381,warm_start,tensorflow/tensorflow/python/training/warm_starting_util.py,413,function,"Warm-starts a model using the given settings. If you are using a tf.estimator.Estimator, this will automatically be called during training. Args: ckpt_to_initialize_from: [Required] A string specifying the directory with checkpoint file(s) or path to checkpoint from which to warm-start the model parameters. vars_to_warm_start: [Optional] One of the following: - A regular expression (string) that captures which variables to warm-start (see tf.compat.v1.get_collection). This expression will only consider variables in the TRAINABLE_VARIABLES collection -- if you need to warm-start non_TRAINABLE vars (such as optimizer accumulators or batch norm statistics), please use the below option. - A list of strings, each a regex scope provided to tf.compat.v1.get_collection with GLOBAL_VARIABLES (please see tf.compat.v1.get_collection). For backwards compatibility reasons, this is separate from the single-string argument type. - A list of Variables to warm-start. If you do not have access to the `Variable` objects at the call site, please use the above option. - `None`, in which case only TRAINABLE variables specified in `var_name_to_vocab_info` will be warm-started. Defaults to `'.*'`, which warm-starts all variables in the TRAINABLE_VARIABLES collection. Note that this excludes variables such as accumulators and moving statistics from batch norm. var_name_to_vocab_info: [Optional] Dict of variable names (strings) to `tf.estimator.VocabInfo`. The variable names should be ""full"" variables, not the names of the partitions. If not explicitly provided, the variable is assumed to have no (changes to) vocabulary. var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to name of the previously-trained variable in `ckpt_to_initialize_from`. If not explicitly provided, the name of the variable is assumed to be the same between the previous checkpoint and current model. Note that this has no effect on the set of variables that is warm-started, and only controls name mapping (use `vars_to_warm_start` for controlling what variables to warm-start). Raises: ValueError: If the WarmStartSettings contains prev_var_name or VocabInfo configuration for variable names that are not used. This is to ensure a stronger check for variable configuration than relying on users to examine the logs." 12382,WarmStartingUtilTest,tensorflow/tensorflow/python/training/warm_starting_util_test.py,45,class, 12383,LossScale,tensorflow/tensorflow/python/training/experimental/loss_scale.py,43,class,"Base class for all loss scales. This is an abstract base class, so you cannot instantiate it directly. Instead, use one of its concrete subclasses: * `tf.mixed_precision.experimental.DynamicLossScale` (recommended) * `tf.mixed_precision.experimental.FixedLossScale` It's recommended to use a loss scale with a `tf.keras.mixed_precision.experimental.LossScaleOptimizer`, as it's easier than using a loss scale directly. Loss scaling is a process that multiplies the loss by a multiplier called the loss scale, and divides each gradient by the same multiplier. The pseudocode for this process is: ``` loss = ... loss *= loss_scale grads = gradients(loss, vars) grads /= loss_scale ``` Mathematically, loss scaling has no effect, but can help avoid numerical underflow in intermediate gradients when float16 tensors are used for mixed precision training.
By multiplying the loss, each intermediate gradient will have the same multiplier applied. Instances of this class represent a loss scale. Calling instances of this class returns the loss scale as a scalar float32 tensor, while method `update()` updates the loss scale depending on the values of the gradients. Optimizers use instances of this class to scale loss and gradients. In most functions that accept a LossScale, you can also pass an int (such as 8) to create a `FixedLossScale` or the string `""dynamic""` to create a dynamic loss scale." 12384,FixedLossScale,tensorflow/tensorflow/python/training/experimental/loss_scale.py,204,class,"Loss scale with a fixed value. The loss scale is not updated for the lifetime of instances of this class. A given instance of this class always returns the same number when called." 12385,_is_all_finite,tensorflow/tensorflow/python/training/experimental/loss_scale.py,251,function,Returns a scalar boolean tensor indicating if all gradients are finite. 12386,_op_in_graph_mode,tensorflow/tensorflow/python/training/experimental/loss_scale.py,259,function,"Returns the tensor's op in graph mode, or the tensor in eager mode. This is useful because sometimes an op is needed in graph mode instead of a tensor. In eager mode, there are no ops. Args: tensor: A tensor. Returns: The tensor's op in graph mode. The tensor in eager mode." 12387,_assign_if_finite,tensorflow/tensorflow/python/training/experimental/loss_scale.py,276,function,Assigns a value to a variable if the value is finite. 12388,DynamicLossScale,tensorflow/tensorflow/python/training/experimental/loss_scale.py,286,class,"Loss scale that dynamically adjusts itself. Dynamic loss scaling works by adjusting the loss scale as training progresses. The goal is to keep the loss scale as high as possible without overflowing the gradients. As long as the gradients do not overflow, raising the loss scale never hurts. The algorithm starts by setting the loss scale to an initial value. Every N steps that the gradients are finite, the loss scale is increased by some factor. However, if a NaN or Inf gradient is found, the gradients for that step are not applied, and the loss scale is decreased by the factor. This process tends to keep the loss scale as high as possible without gradients overflowing." 12389,get,tensorflow/tensorflow/python/training/experimental/loss_scale.py,417,function,Get a loss scale object. 12390,MixedPrecisionLossScaleOptimizer,tensorflow/tensorflow/python/training/experimental/loss_scale_optimizer.py,31,class,"An optimizer that applies loss scaling. Loss scaling is a process that multiplies the loss by a multiplier called the loss scale, and divides each gradient by the same multiplier. The pseudocode for this process is: ``` loss = ... loss *= loss_scale grads = gradients(loss, vars) grads /= loss_scale ``` Mathematically, loss scaling has no effect, but can help avoid numerical underflow in intermediate gradients when float16 tensors are used for mixed precision training. By multiplying the loss, each intermediate gradient will have the same multiplier applied. The loss scale can either be a fixed constant, chosen by the user, or be dynamically determined. Dynamically determining the loss scale is convenient as a loss scale does not have to be explicitly chosen. However it reduces performance. This optimizer wraps another optimizer and applies loss scaling to it via a `LossScale`. Loss scaling is applied whenever gradients are computed, such as through `minimize()`." 
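The loss-scaling pseudocode in the `LossScale` and `MixedPrecisionLossScaleOptimizer` entries above can be written out directly with a gradient tape. This is a minimal sketch of the fixed-scale case; the scale of 128 and the one-variable quadratic loss are arbitrary choices for illustration.

```python
import tensorflow as tf

loss_scale = 128.0                   # arbitrary fixed scale for illustration
v = tf.Variable(1.0)
opt = tf.keras.optimizers.SGD(0.1)

with tf.GradientTape() as tape:
    loss = tf.square(v)              # stand-in loss
    scaled_loss = loss * loss_scale  # loss *= loss_scale

scaled_grads = tape.gradient(scaled_loss, [v])
grads = [g / loss_scale for g in scaled_grads]  # grads /= loss_scale
opt.apply_gradients(zip(grads, [v]))  # mathematically unchanged result
```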
12391,create_mirrored_strategy,tensorflow/tensorflow/python/training/experimental/loss_scale_optimizer_test.py,47,function, 12392,get_gradients,tensorflow/tensorflow/python/training/experimental/loss_scale_optimizer_test.py,63,function, 12393,create_identity_with_grad_check_fn,tensorflow/tensorflow/python/training/experimental/loss_scale_optimizer_test.py,69,function,"Returns a function that asserts its gradient has a certain value. This serves as a hook to assert intermediate gradients have a certain value. This returns an identity function. The identity's gradient function is also the identity function, except it asserts that the gradient equals `expected_gradient` and has dtype `expected_dtype`. Args: expected_gradient: The gradient function asserts that the gradient is this value. expected_dtype: The gradient function asserts the gradient has this dtype. Returns: An identity function whose gradient function asserts the gradient has a certain value." 12394,MixedPrecisionLossScaleOptimizerTest,tensorflow/tensorflow/python/training/experimental/loss_scale_optimizer_test.py,113,class, 12395,create_mirrored_strategy,tensorflow/tensorflow/python/training/experimental/loss_scale_test.py,45,function, 12396,FixedLossScaleTest,tensorflow/tensorflow/python/training/experimental/loss_scale_test.py,61,class, 12397,_get_example_iter,tensorflow/tensorflow/python/training/experimental/loss_scale_test.py,101,function, 12398,DynamicLossScaleTest,tensorflow/tensorflow/python/training/experimental/loss_scale_test.py,106,class, 12399,_convert_to_per_replicas,tensorflow/tensorflow/python/training/experimental/loss_scaling_gradient_tape.py,32,function,"Converts tensors and DistributedVariables to PerReplica values. Args: distribution: The distribution strategy in effect. values: A list of tensors, variables, DistributedValues, or anything else that can be converted to a PerReplica value Returns: `values`, but each element has been converted to a PerReplica value." 12400,LossScaleGradientTape,tensorflow/tensorflow/python/training/experimental/loss_scaling_gradient_tape.py,50,class,"A gradient tape that scales losses and unscales resulting gradients. Operates as a normal gradient tape, but takes in a `tf.mixed_precision.experimental.LossScale` object. Losses are scaled up by some amount before the gradients are calculated and the resulting gradients are scaled down by the same amount. This has no net mathematical effect, but can be used to prevent vanishing gradients, for example in the case of mixed precision training. If a DynamicLossScale object is used and non-finite gradients are encountered, the loss scale will be updated and the gradients recomputed until either finite gradients are encountered or the loss scale becomes 1. This class should *not* be used with a LossScaleOptimizer, as both classes update the LossScale object. Use a non-loss scaling optimizer instead. Usage: ``` opt = tf.keras.optimizers.SGD(1.0) model_loss_scale = tf.mixed_precision.experimental.DynamicLossScale() for step in training_steps: with LossScaleGradientTape(model_loss_scale) as tape: logits = ... # Run model and get logits loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels) loss = tf.reduce_mean(loss) vars = tape.watched_variables() grads = tape.gradient(loss, vars) opt.apply_gradients(zip(grads, vars)) ``` WARNING: Computing second-order (or higher) gradients with a `LossScaleGradientTape` does not yet work properly when a `tf.distribute.Strategy` is used.
Computing second-order gradients will return None instead of the gradient tensors. This only occurs when you nest multiple gradient tapes under each other; if you do not nest them, this issue will not occur." 12401,_compute_gradients_until_finite,tensorflow/tensorflow/python/training/experimental/loss_scaling_gradient_tape.py,204,function,"Compute gradients and update the loss scale until the gradients are finite. This must be called in a cross-replica context. This is a function instead of a method of LossScaleGradientTape, as the `self` parameter would be meaningless. There is one LossScaleGradientTape per replica, but this function is called once total (not per replica), so there cannot be a singular `self` parameter. Args: distribution: The distribution strategy in effect. loss_scale_gradient_tapes: A PerReplica value of LossScaleGradientTapes. Contains the LossScaleGradientTape of each replica. loss_scale: The loss scale to use to scale the loss and unscale the gradient. target: a list or nested structure of Tensors or Variables to be differentiated. sources: a list or nested structure of Tensors or Variables. `target` will be differentiated against elements in `sources`. output_gradients: Passed to GradientTape.gradient. unconnected_gradients: Passed to GradientTape.gradient. Returns: The gradients of `target` with respect to `sources`." 12402,create_mirrored_strategy,tensorflow/tensorflow/python/training/experimental/loss_scaling_gradient_tape_test.py,44,function, 12403,LossScaleGradientTapeTest,tensorflow/tensorflow/python/training/experimental/loss_scaling_gradient_tape_test.py,51,class, 12404,_register_wrapper_optimizer_cls,tensorflow/tensorflow/python/training/experimental/mixed_precision.py,37,function, 12405,_wrap_optimizer,tensorflow/tensorflow/python/training/experimental/mixed_precision.py,41,function,Wraps an optimizer with a LossScaleOptimizer. 12406,enable_mixed_precision_graph_rewrite,tensorflow/tensorflow/python/training/experimental/mixed_precision.py,65,function,"Enable mixed precision via a graph rewrite. Mixed precision is the use of both float32 and float16 data types when training a model to improve performance. This is achieved via a graph rewrite operation and a loss-scale optimizer. Performing arithmetic operations in float16 takes advantage of specialized processing units, such as NVIDIA Tensor Cores, for much higher arithmetic throughput. However, due to the smaller representable range, performing the entire training with float16 can result in gradient underflow, that is, small gradient values becoming zeroes. Instead, performing only select arithmetic operations in float16 results in higher throughput and decreased training time when using compatible hardware accelerators while also reducing memory usage, typically without sacrificing model accuracy. Note: While the mixed precision rewrite changes the datatype of various layers throughout the model, the same accuracy reached in float32 is expected. If a `NaN` gradient occurs with dynamic loss scaling, the model update for that batch is skipped. In this case, the global step count is not incremented, and the `LossScaleOptimizer` attempts to decrease the loss scaling value to avoid `NaN` values in subsequent iterations. This approach has been shown to achieve the same accuracy as float32 and, in most cases, better training throughput.
Example: ```python model = tf.keras.models.Sequential([ tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='softmax'), ]) opt = tf.keras.optimizers.SGD() opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt) model.compile(loss=""mse"", optimizer=opt) x_train = np.random.random((1024, 64)) y_train = np.random.random((1024, 64)) model.fit(x_train, y_train) ``` Calling `enable_mixed_precision_graph_rewrite(opt)` enables the graph rewrite operation before computing gradients. The function additionally returns an `Optimizer` (`opt`) wrapped with a `LossScaleOptimizer`. This prevents underflow in the float16 tensors during the backward pass. An optimizer of type `tf.keras.optimizers.Optimizer` or `tf.compat.v1.train.Optimizer` must be passed to this function, which will then be wrapped to use loss scaling. The graph rewrite operation changes the dtype of certain operations in the graph from float32 to float16. There are several categories of operations that are either included or excluded by this rewrite operation. The following categories of Ops are defined inside corresponding functions under the class `AutoMixedPrecisionLists` in auto_mixed_precision_lists.h: * `ClearList`: Ops that do not have numerically significant adverse effects. E.g. `ArgMax` and `Floor`. * `AllowList`: Ops that are considered numerically safe for execution in float16, and thus are always converted. E.g. `Conv2D`. * `DenyList`: Ops that are numerically unsafe to execute in float16 and can negatively affect downstream nodes. E.g. `Softmax`. * `GrayList`: Ops that are considered numerically safe for execution in float16 unless downstream from a DenyList Op. E.g. `Add` and `AvgPool`. When this function is used, gradients should be computed and applied with the returned optimizer, either by calling `opt.minimize()` or `opt.compute_gradients()` followed by `opt.apply_gradients()`. If gradients are instead computed with `tf.gradients` or `tf.GradientTape`, loss scaling will not be applied, which will likely cause your model not to converge due to float16 underflow problems. To apply loss scaling with `tf.gradients` or `tf.GradientTape`, use `LossScaleOptimizer.get_scaled_loss` and `LossScaleOptimizer.get_unscaled_gradients`. See `keras.mixed_precision.experimental.LossScaleOptimizer` for details on how to do this. When eager execution is enabled, the mixed precision graph rewrite is only enabled within `tf.function`s, as outside `tf.function`s, there is no graph. For NVIDIA GPUs with Tensor cores, as a general performance guide, dimensions (such as batch size, input size, output size, and channel counts) should be powers of two if under 256, or otherwise divisible by 8 if above 256. For more information, check out the [NVIDIA Deep Learning Performance Guide]( https://docs.nvidia.com/deeplearning/sdk/dl-performance-guide/index.html). Currently, mixed precision is only enabled on NVIDIA Tensor Core GPUs with Compute Capability 7.0 and above (Volta, Turing, or newer architectures). The parts of the graph on CPUs and TPUs are untouched by the graph rewrite. ## Comparison with the Keras mixed precision API Both this function and the [Keras mixed precision API](https://www.tensorflow.org/guide/keras/mixed_precision) enable the use of mixed precision in a model. Therefore, only one of the two APIs can be used. We recommend using the Keras mixed precision API, as it is more customizable and supports Eager execution.
However, it only supports models which use Keras layers, while the graph rewrite works in any model that uses `tf.function`s. The core difference between the two APIs is that this function is a graph rewrite, and so it changes the graph to use mixed precision under the hood. You still build your graph in float32, and the graph rewrite will change certain ops to float16. The Keras mixed precision API directly builds the Keras Model using a mix of float16 and float32. One core advantage of the Keras API is that it supports mixed precision with Eager execution, i.e. mixed precision outside `tf.function`s. The graph rewrite will only affect ops within `tf.function`s, making it harder to debug if issues occur with mixed precision. The Keras API is also more customizable, as you can override any layer to run in float32 by passing `dtype=""float32""` to the layer constructor. Additionally, you can query the dtype of tensors in the model by checking `tensor.dtype`. With the graph rewrite, all tensors appear to be float32 since the dtype is only changed under the hood. The main advantage of the graph rewrite (this function) is that it works even if you do not use Keras layers or any other part of Keras. The Keras mixed precision API requires models which use Keras layers, as it only inserts casts inside Keras layers and models. Another advantage is that the graph rewrite never results in a TypeError, which the Keras API may introduce if you do certain operations outside Keras. For example, the following will result in a TypeError if the Keras mixed precision API is enabled, as a float16 and float32 tensor will be added: `tf.keras.layers.Dense(2)(x) + tf.keras.layers.Dense(2, dtype=""float32"")(x)` Raises: `ValueError`, if the `tf.keras.mixed_precision` API is also used by calling `tf.keras.mixed_precision.experimental.set_policy`. Only one mixed precision API can be used. Args: opt: An instance of a `tf.keras.optimizers.Optimizer`. loss_scale: Either an int/float, the string `""dynamic""`, or an instance of a `tf.mixed_precision.experimental.LossScale`. The loss scale to use. It is recommended to keep this as its default value of `""dynamic""`, which will adjust the scaling automatically to prevent `Inf` or `NaN` values. Returns: A version of `opt` that will use loss scaling to prevent underflow." 12407,enable_mixed_precision_graph_rewrite_v1,tensorflow/tensorflow/python/training/experimental/mixed_precision.py,210,function,"Enable mixed precision via a graph rewrite. Mixed precision is the use of both float32 and float16 data types when training a model to improve performance. This is achieved via a graph rewrite operation and a loss-scale optimizer. Performing arithmetic operations in float16 takes advantage of specialized processing units, such as NVIDIA Tensor Cores, for much higher arithmetic throughput. However, due to the smaller representable range, performing the entire training with float16 can result in gradient underflow, that is, small gradient values becoming zeroes. Instead, performing only select arithmetic operations in float16 results in higher throughput and decreased training time when using compatible hardware accelerators while also reducing memory usage, typically without sacrificing model accuracy. Note: While the mixed precision rewrite changes the datatype of various layers throughout the model, the same accuracy reached in float32 is expected. If a `NaN` gradient occurs with dynamic loss scaling, the model update for that batch is skipped.
In this case, the global step count is not incremented, and the `LossScaleOptimizer` attempts to decrease the loss scaling value to avoid `NaN` values in subsequent iterations. This approach has been shown to achieve the same accuracy as float32 and, in most cases, better training throughput. Example: ```python model = tf.keras.models.Sequential([ tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(64, activation='softmax'), ]) opt = tf.keras.optimizers.SGD() opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt) model.compile(loss=""mse"", optimizer=opt) x_train = np.random.random((1024, 64)) y_train = np.random.random((1024, 64)) model.fit(x_train, y_train) ``` Calling `enable_mixed_precision_graph_rewrite(opt)` enables the graph rewrite operation before computing gradients. The function additionally returns an `Optimizer` (`opt`) wrapped with a `LossScaleOptimizer`. This prevents underflow in the float16 tensors during the backward pass. An optimizer of type `tf.train.Optimizer` or `tf.keras.optimizers.Optimizer` must be passed to this function, which will then be wrapped to use loss scaling. The graph rewrite operation changes the `dtype` of certain operations in the graph from float32 to float16. There are several categories of operations that are either included or excluded by this rewrite operation. The following categories of Ops are defined inside corresponding functions under the class `AutoMixedPrecisionLists` in auto_mixed_precision_lists.h: * `ClearList`: Ops that do not have numerically significant adverse effects. E.g. `ArgMax` and `Floor`. * `AllowList`: Ops that are considered numerically safe for execution in float16, and thus are always converted. E.g. `Conv2D`. * `DenyList`: Ops that are numerically unsafe to execute in float16 and can negatively affect downstream nodes. E.g. `Softmax`. * `GrayList`: Ops that are considered numerically safe for execution in float16 unless downstream from a DenyList Op. E.g. `Add` and `AvgPool`. When this function is used, gradients should only be computed and applied with the returned optimizer, either by calling `opt.minimize()` or `opt.compute_gradients()` followed by `opt.apply_gradients()`. Gradients should not be computed with `tf.gradients` or `tf.GradientTape`. This is because the returned optimizer will apply loss scaling, and `tf.gradients` or `tf.GradientTape` will not. If you directly use `tf.gradients` or `tf.GradientTape`, your model may not converge due to float16 underflow problems. When eager execution is enabled, the mixed precision graph rewrite is only enabled within `tf.function`s, as outside `tf.function`s, there is no graph. For NVIDIA GPUs with Tensor cores, as a general performance guide, dimensions (such as batch size, input size, output size, and channel counts) should be powers of two if under 256, or otherwise divisible by 8 if above 256. For more information, check out the [NVIDIA Deep Learning Performance Guide]( https://docs.nvidia.com/deeplearning/sdk/dl-performance-guide/index.html). Currently, mixed precision is only enabled on NVIDIA Tensor Core GPUs with Compute Capability 7.0 and above (Volta, Turing, or newer architectures). The parts of the graph on CPUs and TPUs are untouched by the graph rewrite. Raises: `ValueError`, if the `tf.keras.mixed_precision` API is also used by calling `tf.keras.mixed_precision.experimental.set_policy`. Only one mixed precision API can be used. Args: opt: An instance of a `tf.keras.optimizers.Optimizer` or a `tf.train.Optimizer`.
loss_scale: Either an int/float, the string `""dynamic""`, or an instance of a `tf.mixed_precision.experimental.LossScale`. The loss scale to use. It is recommended to keep this as its default value of `""dynamic""`, which will adjust the scaling automatically to prevent `Inf` or `NaN` values. Returns: A version of `opt` that will use loss scaling to prevent underflow." 12408,_enable_mixed_precision_graph_rewrite_base,tensorflow/tensorflow/python/training/experimental/mixed_precision.py,322,function,Enables mixed precision. See `enable_mixed_precision_graph_rewrite`. 12409,disable_mixed_precision_graph_rewrite,tensorflow/tensorflow/python/training/experimental/mixed_precision.py,352,function,"Disables the mixed precision graph rewrite. After this is called, the mixed precision graph rewrite will no longer run for tf.functions, and so float32 operations will no longer be converted to float16. This does not undo the effects of loss scaling. Any optimizers wrapped with a LossScaleOptimizer will continue to do loss scaling, although this loss scaling will no longer be useful, as the graph rewrite no longer converts tf.functions to use float16. This function is useful for unit testing. A unit test can test using the mixed precision graph rewrite, then disable it so future unit tests continue using float32." 12410,disable_mixed_precision_graph_rewrite_v1,tensorflow/tensorflow/python/training/experimental/mixed_precision.py,376,function,"Disables the mixed precision graph rewrite. After this is called, the mixed precision graph rewrite will no longer run for new Sessions, and so float32 operations will no longer be converted to float16 in such Sessions. However, any existing Sessions will continue to have the graph rewrite enabled if they were created after `enable_mixed_precision_graph_rewrite` was called but before `disable_mixed_precision_graph_rewrite` was called. This does not undo the effects of loss scaling. Any optimizers wrapped with a LossScaleOptimizer will continue to do loss scaling, although this loss scaling will no longer be useful if the optimizer is used in new Sessions, as the graph rewrite no longer converts the graph to use float16. This function is useful for unit testing. A unit test can test using the mixed precision graph rewrite, then disable it so future unit tests continue using float32. If this is done, unit tests should not share a single session, as `enable_mixed_precision_graph_rewrite` and `disable_mixed_precision_graph_rewrite` have no effect on existing sessions." 12411,MixedPrecisionTest,tensorflow/tensorflow/python/training/experimental/mixed_precision_test.py,50,class, 12412,CheckpointOptions,tensorflow/tensorflow/python/training/saving/checkpoint_options.py,25,class,"Options for constructing a Checkpoint. Used as the `_options` argument to the `tf.Checkpoint` constructor to adjust how variables are saved. Example: Run IO ops on ""localhost"" while saving a checkpoint: ``` step = tf.Variable(0, name=""step"") checkpoint = tf.Checkpoint(step=step) options = tf.CheckpointOptions(experimental_io_device=""/job:localhost"") checkpoint.save(""/tmp/ckpt"", options=options) ```" 12413,_SingleDeviceSaver,tensorflow/tensorflow/python/training/saving/functional_saver.py,41,class,Saves and restores checkpoints from the current device. 12414,sharded_filename,tensorflow/tensorflow/python/training/saving/functional_saver.py,117,function,"Append sharding information to a filename. Args: filename_tensor: A string tensor. shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards. Returns: A string tensor." 12415,MultiDeviceSaver,tensorflow/tensorflow/python/training/saving/functional_saver.py,131,class,"Saves checkpoints directly from multiple devices. Note that this is a low-level utility which stores Tensors in the keys specified by `SaveableObject`s. Higher-level utilities for object-based checkpointing are built on top of it." 12416,SaverTest,tensorflow/tensorflow/python/training/saving/functional_saver_test.py,42,class, 12417,SaveableHook,tensorflow/tensorflow/python/training/saving/saveable_hook.py,24,class,"Base class for running callbacks at Save/Restore time. Subclasses should override one or both methods to modify or read variables during the saving process. No guarantees are made regarding the precedence of execution between multiple `SaveableHook` objects, but execution is guaranteed to occur before or after the respective event. Users should emit the SaveableHook alongside other SaveableObjects, such as in Trackable._gather_saveables_for_checkpoint(). Saves a single constant in order to be compliant with the SaveableObject API." 12418,SaveSpec,tensorflow/tensorflow/python/training/saving/saveable_object.py,21,class,Class used to describe tensor slices that need to be saved. 12419,SaveableObject,tensorflow/tensorflow/python/training/saving/saveable_object.py,58,class,Base class for saving and restoring saveable objects. 12420,set_cpu0,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,54,function,"Creates a new device string based on `device_string` but using /CPU:0. If the device is already on /CPU:0, this is a no-op. Args: device_string: A device string. Returns: A device string." 12421,ReferenceVariableSaveable,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,70,class,SaveableObject implementation that handles reference variables. 12422,ResourceVariableSaveable,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,88,class,SaveableObject implementation that handles ResourceVariables. 12423,_tensor_comes_from_variable,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,130,function, 12424,saveable_objects_for_op,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,134,function,"Create `SaveableObject`s from an operation. Args: op: A variable, operation, or SaveableObject to coerce into a SaveableObject. name: A string name for the SaveableObject. Yields: `SaveableObject`s which together save/restore `op`. Raises: TypeError: If `name` is not a string. ValueError: For operations with no known conversion to SaveableObject." 12425,op_list_to_dict,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,224,function,"Create a dictionary of names to operation lists. Args: op_list: A (nested) list, tuple, or set of Variables or SaveableObjects. convert_variable_to_tensor: Whether or not to convert single Variables with no slice info into Tensors. Returns: A dictionary of names to the operations that must be saved under that name. Variables with save_slice_info are grouped together under the same key in no particular order. Raises: TypeError: If the type of op_list or its elements is not supported. ValueError: If at least two saveables share the same name." 12426,_add_saveable,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,317,function,"Adds the saveable to the saveables list. Args: saveables: List to append the SaveableObject to. seen_ops: Set of the ops of the saveables already processed. 
Used to check that each saveable is only saved once. saveable: The saveable. Raises: ValueError: If the saveable has already been processed." 12427,validate_and_slice_inputs,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,336,function,"Returns the variables and names that will be used for a Saver. Args: names_to_saveables: A dict (k, v) where k is the name of an operation and v is an operation to save or a BaseSaverBuilder.Saver. Returns: A list of SaveableObjects. Raises: TypeError: If any of the keys are not strings or any of the values are not one of Tensor or Variable or a trackable operation. ValueError: If the same operation is given in more than one value (this also applies to slices of SlicedVariables)." 12428,trace_save_restore_functions,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,365,function,Gathers all SaveableObjects and traces the save and restore ops. 12429,_trace_save_and_restore_function,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,390,function,Traces the save and restore concrete functions. 12430,RestoredSaveableObject,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,444,class,SaveableObject restored from SavedModel using the traced save/restore. 12431,restored_saved_object_factory,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,467,function, 12432,create_saveable_object,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,473,function,"Creates a SaveableObject while potentially in a different graph. When creating the frozen saver for SavedModel, the save and restore ops are placed in a separate graph. Since RestoredSaveableObject uses tf.functions to save and restore, the function captures must be mapped to the new graph. Args: factory: Factory method for creating the SaveableObject. name: Checkpoint key of this SaveableObject. call_with_mapped_captures: Helper that calls a tf.function while remapping the captures. Returns: a SaveableObject." 12433,is_factory_for_restored_saveable_object,tensorflow/tensorflow/python/training/saving/saveable_object_util.py,504,function, 12434,CheckpointInitialValue,tensorflow/tensorflow/python/training/tracking/base.py,57,class,"Tensor wrapper for managing update UIDs in `Variables`. When supplied as an initial value, objects of this type let a `Variable` (`Variable`, `ResourceVariable`, etc.) know the UID of the restore the initial value came from. This allows deferred restorations to be sequenced in the order the user specified them, and lets us fall back on assignment if an initial value is not set (e.g. due to a custom getter interfering). See comments in _add_variable_with_custom_getter for more information about how `CheckpointInitialValue` is used." 12435,NoRestoreSaveable,tensorflow/tensorflow/python/training/tracking/base.py,89,class,Embeds a tensor in a checkpoint with no restore ops. 12436,PythonStateSaveable,tensorflow/tensorflow/python/training/tracking/base.py,102,class,An interface for saving/restoring volatile Python state. 12437,PythonStringStateSaveable,tensorflow/tensorflow/python/training/tracking/base.py,129,class,Saves Python state in a checkpoint. 12438,CheckpointPosition,tensorflow/tensorflow/python/training/tracking/base.py,190,class,Indicates a position within a `_CheckpointRestoreCoordinator`. 12439,no_automatic_dependency_tracking,tensorflow/tensorflow/python/training/tracking/base.py,443,function,"Disables automatic dependency tracking on attribute assignment. 
Use to decorate any method of a Trackable object. Attribute assignment in that method will not add dependencies (also respected in Model). Harmless if used in a class which does not do automatic dependency tracking (which means it's safe to use in base classes which may have subclasses which also inherit from Trackable). Args: method: The method to decorate. Returns: A decorated method which sets and un-sets automatic dependency tracking for the object the method is called on (not thread safe)." 12440,no_manual_dependency_tracking_scope,tensorflow/tensorflow/python/training/tracking/base.py,474,function,"A context that disables manual dependency tracking for the given `obj`. Sometimes library methods might track objects on their own and we might want to disable that and do the tracking on our own. One can then use this context manager to disable the library method's tracking and do your own tracking. For example: class TestLayer(tf.keras.layers.Layer): def build(self): with no_manual_dependency_tracking_scope(self): var = self.add_variable(""name1"") # Creates a var and doesn't track it self._track_trackable(""name2"", var) # We track variable with name `name2` Args: obj: A trackable object. Yields: a scope in which the object doesn't track dependencies manually." 12441,no_automatic_dependency_tracking_scope,tensorflow/tensorflow/python/training/tracking/base.py,506,function,"A context that disables automatic dependency tracking when assigning attrs. Objects that inherit from AutoTrackable automatically create dependencies to trackable objects through attribute assignments, and wrap data structures (lists or dicts) with trackable classes. This scope may be used to temporarily disable this behavior. This works similarly to the decorator `no_automatic_dependency_tracking`. Example usage: ``` model = tf.keras.Model() model.arr1 = [] # Creates a ListWrapper object with no_automatic_dependency_tracking_scope(model): model.arr2 = [] # Creates a regular, untracked python list ``` Args: obj: A trackable object. Yields: a scope in which the object doesn't track dependencies." 12442,Trackable,tensorflow/tensorflow/python/training/tracking/base.py,537,class,"Base class for `Trackable` objects without automatic dependencies. This class has no __setattr__ override for performance reasons. Dependencies must be added explicitly. Unless attribute assignment is performance-critical, use `AutoTrackable` instead. Use `Trackable` for `isinstance` checks." 12443,InterfaceTests,tensorflow/tensorflow/python/training/tracking/base_test.py,29,class, 12444,_TrivialSaveable,tensorflow/tensorflow/python/training/tracking/benchmarks_test.py,37,class, 12445,_TrivialRestore,tensorflow/tensorflow/python/training/tracking/benchmarks_test.py,51,class, 12446,_LazyTrivialObjects,tensorflow/tensorflow/python/training/tracking/benchmarks_test.py,57,class, 12447,_save_checkpoint,tensorflow/tensorflow/python/training/tracking/benchmarks_test.py,69,function, 12448,SavingBenchmarks,tensorflow/tensorflow/python/training/tracking/benchmarks_test.py,75,class, 12449,NoDependency,tensorflow/tensorflow/python/training/tracking/data_structures.py,41,class,"Allows attribute assignment to `Trackable` objects with no dependency.
Example usage: ```python obj = Trackable() obj.has_dependency = tf.Variable(0., name=""dep"") obj.no_dependency = NoDependency(tf.Variable(1., name=""nodep"")) assert obj.no_dependency.name == ""nodep:0"" ``` `obj` in this example has a dependency on the variable ""dep"", and both attributes contain un-wrapped `Variable` objects. `NoDependency` also works with `tf.keras.Model`, but only for checkpoint dependencies: wrapping a `Layer` in `NoDependency` will assign the (unwrapped) `Layer` to the attribute without a checkpoint dependency, but the `Model` will still track the `Layer` (so it will appear in `Model.layers`, and its variables will appear in `Model.variables`)." 12450,_should_wrap_tuple,tensorflow/tensorflow/python/training/tracking/data_structures.py,68,function,Determine if a tuple has any trackable components. 12451,wrap_or_unwrap,tensorflow/tensorflow/python/training/tracking/data_structures.py,82,function,"Wraps basic data structures, unwraps NoDependency objects." 12452,sticky_attribute_assignment,tensorflow/tensorflow/python/training/tracking/data_structures.py,105,function,"Adds dependencies, generally called from __setattr__. This behavior is shared between Trackable and Model. Respects NoDependency indicators, but otherwise makes trackable objects out of common data structures and tracks objects by their attribute names. Args: trackable: The object to add dependencies to (generally the one having an attribute assigned). name: The attribute name being assigned. value: The value being assigned. Not necessarily a trackable object. Returns: The value which should be stored in the attribute (unwrapped from a NoDependency object if necessary)." 12453,_UntrackableError,tensorflow/tensorflow/python/training/tracking/data_structures.py,140,class, 12454,TrackableDataStructure,tensorflow/tensorflow/python/training/tracking/data_structures.py,151,class,Base class for data structures which contain trackable objects. 12455,List,tensorflow/tensorflow/python/training/tracking/data_structures.py,276,class,"An append-only sequence type which is trackable. Maintains checkpoint dependencies on its contents (which must also be trackable), and forwards any `Layer` metadata such as updates and losses. Note that `List` is purely a container. It lets a `tf.keras.Model` or other trackable object know about its contents, but does not call any `Layer` instances which are added to it. To indicate a sequence of `Layer` instances which should be called sequentially, use `tf.keras.Sequential`. Example usage: ```python class HasList(tf.keras.Model): def __init__(self): super(HasList, self).__init__() self.layer_list = List([layers.Dense(3)]) self.layer_list.append(layers.Dense(4)) def call(self, x): aggregation = 0. for l in self.layer_list: x = l(x) aggregation += tf.reduce_sum(x) return aggregation ``` This kind of wrapping is necessary because `Trackable` objects do not (yet) deeply inspect regular Python data structures, so for example assigning a regular list (`self.layer_list = [layers.Dense(3)]`) does not create a checkpoint dependency and does not add the `Layer` instance's weights to its parent `Model`." 12456,ListWrapper,tensorflow/tensorflow/python/training/tracking/data_structures.py,398,class,"Wraps the built-in `list` to support restore-on-create for variables. Unlike `List`, this sequence type is mutable in the same ways built-in lists are. Instead of throwing an error immediately like `List`, it records problematic mutations (e.g. 
assigning a new element to a position already occupied, meaning both elements get the same names at different times) and refuses to save. On assignment to an attribute of a Model or Trackable object, Python lists are replaced with ListWrapper. Wrapping a list in a `NoDependency` object prevents this." 12457,Mapping,tensorflow/tensorflow/python/training/tracking/data_structures.py,658,class,"An append-only trackable mapping data structure with string keys. Maintains checkpoint dependencies on its contents (which must also be trackable), named based on its keys. Note that once a key has been added, it may not be deleted or replaced." 12458,_DictWrapper,tensorflow/tensorflow/python/training/tracking/data_structures.py,728,class,"Wraps built-in dicts to support restore-on-create for variables. _DictWrapper is to Mapping as ListWrapper is to List. Unlike Mapping, _DictWrapper allows non-string keys and values and arbitrary mutations (delete keys, reassign values). Like ListWrapper, these mutations mean that _DictWrapper will raise an exception on save." 12459,_TupleWrapper,tensorflow/tensorflow/python/training/tracking/data_structures.py,911,class,Trackable wrapper for tuples and namedtuples. 12460,_is_function,tensorflow/tensorflow/python/training/tracking/data_structures.py,1035,function, 12461,_set_list_item,tensorflow/tensorflow/python/training/tracking/data_structures.py,1052,function, 12462,_set_tuple_item,tensorflow/tensorflow/python/training/tracking/data_structures.py,1070,function, 12463,ListTests,tensorflow/tensorflow/python/training/tracking/data_structures_test.py,45,class, 12464,ListWrapperTest,tensorflow/tensorflow/python/training/tracking/data_structures_test.py,146,class, 12465,MappingTests,tensorflow/tensorflow/python/training/tracking/data_structures_test.py,335,class, 12466,TupleTests,tensorflow/tensorflow/python/training/tracking/data_structures_test.py,517,class, 12467,_escape_local_name,tensorflow/tensorflow/python/training/tracking/graph_view.py,51,function, 12468,_object_prefix_from_path,tensorflow/tensorflow/python/training/tracking/graph_view.py,61,function, 12469,_slot_variable_naming_for_optimizer,tensorflow/tensorflow/python/training/tracking/graph_view.py,67,function,Make a function for naming slot variables in an optimizer. 12470,_serialize_slot_variables,tensorflow/tensorflow/python/training/tracking/graph_view.py,89,function,Gather and name slot variables. 12471,ObjectGraphView,tensorflow/tensorflow/python/training/tracking/graph_view.py,142,class,Gathers and serializes an object graph. 12472,is_layer,tensorflow/tensorflow/python/training/tracking/layer_utils.py,37,function,Implicit check for Layer-like objects. 12473,has_weights,tensorflow/tensorflow/python/training/tracking/layer_utils.py,43,function,Implicit check for Layer-like objects. 12474,invalidate_recursive_cache,tensorflow/tensorflow/python/training/tracking/layer_utils.py,52,function,Convenience decorator to invalidate the cache when setting attributes. 12475,MutationSentinel,tensorflow/tensorflow/python/training/tracking/layer_utils.py,64,class,Container for tracking whether a property is in a cached state. 12476,AttributeSentinel,tensorflow/tensorflow/python/training/tracking/layer_utils.py,78,class,"Container for managing attribute cache state within a Layer. The cache can be invalidated either on an individual basis (for instance when an attribute is mutated) or a layer-wide basis (such as when a new dependency is added)." 
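The wrapping behavior described by the `List`/`ListWrapper` and `_DictWrapper` entries above can be observed on any trackable object; a `tf.Module` is a convenient host. A small sketch:

```python
import tensorflow as tf

root = tf.Module()
# Assigning a plain Python list to a trackable object wraps it
# (ListWrapper), so the contained variables become checkpoint
# dependencies and are discoverable from the parent object.
root.vals = [tf.Variable(1.0), tf.Variable(2.0)]
print(type(root.vals).__name__)                   # ListWrapper
print(sorted(v.numpy() for v in root.variables))  # [1.0, 2.0]
```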
12477,filter_empty_layer_containers,tensorflow/tensorflow/python/training/tracking/layer_utils.py,141,function,Filter out empty Layer-like containers and uniquify. 12478,gather_trainable_weights,tensorflow/tensorflow/python/training/tracking/layer_utils.py,161,function,"Lists the trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected trainable weights/variables." 12479,gather_non_trainable_weights,tensorflow/tensorflow/python/training/tracking/layer_utils.py,184,function,"Lists the non-trainable weights for an object with sub-layers. Args: trainable: Whether the object collecting the variables is trainable. sub_layers: A flat list of Layer objects owned by this object, to collect variables from. extra_variables: Any extra variables to include. Their `.trainable` property is used to categorize them. Returns: A list of collected non-trainable weights/variables." 12480,PythonState,tensorflow/tensorflow/python/training/tracking/python_state.py,31,class,"A mixin for putting Python state in an object-based checkpoint. This is an abstract class which allows extensions to TensorFlow's object-based checkpointing (see `tf.train.Checkpoint`). For example a wrapper for NumPy arrays: ```python import io import numpy class NumpyWrapper(tf.train.experimental.PythonState): def __init__(self, array): self.array = array def serialize(self): string_file = io.BytesIO() try: numpy.save(string_file, self.array, allow_pickle=False) serialized = string_file.getvalue() finally: string_file.close() return serialized def deserialize(self, string_value): string_file = io.BytesIO(string_value) try: self.array = numpy.load(string_file, allow_pickle=False) finally: string_file.close() ``` Instances of `NumpyWrapper` are checkpointable objects, and will be saved and restored from checkpoints along with TensorFlow state like variables. ```python root = tf.train.Checkpoint(numpy=NumpyWrapper(numpy.array([1.]))) save_path = root.save(prefix) root.numpy.array *= 2. assert [2.] == root.numpy.array root.restore(save_path) assert [1.] == root.numpy.array ```" 12481,_NumpyState,tensorflow/tensorflow/python/training/tracking/python_state_test.py,33,class,"A checkpointable object whose NumPy array attributes are saved/restored. Example usage: ```python arrays = _NumpyState() checkpoint = tf.train.Checkpoint(numpy_arrays=arrays) arrays.x = numpy.zeros([3, 4]) save_path = checkpoint.save(""/tmp/ckpt"") arrays.x[1, 1] = 4. checkpoint.restore(save_path) assert (arrays.x == numpy.zeros([3, 4])).all() second_checkpoint = tf.train.Checkpoint( numpy_arrays=_NumpyState()) # Attributes of NumpyState objects are created automatically by restore() second_checkpoint.restore(save_path) assert (second_checkpoint.numpy_arrays.x == numpy.zeros([3, 4])).all() ``` Note that `NumpyState` objects re-create the attributes of the previously saved object on `restore()`. This is in contrast to TensorFlow variables, for which a `Variable` object must be created and assigned to an attribute. This snippet works both when graph building and when executing eagerly. On save, the NumPy array(s) are fed as strings to be saved in the checkpoint (via a placeholder when graph building, or as a string constant when executing eagerly). 
When restoring they skip the TensorFlow graph entirely, and so no restore ops need be run. This means that restoration always happens eagerly, rather than waiting for `checkpoint.restore(...).run_restore_ops()` like TensorFlow variables when graph building." 12482,_NumpyWrapper,tensorflow/tensorflow/python/training/tracking/python_state_test.py,106,class,Wraps a NumPy array for storage in an object-based checkpoint. 12483,NumpyStateTests,tensorflow/tensorflow/python/training/tracking/python_state_test.py,136,class, 12484,NotTrackable,tensorflow/tensorflow/python/training/tracking/tracking.py,41,class,"Marks instances of child classes as unsaveable using an object-based API. Useful for marking objects which would otherwise look trackable because of inheritance (e.g. through `Layer`) as not trackable. Inheriting from `NotTrackable` does not prevent an object from being assigned to any attributes, but will throw an error on save/restore." 12485,AutoTrackable,tensorflow/tensorflow/python/training/tracking/tracking.py,52,class,"Manages dependencies on other objects. `Trackable` objects may have dependencies: other `Trackable` objects which should be saved if the object declaring the dependency is saved. A correctly saveable program has a dependency graph such that if changing a global variable affects an object (e.g. changes the behavior of any of its methods) then there is a chain of dependencies from the influenced object to the variable. Dependency edges have names, and are created implicitly when a `Trackable` object is assigned to an attribute of another `Trackable` object. For example: ``` obj = Trackable() obj.v = ResourceVariable(0.) ``` The `Trackable` object `obj` now has a dependency named ""v"" on a variable. `Trackable` objects may specify `Tensor`s to be saved and restored directly (e.g. a `Variable` indicating how to save itself) rather than through dependencies on other objects. See `Trackable._gather_saveables_for_checkpoint` for details." 12486,delete_tracking,tensorflow/tensorflow/python/training/tracking/tracking.py,127,function,Removes the tracking of name from object. 12487,ResourceTracker,tensorflow/tensorflow/python/training/tracking/tracking.py,140,class,An object that tracks a list of resources. 12488,resource_tracker_scope,tensorflow/tensorflow/python/training/tracking/tracking.py,157,function,"A context to manage resource trackers. Use this in order to collect up all resources created within a block of code. Example usage: ```python resource_tracker = ResourceTracker() with resource_tracker_scope(resource_tracker): resource = TrackableResource() assert resource_tracker.resources == [resource] ``` Args: resource_tracker: The passed in ResourceTracker object Yields: A scope in which the resource_tracker is active." 12489,CapturableResourceDeleter,tensorflow/tensorflow/python/training/tracking/tracking.py,185,class,Deleter to destroy CapturableResource without overriding its __del__(). 12490,CapturableResource,tensorflow/tensorflow/python/training/tracking/tracking.py,209,class,"Holds a Tensor which a tf.function can capture. `CapturableResource`s are discovered by traversing the graph of object attributes, e.g. during `tf.saved_model.save`. They are excluded from the scope-based tracking of `TrackableResource`; generally things that require initialization should inherit from `TrackableResource` instead of `CapturableResource` directly." 12491,TrackableResource,tensorflow/tensorflow/python/training/tracking/tracking.py,286,class,Adds scope tracking to CapturableResource.
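To make the named dependency edges that `AutoTrackable` creates on attribute assignment concrete, here is a small sketch; the `/tmp` prefix and attribute names are illustrative. `tf.train.Checkpoint` instances are themselves trackable, so nesting them builds the edge chain root -> "child" -> "v" that saving and restoring traverse:

```python
import tensorflow as tf

# Sketch of named dependency edges: `root` depends on "child", which
# depends on "v"; saving the root therefore saves the variable, and
# restoring walks the same edges back to it.
root = tf.train.Checkpoint()
root.child = tf.train.Checkpoint()
root.child.v = tf.Variable(3.0)

path = root.save("/tmp/edges_demo/ckpt")  # illustrative path
root.child.v.assign(0.0)
root.restore(path).assert_consumed()
print(root.child.v.numpy())  # 3.0, restored through the named edges
```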
12492,Asset,tensorflow/tensorflow/python/training/tracking/tracking.py,307,class,"Represents a file asset to hermetically include in a SavedModel. A SavedModel can include arbitrary files, called assets, that are needed for its use. For example, a vocabulary file used to initialize a lookup table. When a trackable object is exported via `tf.saved_model.save()`, all the `Asset`s reachable from it are copied into the SavedModel assets directory. Upon loading, the assets and the serialized functions that depend on them will refer to the correct filepaths inside the SavedModel directory. Example: ``` filename = tf.saved_model.Asset(""file.txt"") @tf.function(input_signature=[]) def func(): return tf.io.read_file(filename) trackable_obj = tf.train.Checkpoint() trackable_obj.func = func trackable_obj.filename = filename tf.saved_model.save(trackable_obj, ""/tmp/saved_model"") # The created SavedModel is hermetic, it does not depend on # the original file and can be moved to another path. tf.io.gfile.remove(""file.txt"") tf.io.gfile.rename(""/tmp/saved_model"", ""/tmp/new_location"") reloaded_obj = tf.saved_model.load(""/tmp/new_location"") print(reloaded_obj.func()) ``` Attributes: asset_path: A 0-D `tf.string` tensor with path to the asset." 12493,cached_per_instance,tensorflow/tensorflow/python/training/tracking/tracking.py,360,function,"Lightweight decorator for caching lazily constructed properties. When to use: This decorator provides simple caching with minimal overhead. It is designed for properties which are expensive to compute and static over the life of a class instance, and provides no mechanism for cache invalidation. Thus it is best suited for lazily exposing derived properties of other static data. For classes with custom getattr / setattr behavior (such as trackable objects), storing cache results as object attributes is not performant. Instead, a specialized cache can significantly reduce property lookup overhead. (While still allowing the decorated property to be lazily computed.) Consider the following class: ``` class MyClass(object): def __setattr__(self, key, value): # Some expensive class specific code # ... # ... super(MyClass, self).__setattr__(key, value) @property def thing(self): # `thing` is expensive to compute (and may not even be requested), so we # want to lazily compute it and then cache it. output = getattr(self, '_thing', None) if output is None: self._thing = output = compute_thing(self) return output ``` It's also worth noting that ANY overriding of __setattr__, even something as simple as: ``` def __setattr__(self, key, value): super(MyClass, self).__setattr__(key, value) ``` slows down attribute assignment by nearly 10x. By contrast, replacing the definition of `thing` with the following sidesteps the expensive __setattr__ altogether: ''' @property @tracking.cached_per_instance def thing(self): # `thing` is expensive to compute (and may not even be requested), so we # want to lazily compute it and then cache it. return compute_thing(self) ''' Performance: The overhead for this decorator is ~0.4 us / call. A much lower overhead implementation (~0.085 us / call) can be achieved by using a custom dict type: ``` def dict_based_cache(f): class Cache(dict): __slots__ = () def __missing__(self, key): self[key] = output = f(key) return output return property(Cache().__getitem__) ``` However, that implementation holds class instances as keys, and as a result blocks garbage collection.
(And modifying it to use weakrefs as keys raises the lookup overhead to ~0.4 us.) As a result, the WeakKeyDictionary implementation below turns out to be more prudent. Args: f: The function to cache. Returns: f decorated with simple caching behavior." 12494,MyPickleableObject,tensorflow/tensorflow/python/training/tracking/tracking_test.py,41,class,"Needed for InterfaceTests.test_property_cache_serialization. This class must be at the top level. This is a constraint of pickle, unrelated to `cached_per_instance`." 12495,InterfaceTests,tensorflow/tensorflow/python/training/tracking/tracking_test.py,55,class, 12496,_DummyResource,tensorflow/tensorflow/python/training/tracking/tracking_test.py,287,class, 12497,ResourceTrackerTest,tensorflow/tensorflow/python/training/tracking/tracking_test.py,297,class, 12498,register_session_provider,tensorflow/tensorflow/python/training/tracking/util.py,65,function, 12499,get_session,tensorflow/tensorflow/python/training/tracking/util.py,71,function, 12500,_ObjectGraphProtoPrettyPrinter,tensorflow/tensorflow/python/training/tracking/util.py,81,class,"Lazily traverses an object graph proto to pretty print names. If no calls to `node_names` are made this object has no performance overhead. On the other hand, it will only traverse the object graph once, so repeated naming is cheap after the first." 12501,_CheckpointRestoreCoordinatorDeleter,tensorflow/tensorflow/python/training/tracking/util.py,126,class,Deleter to avoid overriding _CheckpointRestoreCoordinator.__del__(). 12502,_CheckpointRestoreCoordinator,tensorflow/tensorflow/python/training/tracking/util.py,175,class,Holds the status of an object-based checkpoint load. 12503,_NameBasedRestoreCoordinator,tensorflow/tensorflow/python/training/tracking/util.py,313,class,Keeps the status of a name-based checkpoint restore. 12504,_default_getter,tensorflow/tensorflow/python/training/tracking/util.py,400,function,A pared-down version of get_variable which does not reuse variables. 12505,add_variable,tensorflow/tensorflow/python/training/tracking/util.py,441,function,Add a variable to a Trackable with no scope influence. 12506,object_metadata,tensorflow/tensorflow/python/training/tracking/util.py,457,function,"Retrieves information about the objects in a checkpoint. Example usage: ```python object_graph = tf.contrib.checkpoint.object_metadata( tf.train.latest_checkpoint(checkpoint_directory)) ckpt_variable_names = set() for node in object_graph.nodes: for attribute in node.attributes: ckpt_variable_names.add(attribute.full_name) ``` Args: save_path: The path to the checkpoint, as returned by `save` or `tf.train.latest_checkpoint`. Returns: A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer. Raises: ValueError: If an object graph was not found in the checkpoint." 12507,list_objects,tensorflow/tensorflow/python/training/tracking/util.py,494,function,"Traverse the object graph and list all accessible objects. Looks for `Trackable` objects which are dependencies of `root_trackable`. Includes slot variables only if the variable they are slotting for and the optimizer are dependencies of `root_trackable` (i.e. if they would be saved with a checkpoint). Args: root_trackable: A `Trackable` object whose dependencies should be flattened. Returns: A flat list of objects." 12508,gather_initializers,tensorflow/tensorflow/python/training/tracking/util.py,511,function,"Traverse the object graph and find initialization ops.
Looks for `Trackable` objects which are dependencies of `root_trackable` and which have an `initializer` property. Includes initializers for slot variables only if the variable they are slotting for and the optimizer are dependencies of `root_trackable` (i.e. if they would be saved with a checkpoint). Args: root_trackable: A `Trackable` object to gather initializers for. Returns: A list of initialization ops." 12509,capture_dependencies,tensorflow/tensorflow/python/training/tracking/util.py,535,function,"Capture variables created within this scope as `Template` dependencies. Requires that `template.variable_scope` is active. This scope is intended as a compatibility measure, allowing a trackable object to add dependencies on variables created in a block of code which is not aware of object-based saving (and instead uses variable names heavily). This is how `Template` objects add dependencies on variables and sub-`Template`s. Where possible, use `tf.compat.v1.make_template` directly. Args: template: The `Template` object to register dependencies with. Yields: None (when used as a context manager)." 12510,_LoadStatus,tensorflow/tensorflow/python/training/tracking/util.py,625,class,Abstract base for load status callbacks. 12511,streaming_restore,tensorflow/tensorflow/python/training/tracking/util.py,658,function,"When graph building, runs restore ops as soon as they come in. Args: status: A _LoadStatus object from an object-based saver's restore(). Streaming restore from name-based checkpoints is not currently supported. session: A session to run new restore ops in." 12512,_objects_with_attributes,tensorflow/tensorflow/python/training/tracking/util.py,684,function,Filters out objects with no direct variable dependencies for assertions. 12513,CheckpointLoadStatus,tensorflow/tensorflow/python/training/tracking/util.py,689,class,"Checks the status of checkpoint loading and manages restore ops. Returned from `Saver.restore`. Since `restore` may defer the loading of values in the checkpoint which don't yet have corresponding Python objects, `CheckpointLoadStatus` provides a callback to verify that checkpoint loading is complete (`assert_consumed`). When graph building, `restore` does not run restore ops itself since their creation may be deferred. The `run_restore_ops` method must be called once all Python objects with values to restore have been created and added to the dependency graph (this does not necessarily have to be the whole checkpoint; calling `run_restore_ops` while `assert_consumed` fails is supported and will partially restore the checkpoint). See `Saver.restore` for usage examples." 12514,InitializationOnlyStatus,tensorflow/tensorflow/python/training/tracking/util.py,868,class,"Returned from `Saver.restore` when no checkpoint has been specified. Objects of this type have the same `assert_consumed` method as `CheckpointLoadStatus`, but it always fails. However, `initialize_or_restore` works on objects of both types, and will initialize variables in `InitializationOnlyStatus` objects or restore them otherwise." 12515,NameBasedSaverStatus,tensorflow/tensorflow/python/training/tracking/util.py,948,class,Status for loading a name-based training checkpoint. 12516,_SessionWithFeedDictAdditions,tensorflow/tensorflow/python/training/tracking/util.py,1047,class,"Pretends to be a session, inserts extra feeds on run()." 12517,TrackableSaver,tensorflow/tensorflow/python/training/tracking/util.py,1064,class,"Saves and restores a `Trackable` object and its dependencies.
See `Trackable` for details of dependency management. `Saver` wraps `tf.compat.v1.train.Saver` for saving, including extra information about the graph of dependencies between Python objects. When restoring, it uses this information about the save-time dependency graph to more robustly match objects with their checkpointed values. When executing eagerly, it supports restoring variables on object creation (see `Saver.restore`). Values in a checkpoint are mapped to `Trackable` Python objects (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the checkpoint was written. To avoid breaking existing checkpoints when modifying a class, dependency names (the names of attributes to which `Trackable` objects are assigned) may not change. These names are local to objects, in contrast to the `Variable.name`-based save/restore from `tf.compat.v1.train.Saver`, and so allow additional program transformations." 12518,frozen_saver,tensorflow/tensorflow/python/training/tracking/util.py,1335,function,"Creates a static `tf.compat.v1.train.Saver` from a trackable object. The returned `Saver` saves object-based checkpoints, but these checkpoints will no longer reflect structural changes to the object graph, only changes to the values of `Variable`s added as dependencies of the root object before `freeze` was called. `restore` works on the returned `Saver`, but requires that the object graph of the checkpoint being loaded exactly matches the object graph when `freeze` was called. This is in contrast to the object-based restore performed by `tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's object graph and the current Python object graph. Args: root_trackable: A trackable object to save. Returns: A saver which saves object-based checkpoints for the object graph frozen at the time `frozen_saver` was called." 12519,saver_with_op_caching,tensorflow/tensorflow/python/training/tracking/util.py,1361,function,A TrackableSaver with a SaveableObject cache when graph building. 12520,CheckpointV1,tensorflow/tensorflow/python/training/tracking/util.py,1374,class,"Groups trackable objects, saving and restoring them. `Checkpoint`'s constructor accepts keyword arguments whose values are types that contain trackable state, such as `tf.compat.v1.train.Optimizer` implementations, `tf.Variable`, `tf.keras.Layer` implementations, or `tf.keras.Model` implementations. It saves these values with a checkpoint, and maintains a `save_counter` for numbering checkpoints. Example usage when graph building: ```python import tensorflow as tf import os checkpoint_directory = ""/tmp/training_checkpoints"" checkpoint_prefix = os.path.join(checkpoint_directory, ""ckpt"") checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory)) train_op = optimizer.minimize( ... ) status.assert_consumed() # Optional sanity checks. with tf.compat.v1.Session() as session: # Use the Session to restore variables, or initialize them if # tf.train.latest_checkpoint returned None.
status.initialize_or_restore(session) for _ in range(num_training_steps): session.run(train_op) checkpoint.save(file_prefix=checkpoint_prefix) ``` Example usage with eager execution enabled: ```python import tensorflow as tf import os tf.compat.v1.enable_eager_execution() checkpoint_directory = ""/tmp/training_checkpoints"" checkpoint_prefix = os.path.join(checkpoint_directory, ""ckpt"") checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory)) for _ in range(num_training_steps): optimizer.minimize( ... ) # Variables will be restored on creation. status.assert_consumed() # Optional sanity checks. checkpoint.save(file_prefix=checkpoint_prefix) ``` `Checkpoint.save` and `Checkpoint.restore` write and read object-based checkpoints, in contrast to `tf.compat.v1.train.Saver` which writes and reads `variable.name` based checkpoints. Object-based checkpointing saves a graph of dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s, etc.) with named edges, and this graph is used to match variables when restoring a checkpoint. It can be more robust to changes in the Python program, and helps to support restore-on-create for variables when executing eagerly. Prefer `tf.train.Checkpoint` over `tf.compat.v1.train.Saver` for new code. `Checkpoint` objects have dependencies on the objects passed as keyword arguments to their constructors, and each dependency is given a name that is identical to the name of the keyword argument for which it was created. TensorFlow classes like `Layer`s and `Optimizer`s will automatically add dependencies on their variables (e.g. ""kernel"" and ""bias"" for `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing dependencies easy in user-defined classes, since `Model` hooks into attribute assignment. For example: ```python class Regress(tf.keras.Model): def __init__(self): super(Regress, self).__init__() self.input_transform = tf.keras.layers.Dense(10) # ... def call(self, inputs): x = self.input_transform(inputs) # ... ``` This `Model` has a dependency named ""input_transform"" on its `Dense` layer, which in turn depends on its variables. As a result, saving an instance of `Regress` using `tf.train.Checkpoint` will also save all the variables created by the `Dense` layer. When variables are assigned to multiple workers, each worker writes its own section of the checkpoint. These sections are then merged/re-indexed to behave as a single checkpoint. This avoids copying all variables to one worker, but does require that all workers see a common filesystem. While `tf.keras.Model.save_weights` and `tf.train.Checkpoint.save` save in the same format, note that the root of the resulting checkpoint is the object the save method is attached to. This means saving a `tf.keras.Model` using `save_weights` and loading into a `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match the `Model`'s variables. See the [guide to training checkpoints](https://www.tensorflow.org/guide/checkpoint) for details. Prefer `tf.train.Checkpoint` over `tf.keras.Model.save_weights` for training checkpoints. Attributes: save_counter: Incremented when `save()` is called. Used to number checkpoints." 12521,Checkpoint,tensorflow/tensorflow/python/training/tracking/util.py,1739,class,"Groups trackable objects, saving and restoring them. 
`Checkpoint`'s constructor accepts keyword arguments whose values are types that contain trackable state, such as `tf.keras.optimizers.Optimizer` implementations, `tf.Variable`s, `tf.data.Dataset` iterators, `tf.keras.Layer` implementations, or `tf.keras.Model` implementations. It saves these values with a checkpoint, and maintains a `save_counter` for numbering checkpoints. Example usage: ```python import tensorflow as tf import os checkpoint_directory = ""/tmp/training_checkpoints"" checkpoint_prefix = os.path.join(checkpoint_directory, ""ckpt"") # Create a Checkpoint that will manage two objects with trackable state, # one we name ""optimizer"" and the other we name ""model"". checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory)) for _ in range(num_training_steps): optimizer.minimize( ... ) # Variables will be restored on creation. status.assert_consumed() # Optional sanity checks. checkpoint.save(file_prefix=checkpoint_prefix) ``` `Checkpoint.save()` and `Checkpoint.restore()` write and read object-based checkpoints, in contrast to TensorFlow 1.x's `tf.compat.v1.train.Saver` which writes and reads `variable.name` based checkpoints. Object-based checkpointing saves a graph of dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s, etc.) with named edges, and this graph is used to match variables when restoring a checkpoint. It can be more robust to changes in the Python program, and helps to support restore-on-create for variables. `Checkpoint` objects have dependencies on the objects passed as keyword arguments to their constructors, and each dependency is given a name that is identical to the name of the keyword argument for which it was created. TensorFlow classes like `Layer`s and `Optimizer`s will automatically add dependencies on their own variables (e.g. ""kernel"" and ""bias"" for `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing dependencies easy in user-defined classes, since `Model` hooks into attribute assignment. For example: ```python class Regress(tf.keras.Model): def __init__(self): super(Regress, self).__init__() self.input_transform = tf.keras.layers.Dense(10) # ... def call(self, inputs): x = self.input_transform(inputs) # ... ``` This `Model` has a dependency named ""input_transform"" on its `Dense` layer, which in turn depends on its variables. As a result, saving an instance of `Regress` using `tf.train.Checkpoint` will also save all the variables created by the `Dense` layer. When variables are assigned to multiple workers, each worker writes its own section of the checkpoint. These sections are then merged/re-indexed to behave as a single checkpoint. This avoids copying all variables to one worker, but does require that all workers see a common filesystem. While `tf.keras.Model.save_weights` and `tf.train.Checkpoint.save` save in the same format, note that the root of the resulting checkpoint is the object the save method is attached to. This means saving a `tf.keras.Model` using `save_weights` and loading into a `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match the `Model`'s variables. See the [guide to training checkpoints](https://www.tensorflow.org/guide/checkpoint) for details. Prefer `tf.train.Checkpoint` over `tf.keras.Model.save_weights` for training checkpoints. Attributes: save_counter: Incremented when `save()` is called. Used to number checkpoints." 
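The restore-on-create behavior mentioned in both `Checkpoint` docstrings is worth seeing in isolation. A minimal sketch, assuming TF 2.x eager execution; the `/tmp` prefix and the attribute name `v` are illustrative. Calling `restore()` before the matching object exists defers the read, and the saved value is applied at the moment the attribute is attached:

```python
import tensorflow as tf

# Sketch of restore-on-create: restore() runs before the dependency
# exists, so the value is held and applied when `dst.v` is attached.
src = tf.train.Checkpoint(v=tf.Variable(42.0))
path = src.save("/tmp/roc_demo/ckpt")  # illustrative prefix

dst = tf.train.Checkpoint()
status = dst.restore(path)   # nothing to match yet; restore is deferred
dst.v = tf.Variable(0.0)     # value 42.0 is restored on this assignment

status.assert_existing_objects_matched()  # every created object matched
print(dst.v.numpy())  # 42.0
```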
12522,NonLayerTrackable,tensorflow/tensorflow/python/training/tracking/util_test.py,49,class, 12523,InterfaceTests,tensorflow/tensorflow/python/training/tracking/util_test.py,57,class, 12524,_MirroringSaveable,tensorflow/tensorflow/python/training/tracking/util_test.py,165,class, 12525,_OwnsMirroredVariables,tensorflow/tensorflow/python/training/tracking/util_test.py,186,class,A Trackable object which returns a more complex SaveableObject. 12526,CheckpointingTests,tensorflow/tensorflow/python/training/tracking/util_test.py,209,class, 12527,TemplateTests,tensorflow/tensorflow/python/training/tracking/util_test.py,798,class, 12528,CheckpointingTests,tensorflow/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py,39,class, 12529,_ManualScope,tensorflow/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py,197,class, 12530,TemplateTests,tensorflow/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py,209,class, 12531,Tensor,tensorflow/tensorflow/python/types/core.py,33,class,"The base class of all dense Tensor objects. A dense tensor has a static data type (dtype), and may have a static rank and shape. Tensor objects are immutable. Mutable objects may be backed by a Tensor which holds the unique handle that identifies the mutable object." 12532,Symbol,tensorflow/tensorflow/python/types/core.py,50,class,"Symbolic ""graph"" Tensor. These objects represent the output of an op definition and do not carry a value." 12533,Value,tensorflow/tensorflow/python/types/core.py,59,class,"Tensor that can be associated with a value (aka ""eager tensor""). These objects represent the (usually future) output of executing an op immediately." 12534,Iterable,tensorflow/tensorflow/python/types/distribute.py,25,class,Interface for distributed objects that admit iteration/reduction. 12535,Iterator,tensorflow/tensorflow/python/types/distribute.py,51,class,Interface for distributed iterators. 12536,document,tensorflow/tensorflow/python/types/doc_typealias.py,24,function,"Adds a docstring to typealias by overriding the `__doc__` attribute. Note: Overriding `__doc__` is only possible in Python 3.7 and later. Args: obj: Typealias object that needs to be documented. doc: Docstring of the typealias. It should follow the standard pystyle docstring rules." 12537,NativeObject,tensorflow/tensorflow/python/types/internal.py,26,class,"Types natively supported by various TF operations. The most notable example of NativeObject is Tensor." 12538,my_fact,tensorflow/tensorflow/python/user_ops/user_ops.py,30,function,Example of overriding the generated code for an Op. 12539,make_all,tensorflow/tensorflow/python/util/all_util.py,30,function,"Generates `__all__` from the docstring of one or more modules. Usage: `make_all(__name__)` or `make_all(__name__, [sys.modules[__name__], other_module])`. The docstring modules must each have a docstring, and `__all__` will contain all symbols with `@@` references, where that symbol currently exists in the module named `module_name`. Args: module_name: The name of the module (usually `__name__`). doc_string_modules: a list of modules from which to take docstrings. If None, then a list containing only the module named `module_name` is used. Returns: A list suitable for use as `__all__`." 12540,reveal_undocumented,tensorflow/tensorflow/python/util/all_util.py,66,function,"Reveals a symbol that was previously removed by `remove_undocumented`. This should be used by tensorflow internal tests only. It explicitly defeats the encapsulation afforded by `remove_undocumented`.
It throws an exception when the symbol was not hidden in the first place. Args: symbol_name: a string representing the full absolute path of the symbol. target_module: if specified, the module in which to restore the symbol." 12541,remove_undocumented,tensorflow/tensorflow/python/util/all_util.py,86,function,"Removes symbols in a module that are not referenced by a docstring. Args: module_name: the name of the module (usually `__name__`). allowed_exception_list: a list of names that should not be removed. doc_string_modules: a list of modules from which to take the docstrings. If None, then a list containing only the module named `module_name` is used. Furthermore, if a symbol was previously added with `add_to_global_allowlist`, it will always be allowed. This is useful for internal tests. Returns: None" 12542,as_bytes,tensorflow/tensorflow/python/util/compat.py,64,function,"Converts `bytearray`, `bytes`, or unicode python input types to `bytes`. Uses utf-8 encoding for text by default. Args: bytes_or_text: A `bytearray`, `bytes`, `str`, or `unicode` object. encoding: A string indicating the charset for encoding unicode. Returns: A `bytes` object. Raises: TypeError: If `bytes_or_text` is not a binary or unicode string." 12543,as_text,tensorflow/tensorflow/python/util/compat.py,90,function,"Converts any string-like python input types to unicode. Returns the input as a unicode string. Uses utf-8 encoding for text by default. Args: bytes_or_text: A `bytes`, `str`, or `unicode` object. encoding: A string indicating the charset for decoding unicode. Returns: A `unicode` (Python 2) or `str` (Python 3) object. Raises: TypeError: If `bytes_or_text` is not a binary or unicode string." 12544,as_str,tensorflow/tensorflow/python/util/compat.py,114,function, 12545,as_str_any,tensorflow/tensorflow/python/util/compat.py,126,function,"Converts input to `str` type. Uses `str(value)`, except for `bytes` typed inputs, which are converted using `as_str`. Args: value: An object that can be converted to `str`. Returns: A `str` object." 12546,path_to_str,tensorflow/tensorflow/python/util/compat.py,145,function,"Converts input which is a `PathLike` object to `str` type. Converts from any python constant representation of a `PathLike` object to a string. If the input is not a `PathLike` object, simply returns the input. Args: path: An object that can be converted to path representation. Returns: A `str` object. Usage: In case a simplified `str` version of the path is needed from an `os.PathLike` object Examples: ```python $ tf.compat.path_to_str('C:\XYZ\tensorflow\./.././tensorflow') 'C:\XYZ\tensorflow\./.././tensorflow' # Windows OS $ tf.compat.path_to_str(Path('C:\XYZ\tensorflow\./.././tensorflow')) 'C:\XYZ\tensorflow\..\tensorflow' # Windows OS $ tf.compat.path_to_str(Path('./corpus')) 'corpus' # Linux OS $ tf.compat.path_to_str('./.././Corpus') './.././Corpus' # Linux OS $ tf.compat.path_to_str(Path('./.././Corpus')) '../Corpus' # Linux OS $ tf.compat.path_to_str(Path('./..////../')) '../..' # Linux OS ```" 12547,path_to_bytes,tensorflow/tensorflow/python/util/compat.py,183,function,"Converts input which is a `PathLike` object to `bytes`. Converts from any python constant representation of a `PathLike` object or `str` to bytes. Args: path: An object that can be converted to path representation. Returns: A `bytes` object.
Usage: In case a simplified `bytes` version of the path is needed from an `os.PathLike` object" 12548,path_to_str,tensorflow/tensorflow/python/util/compat_internal.py,24,function,"Returns the file system path representation of a `PathLike` object, or the input unchanged if it is not `PathLike`. Args: path: An object that can be converted to path representation. Returns: A `str` object." 12549,get_qualified_name,tensorflow/tensorflow/python/util/decorator_utils.py,24,function, 12550,_normalize_docstring,tensorflow/tensorflow/python/util/decorator_utils.py,35,function,"Normalizes the docstring. Replaces tabs with spaces, removes leading and trailing blank lines, and removes any indentation. Copied from PEP-257: https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation Args: docstring: the docstring to normalize Returns: The normalized docstring" 12551,add_notice_to_docstring,tensorflow/tensorflow/python/util/decorator_utils.py,76,function,"Adds a deprecation notice to a docstring. Args: doc: The original docstring. instructions: A string, describing how to fix the problem. no_doc_str: The default value to use for `doc` if `doc` is empty. suffix_str: Is added to the end of the first line. notice: A list of strings. The main notice warning body. Returns: A new docstring, with the notice attached. Raises: ValueError: If `notice` is empty." 12552,validate_callable,tensorflow/tensorflow/python/util/decorator_utils.py,117,function, 12553,classproperty,tensorflow/tensorflow/python/util/decorator_utils.py,126,class,"Class property decorator. Example usage: class MyClass(object): @classproperty def value(cls): return '123' > print MyClass.value 123" 12554,_test_function,tensorflow/tensorflow/python/util/decorator_utils_test.py,29,function, 12555,GetQualifiedNameTest,tensorflow/tensorflow/python/util/decorator_utils_test.py,33,class, 12556,AddNoticeToDocstringTest,tensorflow/tensorflow/python/util/decorator_utils_test.py,45,class, 12557,ValidateCallableTest,tensorflow/tensorflow/python/util/decorator_utils_test.py,99,class, 12558,DeprecatedNamesAlreadySet,tensorflow/tensorflow/python/util/deprecation.py,41,class,Raised when setting deprecated names multiple times for the same symbol. 12559,_add_deprecated_function_notice_to_docstring,tensorflow/tensorflow/python/util/deprecation.py,46,function,Adds a deprecation notice to a docstring for deprecated functions. 12560,_add_deprecated_arg_notice_to_docstring,tensorflow/tensorflow/python/util/deprecation.py,58,function,Adds a deprecation notice to a docstring for deprecated arguments. 12561,_add_deprecated_arg_value_notice_to_docstring,tensorflow/tensorflow/python/util/deprecation.py,74,function,Adds a deprecation notice to a docstring for deprecated arguments. 12562,_validate_deprecation_args,tensorflow/tensorflow/python/util/deprecation.py,93,function, 12563,_call_location,tensorflow/tensorflow/python/util/deprecation.py,100,function,Returns call location given level up from current call. 12564,_wrap_decorator,tensorflow/tensorflow/python/util/deprecation.py,113,function,"Indicate that one function wraps another. This decorator wraps a function using `tf_decorator.make_decorator` so that doc generation scripts can pick up the original function signature. It would be better to use the @functools.wraps decorator, but it would not update the function signature to match the wrapped function in Python 2. Args: wrapped_function: The function that decorated function wraps. Returns: Function that accepts wrapper function as an argument and returns `TFDecorator` instance."
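Since the `as_bytes`/`as_text` helpers listed above are easy to misuse, here is a short sketch of the documented round-trip behavior, using the public `tf.compat` aliases and the default UTF-8 encoding:

```python
import tensorflow as tf

# Round-trip between str and bytes with the default UTF-8 encoding.
b = tf.compat.as_bytes("héllo")        # str -> bytes
s = tf.compat.as_text(b)               # bytes -> str
assert b == "héllo".encode("utf-8") and s == "héllo"

# as_str_any falls back to str() for non-string inputs.
print(tf.compat.as_str_any(7))  # '7'
```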
12565,deprecated_alias,tensorflow/tensorflow/python/util/deprecation.py,134,function,"Deprecate a symbol in favor of a new name with identical semantics. This function is meant to be used when defining a backwards-compatibility alias for a symbol which has been moved. For example: module1.py: ```python class NewNameForClass: pass ``` module2.py: ```python import module1 DeprecatedNameForClass = deprecated_alias( deprecated_name='module2.DeprecatedNameForClass', name='module1.NewNameForClass', func_or_class=module1.NewNameForClass) ``` This function works for classes and functions. For classes, it creates a new class which is functionally identical (it inherits from the original, and overrides its constructor), but which prints a deprecation warning when an instance is created. It also adds a deprecation notice to the class' docstring. For functions, it returns a function wrapped by `tf_decorator.make_decorator`. That function prints a warning when used, and has a deprecation notice in its docstring. This is more or less equivalent (the deprecation warning has slightly different text) to writing: ```python @deprecated def deprecated_alias(original_args): real_function(original_args) ``` Args: deprecated_name: The name of the symbol that is being deprecated, to be used in the warning message. This should be its fully qualified name to avoid confusion. name: The name of the symbol that is to be used instead of the deprecated name. This should be a fully qualified name to avoid confusion. func_or_class: The (non-deprecated) class or function for which a deprecated alias should be created. warn_once: If True (the default), only print a deprecation warning the first time this function is used, or the class is instantiated. Returns: A wrapped version of `func_or_class` which prints a deprecation warning on use and has a modified docstring." 12566,deprecated_endpoints,tensorflow/tensorflow/python/util/deprecation.py,245,function,"Decorator for marking endpoints deprecated. This decorator does not print deprecation messages. TODO(annarev): eventually start printing deprecation warnings when @deprecation_endpoints decorator is added. Args: *args: Deprecated endpoint names. Returns: A function that takes symbol as an argument and adds _tf_deprecated_api_names to that symbol. _tf_deprecated_api_names would be set to a list of deprecated endpoint names for the symbol." 12567,deprecated,tensorflow/tensorflow/python/util/deprecation.py,274,function,"Decorator for marking functions or methods deprecated. This decorator logs a deprecation warning whenever the decorated function is called. It has the following format: <function> (from <module>) is deprecated and will be removed after <date>. Instructions for updating: <instructions> If `date` is None, 'after <date>' is replaced with 'in a future version'. <function> will include the class name if it is a method. It also edits the docstring of the function: ' (deprecated)' is appended to the first line of the docstring and a deprecation notice is prepended to the rest of the docstring. Args: date: String or None. The date the function is scheduled to be removed. Must be ISO 8601 (YYYY-MM-DD), or None. instructions: String. Instructions on how to update code using the deprecated function. warn_once: Boolean. Set to `True` to warn only the first time the decorated function is called. Otherwise, every call will log a warning. Returns: Decorated function or method. Raises: ValueError: If `date` is neither None nor in ISO 8601 format, or if `instructions` is empty."
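A hedged sketch of the `deprecated` decorator in use; the module path is internal to TensorFlow and may move between releases, and `old_fn` is a hypothetical function used only for illustration:

```python
from tensorflow.python.util import deprecation

# date=None substitutes "in a future version" into the logged warning;
# the docstring's first line gains a " (deprecated)" suffix.
@deprecation.deprecated(None, "Use new_fn instead.")
def old_fn(x):  # hypothetical function
  """Adds one."""
  return x + 1

print(old_fn(1))                       # 2; logs a one-time warning
print(old_fn.__doc__.splitlines()[0])  # 'Adds one. (deprecated)'
```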
12568,deprecated_args,tensorflow/tensorflow/python/util/deprecation.py,336,function,"Decorator for marking specific function arguments as deprecated. This decorator logs a deprecation warning whenever the decorated function is called with the deprecated argument. It has the following format: Calling <function> (from <module>) with <arg> is deprecated and will be removed after <date>. Instructions for updating: <instructions> If `date` is None, 'after <date>' is replaced with 'in a future version'. <function> includes the class name if it is a method. It also edits the docstring of the function: ' (deprecated arguments)' is appended to the first line of the docstring and a deprecation notice is prepended to the rest of the docstring. Args: date: String or None. The date the function is scheduled to be removed. Must be ISO 8601 (YYYY-MM-DD), or None. instructions: String. Instructions on how to update code using the deprecated function. *deprecated_arg_names_or_tuples: String or 2-Tuple(String, [ok_vals]). The string is the deprecated argument name. Optionally, an ok-value may be provided. If the user-provided argument equals this value, the warning is suppressed. **kwargs: If `warn_once=False` is passed, every call with a deprecated argument will log a warning. The default behavior is to only warn the first time the function is called with any given deprecated argument. All other kwargs raise `ValueError`. Returns: Decorated function or method. Raises: ValueError: If `date` is neither None nor in ISO 8601 format, instructions are empty, the deprecated arguments are not present in the function signature, the second element of a deprecated_tuple is not a list, or if a kwarg other than `warn_once` is passed." 12569,deprecated_arg_values,tensorflow/tensorflow/python/util/deprecation.py,516,function,"Decorator for marking specific function argument values as deprecated. This decorator logs a deprecation warning whenever the decorated function is called with the deprecated argument values. It has the following format: Calling <function> (from <module>) with <arg>=<value> is deprecated and will be removed after <date>. Instructions for updating: <instructions> If `date` is None, 'after <date>' is replaced with 'in a future version'. <function> will include the class name if it is a method. It also edits the docstring of the function: ' (deprecated arguments)' is appended to the first line of the docstring and a deprecation notice is prepended to the rest of the docstring. Args: date: String or None. The date the function is scheduled to be removed. Must be ISO 8601 (YYYY-MM-DD), or None. instructions: String. Instructions on how to update code using the deprecated function. warn_once: If `True`, warn only the first time this function is called with deprecated argument values. Otherwise, every call (with a deprecated argument value) will log a warning. **deprecated_kwargs: The deprecated argument values. Returns: Decorated function or method. Raises: ValueError: If `date` is neither None nor in ISO 8601 format, or if `instructions` is empty." 12570,deprecated_argument_lookup,tensorflow/tensorflow/python/util/deprecation.py,583,function,"Looks up deprecated argument name and ensures both are not used. Args: new_name: new name of argument new_value: value of new argument (or None if not used) old_name: old name of argument old_value: value of old argument (or None if not used) Returns: The effective argument that should be used.
Raises: ValueError: if new_value and old_value are both non-null" 12571,rewrite_argument_docstring,tensorflow/tensorflow/python/util/deprecation.py,604,function, 12572,silence,tensorflow/tensorflow/python/util/deprecation.py,610,function,Temporarily silence deprecation warnings. 12573,HiddenTfApiAttribute,tensorflow/tensorflow/python/util/deprecation.py,619,class,"Hides a class attribute from the public API. Attributes in public classes can be hidden from the API by having an '_' in front of the name (e.g. ClassName._variables). This doesn't work when attributes or methods are inherited from a parent class. To hide inherited attributes, set their values to be `deprecation.hide_attribute_from_api`. For example, this is used in V2 Estimator to hide the deprecated export_savedmodel method: class EstimatorV2(Estimator): export_savedmodel = deprecation.hide_attribute_from_api('...')" 12574,DeprecatedAliasTest,tensorflow/tensorflow/python/util/deprecation_test.py,29,class, 12575,DeprecationTest,tensorflow/tensorflow/python/util/deprecation_test.py,82,class, 12576,DeprecatedArgsTest,tensorflow/tensorflow/python/util/deprecation_test.py,459,class, 12577,DeprecatedArgValuesTest,tensorflow/tensorflow/python/util/deprecation_test.py,742,class, 12578,DeprecationArgumentsTest,tensorflow/tensorflow/python/util/deprecation_test.py,926,class, 12579,DeprecatedEndpointsTest,tensorflow/tensorflow/python/util/deprecation_test.py,961,class, 12580,OpDispatcher,tensorflow/tensorflow/python/util/dispatch.py,46,class,"Abstract base class for TensorFlow operator dispatchers. Each operation dispatcher acts as an override handler for a single TensorFlow operation, and its results are used when the handler indicates that it can handle the operation's arguments (by returning any value other than `OpDispatcher.NOT_SUPPORTED`)." 12581,GlobalOpDispatcher,tensorflow/tensorflow/python/util/dispatch.py,89,class,Abstract base class for TensorFlow global operator dispatchers. 12582,dispatch,tensorflow/tensorflow/python/util/dispatch.py,102,function,"Returns the result from the first successful dispatcher for a given op. Calls the `handle` method of each `OpDispatcher` that has been registered to handle `op`, and returns the value from the first successful handler. Args: op: Python function: the operation to dispatch for. args: The arguments to the operation. kwargs: The keyword arguments to the operation. Returns: The result of the operation, or `NOT_SUPPORTED` if no registered dispatcher can handle the given arguments." 12583,_TypeBasedDispatcher,tensorflow/tensorflow/python/util/dispatch.py,128,class,"Dispatcher that handles op if any arguments have a specified type. Checks the types of the arguments and keyword arguments (including elements of lists or tuples), and if any argument values have the indicated type(s), then delegates to an override function." 12584,dispatch_for_types,tensorflow/tensorflow/python/util/dispatch.py,156,function,"Decorator to declare that a Python function overrides an op for a type. The decorated function is used to override `op` if any of the arguments or keyword arguments (including elements of lists or tuples) have one of the specified types. Example: ```python @dispatch_for_types(math_ops.add, RaggedTensor, RaggedTensorValue) def ragged_add(x, y, name=None): ... ``` Args: op: Python function: the operation that should be overridden. *types: The argument types for which this function should be used."
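The `deprecated_argument_lookup` entry above describes the common argument-rename pattern abstractly; here is a hedged sketch of it in use. The module path is internal, and `concat`'s signature is hypothetical:

```python
from tensorflow.python.util import deprecation

# Pick whichever of the new/old kwarg was supplied; using both raises.
def concat(values, axis=None, concat_dim=None):  # hypothetical op
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "concat_dim", concat_dim)
  return values, axis

print(concat([1, 2], axis=0))        # ([1, 2], 0)
print(concat([1, 2], concat_dim=1))  # ([1, 2], 1) -- old name still works
```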
12585,add_dispatch_list,tensorflow/tensorflow/python/util/dispatch.py,188,function,Decorator that adds a dispatch_list attribute to an op. 12586,add_dispatch_support,tensorflow/tensorflow/python/util/dispatch.py,196,function,Decorator that adds a dispatch handling wrapper to an op. 12587,CustomTensor,tensorflow/tensorflow/python/util/dispatch_test.py,37,class,"A fake composite tensor class, for testing type-based dispatching." 12588,test_op,tensorflow/tensorflow/python/util/dispatch_test.py,47,function,A fake op for testing dispatch of Python ops. 12589,TensorTracer,tensorflow/tensorflow/python/util/dispatch_test.py,52,class,"An object used to trace TensorFlow graphs. This is an example class that is used to test global op dispatchers. The global op dispatcher for TensorTracers is defined below." 12590,TensorTracerOpDispatcher,tensorflow/tensorflow/python/util/dispatch_test.py,93,class,Global op dispatcher for TensorTracer. 12591,DispatchTest,tensorflow/tensorflow/python/util/dispatch_test.py,120,class, 12592,extract_example_parser_configuration,tensorflow/tensorflow/python/util/example_parser_configuration.py,26,function,"Returns an ExampleParserConfig proto. Args: parse_example_op: A ParseExample or ParseExampleV2 `Operation`. sess: A tf.compat.v1.Session needed to obtain some configuration values. Returns: An ExampleParserConfig proto. Raises: ValueError: If attributes are inconsistent." 12593,_extract_from_parse_example,tensorflow/tensorflow/python/util/example_parser_configuration.py,46,function,Extract ExampleParserConfig from ParseExample op. 12594,_extract_from_parse_example_v2,tensorflow/tensorflow/python/util/example_parser_configuration.py,135,function,Extract ExampleParserConfig from ParseExampleV2 op. 12595,ExampleParserConfigurationTest,tensorflow/tensorflow/python/util/example_parser_configuration_test.py,73,class, 12596,_is_bound_method,tensorflow/tensorflow/python/util/function_utils.py,30,function, 12597,_is_callable_object,tensorflow/tensorflow/python/util/function_utils.py,35,function, 12598,fn_args,tensorflow/tensorflow/python/util/function_utils.py,39,function,"Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of `functools.partial`). Returns: `tuple` of string argument names. Raises: ValueError: if partial function has positionally bound arguments" 12599,has_kwargs,tensorflow/tensorflow/python/util/function_utils.py,66,function,"Returns whether the passed callable has **kwargs in its signature. Args: fn: Function, or function-like object (e.g., result of `functools.partial`). Returns: `bool`: if `fn` has **kwargs in its signature. Raises: `TypeError`: If `fn` is not a function or function-like object." 12600,get_func_name,tensorflow/tensorflow/python/util/function_utils.py,89,function,Returns name of passed callable. 12601,get_func_code,tensorflow/tensorflow/python/util/function_utils.py,104,function,"Returns func_code of passed callable, or None if not available."
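A sketch of `fn_args` and `has_kwargs` from `function_utils`; the module path is internal and subject to change, and `model_fn` is a hypothetical function:

```python
import functools
from tensorflow.python.util import function_utils

def model_fn(features, labels, mode):  # hypothetical function
  return features, labels, mode

print(function_utils.fn_args(model_fn))  # ('features', 'labels', 'mode')

# Keyword-bound arguments of a functools.partial are excluded.
bound = functools.partial(model_fn, mode="train")
print(function_utils.fn_args(bound))     # ('features', 'labels')

print(function_utils.has_kwargs(model_fn))  # False -- no **kwargs
```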
12602,get_disabled_rewriter_config,tensorflow/tensorflow/python/util/function_utils.py,125,function, 12603,silly_example_function,tensorflow/tensorflow/python/util/function_utils_test.py,27,function, 12604,SillyCallableClass,tensorflow/tensorflow/python/util/function_utils_test.py,31,class, 12605,FnArgsTest,tensorflow/tensorflow/python/util/function_utils_test.py,37,class, 12606,HasKwargsTest,tensorflow/tensorflow/python/util/function_utils_test.py,147,class, 12607,GetFuncNameTest,tensorflow/tensorflow/python/util/function_utils_test.py,242,class, 12608,GetFuncCodeTest,tensorflow/tensorflow/python/util/function_utils_test.py,274,class, 12609,keyword_args_only,tensorflow/tensorflow/python/util/keyword_args.py,27,function,"Decorator for marking a specific function as accepting keyword args only. This decorator raises a `ValueError` if the input `func` is called with any non-keyword args. This prevents the caller from providing the arguments in the wrong order. Args: func: The function or method needed to be decorated. Returns: Decorated function or method. Raises: ValueError: If `func` is not callable." 12610,KeywordArgsTest,tensorflow/tensorflow/python/util/keyword_args_test.py,25,class, 12611,LazyLoader,tensorflow/tensorflow/python/util/lazy_loader.py,27,class,"Lazily import a module, mainly to avoid pulling in large dependencies. `contrib` and `ffmpeg` are examples of modules that are large and not always needed, and this allows them to only be loaded when they are used." 12612,GroupLock,tensorflow/tensorflow/python/util/lock_util.py,24,class,"A lock to allow many members of a group to access a resource exclusively. This lock provides a way to allow access to a resource by multiple threads belonging to a logical group at the same time, while restricting access to threads from all other groups. You can think of this as an extension of a reader-writer lock, where you allow multiple writers at the same time. We made it generic to support multiple groups instead of just two - readers and writers. Simple usage example with two groups accessing the same resource: ```python lock = GroupLock(num_groups=2) # In a member of group 0: with lock.group(0): # do stuff, access the resource # ... # In a member of group 1: with lock.group(1): # do stuff, access the resource # ... ``` Using as a context manager with `.group(group_id)` is the easiest way. You can also use the `acquire` and `release` methods directly." 12613,GroupLockTest,tensorflow/tensorflow/python/util/lock_util_test.py,30,class, 12614,dismantle_ordered_dict,tensorflow/tensorflow/python/util/memory.py,24,function,"Remove reference cycle in OrderedDict `ordered_dict`. Helpful for making sure the garbage collector doesn't need to run after using an OrderedDict. Args: ordered_dict: An `OrderedDict` object to destroy. This object is unusable after this function runs." 12615,get_rename_v2,tensorflow/tensorflow/python/util/module_wrapper.py,34,function, 12616,_call_location,tensorflow/tensorflow/python/util/module_wrapper.py,40,function, 12617,contains_deprecation_decorator,tensorflow/tensorflow/python/util/module_wrapper.py,51,function, 12618,has_deprecation_decorator,tensorflow/tensorflow/python/util/module_wrapper.py,56,function,"Checks if given object has a deprecation decorator. We check whether the deprecation decorator appears among the symbol's decorators, as well as whether the symbol is a class whose __init__ method has a deprecation decorator. Args: symbol: Python object. Returns: True if symbol has deprecation decorator."
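To illustrate the `LazyLoader` entry above, a hedged sketch; the module path is internal, and wrapping `numpy` here is just an example of deferring a heavy dependency:

```python
from tensorflow.python.util import lazy_loader

# The real import is deferred until the first attribute access.
np = lazy_loader.LazyLoader("np", globals(), "numpy")
print(type(np).__name__)  # LazyLoader -- numpy is not imported yet
print(np.ones(2))         # first attribute access triggers the import
```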
12619,TFModuleWrapper,tensorflow/tensorflow/python/util/module_wrapper.py,81,class,Wrapper for TF modules to support deprecation messages and lazy loading. 12620,MockModule,tensorflow/tensorflow/python/util/module_wrapper_test.py,34,class, 12621,DeprecationWrapperTest,tensorflow/tensorflow/python/util/module_wrapper_test.py,38,class, 12622,LazyLoadingWrapperTest,tensorflow/tensorflow/python/util/module_wrapper_test.py,72,class, 12623,PickleTest,tensorflow/tensorflow/python/util/module_wrapper_test.py,136,class, 12624,_get_attrs_items,tensorflow/tensorflow/python/util/nest.py,80,function,"Returns a list of (name, value) pairs from an attrs instance. The list will be sorted by name. Args: obj: an object. Returns: A list of (attr_name, attr_value) pairs, sorted by attr_name." 12625,_sorted,tensorflow/tensorflow/python/util/nest.py,96,function,"Returns a sorted list of the dict keys, with error if keys not sortable." 12626,_is_namedtuple,tensorflow/tensorflow/python/util/nest.py,104,function,"Returns True iff `instance` is a `namedtuple`. Args: instance: An instance of a Python object. strict: If True, `instance` is considered to be a `namedtuple` only if it is a ""plain"" namedtuple. For instance, a class inheriting from a `namedtuple` will be considered to be a `namedtuple` iff `strict=False`. Returns: True if `instance` is a `namedtuple`." 12627,_sequence_like,tensorflow/tensorflow/python/util/nest.py,129,function,"Converts the sequence `args` to the same type as `instance`. Args: instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, `collections.OrderedDict`, or `composite_tensor.CompositeTensor` or `type_spec.TypeSpec`. args: elements to be converted to the `instance` type. Returns: `args` with the type of `instance`." 12628,_yield_value,tensorflow/tensorflow/python/util/nest.py,198,function, 12629,_yield_sorted_items,tensorflow/tensorflow/python/util/nest.py,203,function,"Yield (key, value) pairs for `iterable` in a deterministic order. For Sequences, the key will be an int, the array index of a value. For Mappings, the key will be the dictionary key. For objects (e.g. namedtuples), the key will be the attribute name. In all cases, the keys will be iterated in sorted order. Args: iterable: an iterable. Yields: The iterable's (key, value) pairs, in order of sorted keys." 12630,is_nested,tensorflow/tensorflow/python/util/nest.py,261,function,"Returns true if its input is a collections.abc.Sequence (except strings). Args: seq: an input sequence. Returns: True if the sequence is not a string and is a collections.abc.Sequence or a dict." 12631,flatten,tensorflow/tensorflow/python/util/nest.py,275,function,"Returns a flat list from a given nested structure. If nest is not a structure, tuple (or a namedtuple), dict, or an attrs class, then returns a single-element list: [nest]. In the case of dict instances, the sequence consists of the values, sorted by key to ensure deterministic behavior. This is true also for OrderedDict instances: their sequence order is ignored, the sorting order of keys is used instead. The same convention is followed in pack_sequence_as. This correctly repacks dicts and OrderedDicts after they have been flattened, and also allows flattening an OrderedDict and then repacking it back using a corresponding plain dict, or vice-versa. Dictionaries with non-sortable keys cannot be flattened. Users must not modify any collections used in nest while this function is running. Examples: 1.
Python dict (ordered by key): >>> dict = { ""key3"": ""value3"", ""key1"": ""value1"", ""key2"": ""value2"" } >>> tf.nest.flatten(dict) ['value1', 'value2', 'value3'] 2. For a nested python tuple: >>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), (6.0)) >>> tf.nest.flatten(tuple) [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 3. Numpy array (will not flatten): >>> array = np.array([[1, 2], [3, 4]]) >>> tf.nest.flatten(array) [array([[1, 2], [3, 4]])] 4. `tf.Tensor` (will not flatten): >>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) >>> tf.nest.flatten(tensor) [<tf.Tensor: shape=(3, 3), dtype=float32, numpy=array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]], dtype=float32)>] Args: structure: an arbitrarily nested structure. Note, numpy arrays are considered atoms and are not flattened. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Returns: A Python list, the flattened version of the input. Raises: TypeError: The nest is or contains a dict with non-sortable keys." 12632,_DotString,tensorflow/tensorflow/python/util/nest.py,345,class, 12633,assert_same_structure,tensorflow/tensorflow/python/util/nest.py,360,function,"Asserts that two structures are nested in the same way. Note that namedtuples with identical name and fields are always considered to have the same shallow structure (even with `check_types=True`). For instance, this code will not raise an error: ```python def nt(a, b): return collections.namedtuple('foo', 'a b')(a, b) assert_same_structure(nt(0, 1), nt(2, 3)) ``` Args: nest1: an arbitrarily nested structure. nest2: an arbitrarily nested structure. check_types: if `True` (default) types of sequences are checked as well, including the keys of dictionaries. If set to `False`, for example a list and a tuple of objects will look the same if they have the same size. Note that namedtuples with identical name and fields are always considered to have the same shallow structure. Two types will also be considered the same if they are both list subtypes (which allows ""list"" and ""_ListWrapper"" from trackable dependency tracking to compare equal). expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Raises: ValueError: If the two structures do not have the same number of elements or if the two structures are not nested in the same way. TypeError: If the two structures differ in the type of sequence in any of their substructures. Only possible if `check_types` is `True`." 12634,flatten_dict_items,tensorflow/tensorflow/python/util/nest.py,407,function,"Returns a dictionary with flattened keys and values. This function flattens the keys and values of a dictionary, which can be arbitrarily nested structures, and returns the flattened version of such structures: ```python example_dictionary = {(4, 5, (6, 8)): (""a"", ""b"", (""c"", ""d""))} result = {4: ""a"", 5: ""b"", 6: ""c"", 8: ""d""} flatten_dict_items(example_dictionary) == result ``` The input dictionary must satisfy two properties: 1. Its keys and values should have the same exact nested structure. 2. The set of all flattened keys of the dictionary must not contain repeated keys. Args: dictionary: the dictionary to zip Returns: The zipped dictionary. Raises: TypeError: If the input is not a dictionary. ValueError: If any key and value do not have the same structure layout, or if keys are not unique." 12635,_packed_nest_with_indices,tensorflow/tensorflow/python/util/nest.py,463,function,"Helper function for pack_sequence_as. 
Args: structure: Substructure (list / tuple / dict) to mimic. flat: Flattened values to output substructure for. index: Index at which to start reading from flat. is_seq: Function used to test if a value should be treated as a sequence. sequence_fn: Function used to generate a new sequence instance. Returns: The tuple (new_index, child), where: * new_index - the updated index into `flat` having processed `structure`. * packed - the subset of `flat` corresponding to `structure`, having started at `index`, and packed into the same nested format. Raises: ValueError: if `structure` contains more elements than `flat` (assuming indexing starts from `index`)." 12636,_pack_sequence_as,tensorflow/tensorflow/python/util/nest.py,498,function,"Implements sequence packing, with the option to alter the structure." 12637,pack_sequence_as,tensorflow/tensorflow/python/util/nest.py,539,function,"Returns a given flattened sequence packed into a given structure. If `structure` is a scalar, `flat_sequence` must be a single-element list; in this case the return value is `flat_sequence[0]`. If `structure` is or contains a dict instance, the keys will be sorted to pack the flat sequence in deterministic order. This is true also for `OrderedDict` instances: their sequence order is ignored, the sorting order of keys is used instead. The same convention is followed in `flatten`. This correctly repacks dicts and `OrderedDict`s after they have been flattened, and also allows flattening an `OrderedDict` and then repacking it back using a corresponding plain dict, or vice-versa. Dictionaries with non-sortable keys cannot be flattened. Args: structure: Nested structure, whose structure is given by nested lists, tuples, and dicts. Note: numpy arrays and strings are considered scalars. flat_sequence: flat sequence to pack. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Returns: packed: `flat_sequence` converted to have the same recursive structure as `structure`. Raises: ValueError: If `flat_sequence` and `structure` have different element counts. TypeError: `structure` is or contains a dict with non-sortable keys." 12638,map_structure,tensorflow/tensorflow/python/util/nest.py,576,function,"Applies `func` to each entry in `structure` and returns a new structure. Applies `func(x[0], x[1], ...)` where x[i] is an entry in `structure[i]`. All structures in `structure` must have the same arity, and the return value will contain results with the same structure layout. Args: func: A callable that accepts as many arguments as there are structures. *structure: scalar, or tuple or dict or list of constructed scalars and/or other tuples/lists, or scalars. Note: numpy arrays are considered as scalars. **kwargs: Valid keyword args are: * `check_types`: If set to `True` (default) the types of iterables within the structures have to be same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow this set this argument to `False`. Note that namedtuples with identical name and fields are always considered to have the same shallow structure. * `expand_composites`: If set to `True`, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. If `False` (the default), then composite tensors are not expanded. 
Returns: A new structure with the same arity as `structure`, whose values correspond to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding location in `structure[i]`. If there are different sequence types and `check_types` is `False` the sequence types of the first structure will be used. Raises: TypeError: If `func` is not callable or if the structures do not match each other by depth tree. ValueError: If no structure is provided or if the structures do not match each other by type. ValueError: If wrong keyword arguments are provided." 12639,map_structure_with_paths,tensorflow/tensorflow/python/util/nest.py,641,function,"Applies `func` to each entry in `structure` and returns a new structure. Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in `structure[i]` and `path` is the common path to x[i] in the structures. All structures in `structure` must have the same arity, and the return value will contain the results with the same structure layout. Special kwarg `check_types` determines whether the types of iterables within the structure must be the same-- see **kwargs definition below. Args: func: A callable with the signature func(path, *values, **kwargs) that is evaluated on the leaves of the structure. *structure: A variable number of compatible structures to process. **kwargs: Optional kwargs to be passed through to func. Special kwarg `check_types` is not passed to func, but instead determines whether the types of iterables within the structures have to be same (e.g., `map_structure(func, [1], (1,))` raises a `TypeError` exception). By default, the types must match. To allow iteration over structures of different types (but common arity), set this kwarg to `False`. Returns: A structure of the same form as the input structures whose leaves are the result of evaluating func on corresponding leaves of the input structures. Raises: TypeError: If `func` is not callable or if the structures do not match each other by depth tree. TypeError: If `check_types` is not `False` and the two structures differ in the type of sequence in any of their substructures. ValueError: If no structures are provided." 12640,map_structure_with_tuple_paths,tensorflow/tensorflow/python/util/nest.py,683,function,"Applies `func` to each entry in `structure` and returns a new structure. Applies `func(tuple_path, x[0], x[1], ..., **kwargs)` where `x[i]` is an entry in `structure[i]` and `tuple_path` is a tuple of indices and/or dictionary keys (as returned by `nest.yield_flat_paths`), which uniquely specifies the common path to x[i] in the structures. All structures in `structure` must have the same arity, and the return value will contain the results in the same structure. Special kwarg `check_types` determines whether the types of iterables within the structure must be the same-- see **kwargs definition below. Args: func: A callable with the signature `func(tuple_path, *values, **kwargs)` that is evaluated on the leaves of the structure. *structure: A variable number of compatible structures to process. **kwargs: Optional kwargs to be passed through to func. Special kwarg `check_types` is not passed to func, but instead determines whether the types of iterables within the structures have to be same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow this set this argument to `False`. Returns: A structure of the same form as the input structures whose leaves are the result of evaluating func on corresponding leaves of the input structures. 
Raises: TypeError: If `func` is not callable or if the structures do not match each other by depth tree. TypeError: If `check_types` is not `False` and the two structures differ in the type of sequence in any of their substructures. ValueError: If no structures are provided." 12641,_yield_flat_up_to,tensorflow/tensorflow/python/util/nest.py,722,function,"Yields (path, value) pairs of input_tree flattened up to shallow_tree. Args: shallow_tree: Nested structure. Traverse no further than its leaf nodes. input_tree: Nested structure. Return the paths and values from this tree. Must have the same upper structure as shallow_tree. is_seq: Function used to test if a value should be treated as a sequence. path: Tuple. Optional argument, only used when recursing. The path from the root of the original shallow_tree, down to the root of the shallow_tree arg of this recursive call. Yields: Pairs of (path, value), where path the tuple path of a leaf node in shallow_tree, and value is the value of the corresponding node in input_tree." 12642,assert_shallow_structure,tensorflow/tensorflow/python/util/nest.py,752,function,"Asserts that `shallow_tree` is a shallow structure of `input_tree`. That is, this function tests if the `input_tree` structure can be created from the `shallow_tree` structure by replacing its leaf nodes with deeper tree structures. Examples: The following code will raise an exception: ```python shallow_tree = {""a"": ""A"", ""b"": ""B""} input_tree = {""a"": 1, ""c"": 2} assert_shallow_structure(shallow_tree, input_tree) ``` The following code will raise an exception: ```python shallow_tree = [""a"", ""b""] input_tree = [""c"", [""d"", ""e""], ""f""] assert_shallow_structure(shallow_tree, input_tree) ``` Args: shallow_tree: an arbitrarily nested structure. input_tree: an arbitrarily nested structure. check_types: if `True` (default) the sequence types of `shallow_tree` and `input_tree` have to be the same. Note that even with check_types==True, this function will consider two different namedtuple classes with the same name and _fields attribute to be the same class. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Raises: TypeError: If `shallow_tree` is a sequence but `input_tree` is not. TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. Only raised if `check_types` is `True`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`." 12643,flatten_up_to,tensorflow/tensorflow/python/util/nest.py,875,function,"Flattens `input_tree` up to `shallow_tree`. Any further depth in structure in `input_tree` is retained as elements in the partially flatten output. If `shallow_tree` and `input_tree` are not sequences, this returns a single-element list: `[input_tree]`. Use Case: Sometimes we may wish to partially flatten a nested sequence, retaining some of the nested structure. We achieve this by specifying a shallow structure, `shallow_tree`, we wish to flatten up to. The input, `input_tree`, can be thought of as having the same structure layout as `shallow_tree`, but with leaf nodes that are themselves tree structures. 
Examples: ```python input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] shallow_tree = [[True, True], [False, True]] flattened_input_tree = flatten_up_to(shallow_tree, input_tree) flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree) # Output is: # [[2, 2], [3, 3], [4, 9], [5, 5]] # [True, True, False, True] ``` ```python input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree) input_tree_flattened = flatten(input_tree) # Output is: # [('a', 1), ('b', 2), ('c', 3), ('d', 4)] # ['a', 1, 'b', 2, 'c', 3, 'd', 4] ``` Non-Sequence Edge Cases: ```python flatten_up_to(0, 0) # Output: [0] flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]] flatten_up_to([0, 1, 2], 0) # Output: TypeError flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2] ``` Args: shallow_tree: a possibly pruned structure of input_tree. input_tree: an arbitrarily nested structure or a scalar object. Note, numpy arrays are considered scalars. check_types: bool. If True, check that each node in shallow_tree has the same type as the corresponding node in input_tree. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Returns: A Python list, the partially flattened version of `input_tree` according to the structure of `shallow_tree`. Raises: TypeError: If `shallow_tree` is a sequence but `input_tree` is not. TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`." 12644,flatten_with_tuple_paths_up_to,tensorflow/tensorflow/python/util/nest.py,959,function,"Flattens `input_tree` up to `shallow_tree`. Any further depth in structure in `input_tree` is retained as elements in the partially flattened output. Returns a list of (path, value) pairs, where value a leaf node in the flattened tree, and path is the tuple path of that leaf in input_tree. If `shallow_tree` and `input_tree` are not sequences, this returns a single-element list: `[((), input_tree)]`. Use Case: Sometimes we may wish to partially flatten a nested sequence, retaining some of the nested structure. We achieve this by specifying a shallow structure, `shallow_tree`, we wish to flatten up to. The input, `input_tree`, can be thought of as having the same structure layout as `shallow_tree`, but with leaf nodes that are themselves tree structures. 
Examples: ```python input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]] shallow_tree = [[True, True], [False, True]] flattened_input_tree = flatten_with_tuple_paths_up_to(shallow_tree, input_tree) flattened_shallow_tree = flatten_with_tuple_paths_up_to(shallow_tree, shallow_tree) # Output is: # [((0, 0), [2, 2]), # ((0, 1), [3, 3]), # ((1, 0), [4, 9]), # ((1, 1), [5, 5])] # # [((0, 0), True), # ((0, 1), True), # ((1, 0), False), # ((1, 1), True)] ``` ```python input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]] shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]] input_tree_flattened_as_shallow_tree = flatten_with_tuple_paths_up_to(shallow_tree, input_tree) input_tree_flattened = flatten(input_tree) # Output is: # [((0, 0), ('a', 1)), # ((0, 1, 0), ('b', 2)), # ((0, 1, 1, 0), ('c', 3)), # ((0, 1, 1, 1), ('d', 4))] # ['a', 1, 'b', 2, 'c', 3, 'd', 4] ``` Non-Sequence Edge Cases: ```python flatten_with_tuple_paths_up_to(0, 0) # Output: [((), 0)] flatten_with_tuple_paths_up_to(0, [0, 1, 2]) # Output: [((), [0, 1, 2])] flatten_with_tuple_paths_up_to([0, 1, 2], 0) # Output: TypeError flatten_with_tuple_paths_up_to([0, 1, 2], [0, 1, 2]) # Output: [((0,), 0), ((1,), 1), ((2,), 2)] ``` Args: shallow_tree: a possibly pruned structure of input_tree. input_tree: an arbitrarily nested structure or a scalar object. Note, numpy arrays are considered scalars. check_types: bool. If True, check that each node in shallow_tree has the same type as the corresponding node in input_tree. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Returns: A Python list, the partially flattened version of `input_tree` according to the structure of `shallow_tree`. Raises: TypeError: If `shallow_tree` is a sequence but `input_tree` is not. TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`." 12645,map_structure_up_to,tensorflow/tensorflow/python/util/nest.py,1063,function,"Applies a function or op to a number of partially flattened inputs. The `inputs` are flattened up to `shallow_tree` before being mapped. Use Case: Sometimes we wish to apply a function to a partially flattened sequence (for example when the function itself takes sequence inputs). We achieve this by specifying a shallow structure, `shallow_tree` we wish to flatten up to. The `inputs` can be thought of as having the same structure layout as `shallow_tree`, but with leaf nodes that are themselves tree structures. This function therefore will return something with the same base structure as `shallow_tree`. 
Examples: ```python shallow_tree = [None, None] inp_val = [1, 2, 3] out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val) # Output is: [2, 4] ``` ```python ab_tuple = collections.namedtuple(""ab_tuple"", ""a, b"") op_tuple = collections.namedtuple(""op_tuple"", ""add, mul"") inp_val = ab_tuple(a=2, b=3) inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3)) out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops) # Output is: ab_tuple(a=6, b=15) ``` ```python data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]] name_list = ['evens', ['odds', 'primes']] out = map_structure_up_to( name_list, lambda name, sec: ""first_{}_{}"".format(len(sec), name), name_list, data_list) # Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']] ``` Args: shallow_tree: a shallow tree, common to all the inputs. func: callable which will be applied to each input individually. *inputs: arbitrarily nested combination of objects that are compatible with shallow_tree. The function `func` is applied to corresponding partially flattened elements of each input, so the function must support arity of `len(inputs)`. **kwargs: kwargs to feed to func(). Special kwarg `check_types` is not passed to func, but instead determines whether the types of iterables within the structures have to be same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow this set this argument to `False`. Raises: TypeError: If `shallow_tree` is a sequence but `input_tree` is not. TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`. Returns: result of repeatedly applying `func`, with the same structure layout as `shallow_tree`." 12646,map_structure_with_tuple_paths_up_to,tensorflow/tensorflow/python/util/nest.py,1144,function,"Applies a function or op to a number of partially flattened inputs. Like map_structure_up_to(), except that the 'func' argument takes a path tuple as its first argument, followed by the corresponding values from *inputs. Example: ```python lowercase = {'a': 'a', 'b': ('b0', 'b1')} uppercase = {'a': 'A', 'b': ('B0', 'B1')} def print_path_and_values(path, *values): print(""path: {}, values: {}"".format(path, values)) shallow_tree = {'a': None} map_structure_with_tuple_paths_up_to(shallow_tree, print_path_and_values, lowercase, uppercase) path: ('a',), values: ('a', 'A') path: ('b', 0), values: ('b0', 'B0') path: ('b', 1), values: ('b1', 'B1') shallow_tree = {'b': None} map_structure_with_tuple_paths_up_to(shallow_tree, print_path_and_values, lowercase, uppercase, check_types=False) path: ('b', 1), values: (('bo', 'b1'), ('B0', 'B1')) shallow_tree = {'a': None, 'b': {1: None}} map_structure_with_tuple_paths_up_to(shallow_tree, print_path_and_values, lowercase, uppercase, check_types=False) path: ('a',), values: ('a', 'A') path: ('b', 1), values: ('b1', B1') ``` Args: shallow_tree: a shallow tree, common to all the inputs. func: callable that takes args (path, inputs_0_value, ... , inputs_N_value), where path is a tuple path to a leaf node in shallow_tree, and inputs_i_value is the corresponding value from inputs[i]. *inputs: nested structures that are all structurally compatible with shallow_tree. **kwargs: kwargs to feed to func(). Special kwarg `check_types` is not passed to func, but instead determines whether the types of iterables within the structures have to be same (e.g. 
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow this set this argument to `False`. Raises: TypeError: If `shallow_tree` is a sequence but one of `*inputs` is not. TypeError: If the sequence types of `shallow_tree` are different from `input_tree`. ValueError: If the sequence lengths of `shallow_tree` are different from `input_tree`. Returns: Result of repeatedly applying `func`. Has the same structure layout as `shallow_tree`." 12647,get_traverse_shallow_structure,tensorflow/tensorflow/python/util/nest.py,1242,function,"Generates a shallow structure from a `traverse_fn` and `structure`. `traverse_fn` must accept any possible subtree of `structure` and return a depth=1 structure containing `True` or `False` values, describing which of the top-level subtrees may be traversed. It may also return scalar `True` or `False` ""traversal is OK / not OK for all subtrees."" Examples are available in the unit tests (nest_test.py). Args: traverse_fn: Function taking a substructure and returning either a scalar `bool` (whether to traverse that substructure or not) or a depth=1 shallow structure of the same type, describing which parts of the substructure to traverse. structure: The structure to traverse. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Returns: A shallow structure containing python bools, which can be passed to `map_structure_up_to` and `flatten_up_to`. Raises: TypeError: if `traverse_fn` returns a sequence for a non-sequence input, or a structure with depth higher than 1 for a sequence input, or if any leaf values in the returned structure or scalar are not type `bool`." 12648,yield_flat_paths,tensorflow/tensorflow/python/util/nest.py,1312,function,"Yields paths for some nested structure. Paths are lists of objects which can be str-converted, which may include integers or other types which are used as indices in a dict. The flat list will be in the corresponding order as if you called `nest.flatten` on the structure. This is handy for naming Tensors such the TF scope structure matches the tuple structure. E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))` ```shell nest.flatten(value) [3, 23, 42] list(nest.yield_flat_paths(value)) [('a',), ('b', 'c'), ('b', 'd')] ``` ```shell list(nest.yield_flat_paths({'a': [3]})) [('a', 0)] list(nest.yield_flat_paths({'a': 3})) [('a',)] ``` Args: nest: the value to produce a flattened paths list for. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Yields: Tuples containing index or key values which form the path to a specific leaf value in the nested structure." 12649,flatten_with_joined_string_paths,tensorflow/tensorflow/python/util/nest.py,1353,function,"Returns a list of (string path, data element) tuples. The order of tuples produced matches that of `nest.flatten`. This allows you to flatten a nested structure while keeping information about where in the structure each data element was located. See `nest.yield_flat_paths` for more information. Args: structure: the nested structure to flatten. separator: string to separate levels of hierarchy in the results, defaults to '/'. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Returns: A list of (string, data element) tuples." 
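The path-producing helpers above (`yield_flat_paths`, `flatten_with_joined_string_paths`) are easiest to understand from a small run. This usage sketch imports the internal `tensorflow.python.util.nest` module named in these rows; it assumes a TensorFlow installation, and the sample structure is made up.

```python
from tensorflow.python.util import nest  # internal module path from the rows above

value = {"a": [3], "b": {"c": 23, "d": 42}}

# Paths follow the same order as nest.flatten: dict keys sorted, lists by index.
print(list(nest.yield_flat_paths(value)))
# [('a', 0), ('b', 'c'), ('b', 'd')]

# Same traversal, with path components joined by the default '/' separator.
print(nest.flatten_with_joined_string_paths(value))
# [('a/0', 3), ('b/c', 23), ('b/d', 42)]
```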
12650,flatten_with_tuple_paths,tensorflow/tensorflow/python/util/nest.py,1381,function,"Returns a list of `(tuple_path, leaf_element)` tuples. The order of pairs produced matches that of `nest.flatten`. This allows you to flatten a nested structure while keeping information about where in the structure each data element was located. See `nest.yield_flat_paths` for more information about tuple paths. Args: structure: the nested structure to flatten. expand_composites: If true, then composite tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their component tensors. Returns: A list of `(tuple_path, leaf_element)` tuples. Each `tuple_path` is a tuple of indices and/or dictionary keys that uniquely specify the path to `leaf_element` within `structure`." 12651,list_to_tuple,tensorflow/tensorflow/python/util/nest.py,1405,function,"Replace all lists with tuples. The fork of nest that tf.data uses treats lists as single elements, while tf.nest treats them as structures to recurse into. Keras has chosen to adopt the latter convention, and must therefore deeply replace all lists with tuples before passing structures to Dataset.from_generator. Args: structure: A nested structure to be remapped. Returns: structure mapped to replace all lists with tuples." 12652,_CustomMapping,tensorflow/tensorflow/python/util/nest_test.py,43,class, 12653,_CustomSequenceThatRaisesException,tensorflow/tensorflow/python/util/nest_test.py,58,class, 12654,NestTest,tensorflow/tensorflow/python/util/nest_test.py,67,class, 12655,NestBenchmark,tensorflow/tensorflow/python/util/nest_test.py,1220,class, 12656,_ObjectIdentityWrapper,tensorflow/tensorflow/python/util/object_identity.py,25,class,"Wraps an object, mapping __eq__ on wrapper to ""is"" on wrapped. Since __eq__ is based on object identity, it's safe to also define __hash__ based on object ids. This lets us add unhashable types like trackable _ListWrapper objects to object-identity collections." 12657,_WeakObjectIdentityWrapper,tensorflow/tensorflow/python/util/object_identity.py,73,class, 12658,Reference,tensorflow/tensorflow/python/util/object_identity.py,83,class,"Reference that refers an object. ```python x = [1] y = [1] x_ref1 = Reference(x) x_ref2 = Reference(x) y_ref2 = Reference(y) print(x_ref1 == x_ref2) ==> True print(x_ref1 == y) ==> False ```" 12659,ObjectIdentityDictionary,tensorflow/tensorflow/python/util/object_identity.py,117,class,"A mutable mapping data structure which compares using ""is"". This is necessary because we have trackable objects (_ListWrapper) which have behavior identical to built-in Python lists (including being unhashable and comparing based on the equality of their contents by default)." 12660,ObjectIdentityWeakKeyDictionary,tensorflow/tensorflow/python/util/object_identity.py,153,class,"Like weakref.WeakKeyDictionary, but compares objects with ""is""." 12661,ObjectIdentitySet,tensorflow/tensorflow/python/util/object_identity.py,173,class,"Like the built-in set, but compares objects with ""is""." 12662,ObjectIdentityWeakSet,tensorflow/tensorflow/python/util/object_identity.py,221,class,"Like weakref.WeakSet, but compares objects with ""is""." 12663,ObjectIdentityWrapperTest,tensorflow/tensorflow/python/util/object_identity_test.py,26,class, 12664,ObjectIdentitySetTest,tensorflow/tensorflow/python/util/object_identity_test.py,71,class, 12665,get_json_type,tensorflow/tensorflow/python/util/serialization.py,29,function,"Serializes any object to a JSON-serializable structure. 
Arguments: obj: the object to serialize Returns: JSON-serializable structure representing `obj`. Raises: TypeError: if `obj` cannot be serialized." 12666,SerializationTests,tensorflow/tensorflow/python/util/serialization_test.py,28,class, 12667,contextmanager,tensorflow/tensorflow/python/util/tf_contextlib.py,25,function,"A tf_decorator-aware wrapper for `contextlib.contextmanager`. Usage is identical to `contextlib.contextmanager`. Args: target: A callable to be wrapped in a contextmanager. Returns: A callable that can be used inside of a `with` statement." 12668,test_yield_append_before_and_after_yield,tensorflow/tensorflow/python/util/tf_contextlib_test.py,29,function, 12669,test_yield_return_x_plus_1,tensorflow/tensorflow/python/util/tf_contextlib_test.py,36,function, 12670,test_params_and_defaults,tensorflow/tensorflow/python/util/tf_contextlib_test.py,41,function, 12671,TfContextlibTest,tensorflow/tensorflow/python/util/tf_contextlib_test.py,45,class, 12672,make_decorator,tensorflow/tensorflow/python/util/tf_decorator.py,67,function,"Make a decorator from a wrapper and a target. Args: target: The final callable to be wrapped. decorator_func: The wrapper function. decorator_name: The name of the decorator. If `None`, the name of the function calling make_decorator. decorator_doc: Documentation specific to this application of `decorator_func` to `target`. decorator_argspec: The new callable signature of this decorator. Returns: The `decorator_func` argument with new metadata attached." 12673,_has_tf_decorator_attr,tensorflow/tensorflow/python/util/tf_decorator.py,114,function,"Checks if object has _tf_decorator attribute. This check would work for mocked object as well since it would check if returned attribute has the right type. Args: obj: Python object." 12674,rewrap,tensorflow/tensorflow/python/util/tf_decorator.py,128,function,"Injects a new target into a function built by make_decorator. This function allows replacing a function wrapped by `decorator_func`, assuming the decorator that wraps the function is written as described below. The decorator function must use `.__wrapped__` instead of the wrapped function that is normally used: Example: # Instead of this: def simple_parametrized_wrapper(*args, **kwds): return wrapped_fn(*args, **kwds) tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) # Write this: def simple_parametrized_wrapper(*args, **kwds): return simple_parametrized_wrapper.__wrapped__(*args, **kwds) tf_decorator.make_decorator(simple_parametrized_wrapper, wrapped_fn) Note that this process modifies decorator_func. Args: decorator_func: Callable returned by `wrap`. previous_target: Callable that needs to be replaced. new_target: Callable to replace previous_target with. Returns: The updated decorator. If decorator_func is not a tf_decorator, new_target is returned." 12675,unwrap,tensorflow/tensorflow/python/util/tf_decorator.py,200,function,"Unwraps an object into a list of TFDecorators and a final target. Args: maybe_tf_decorator: Any callable object. Returns: A tuple whose first element is an list of TFDecorator-derived objects that were applied to the final callable target, and whose second element is the final undecorated callable target. If the `maybe_tf_decorator` parameter is not decorated by any TFDecorators, the first tuple element will be an empty list. The `TFDecorator` list is ordered from outermost to innermost decorators." 
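To tie `make_decorator` and `unwrap` together, here is a small usage sketch against the internal `tf_decorator` module listed above. It assumes a TensorFlow installation; `double_result` and `add` are hypothetical examples.

```python
from tensorflow.python.util import tf_decorator  # module path from the rows above

def double_result(fn):
    def wrapper(*args, **kwargs):
        return 2 * fn(*args, **kwargs)
    # Attach TFDecorator metadata so TF introspection can see through wrapper.
    return tf_decorator.make_decorator(fn, wrapper, decorator_name="double_result")

@double_result
def add(a, b):
    return a + b

print(add(1, 2))  # 6

# unwrap returns the decorator chain (outermost first) and the bare target.
decorators, target = tf_decorator.unwrap(add)
print([d.decorator_name for d in decorators], target.__name__)
# ['double_result'] add
```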
12676,TFDecorator,tensorflow/tensorflow/python/util/tf_decorator.py,229,class,"Base class for all TensorFlow decorators. TFDecorator captures and exposes the wrapped target, and provides details about the current decorator." 12677,test_tfdecorator,tensorflow/tensorflow/python/util/tf_decorator_test.py,30,function, 12678,test_decorator_increment_first_int_arg,tensorflow/tensorflow/python/util/tf_decorator_test.py,38,function,This test decorator skips past `self` as args[0] in the bound case. 12679,test_injectable_decorator_square,tensorflow/tensorflow/python/util/tf_decorator_test.py,55,function, 12680,test_injectable_decorator_increment,tensorflow/tensorflow/python/util/tf_decorator_test.py,63,function, 12681,test_function,tensorflow/tensorflow/python/util/tf_decorator_test.py,71,function,Test Function Docstring. 12682,test_decorated_function,tensorflow/tensorflow/python/util/tf_decorator_test.py,79,function,Test Decorated Function Docstring. 12683,test_rewrappable_decorated,tensorflow/tensorflow/python/util/tf_decorator_test.py,86,function, 12684,TestDecoratedClass,tensorflow/tensorflow/python/util/tf_decorator_test.py,91,class,Test Decorated Class. 12685,TfDecoratorTest,tensorflow/tensorflow/python/util/tf_decorator_test.py,110,class, 12686,test_wrapper,tensorflow/tensorflow/python/util/tf_decorator_test.py,195,function, 12687,TfMakeDecoratorTest,tensorflow/tensorflow/python/util/tf_decorator_test.py,199,class, 12688,TfDecoratorRewrapTest,tensorflow/tensorflow/python/util/tf_decorator_test.py,275,class, 12689,TfDecoratorUnwrapTest,tensorflow/tensorflow/python/util/tf_decorator_test.py,299,class, 12690,SymbolAlreadyExposedError,tensorflow/tensorflow/python/util/tf_export.py,88,class,Raised when adding API names to symbol that already has API names. 12691,InvalidSymbolNameError,tensorflow/tensorflow/python/util/tf_export.py,93,class,Raised when trying to export symbol as an invalid or unallowed name. 12692,get_symbol_from_name,tensorflow/tensorflow/python/util/tf_export.py,100,function, 12693,get_canonical_name_for_symbol,tensorflow/tensorflow/python/util/tf_export.py,104,function,"Get canonical name for the API symbol. Args: symbol: API function or class. api_name: API name (tensorflow or estimator). add_prefix_to_v1_names: Specifies whether a name available only in V1 should be prefixed with compat.v1. Returns: Canonical name for the API symbol (for e.g. initializers.zeros) if canonical name could be determined. Otherwise, returns None." 12694,get_canonical_name,tensorflow/tensorflow/python/util/tf_export.py,142,function,"Get preferred endpoint name. Args: api_names: API names iterable. deprecated_api_names: Deprecated API names iterable. Returns: Returns one of the following in decreasing preference: - first non-deprecated endpoint - first endpoint - None" 12695,get_v1_names,tensorflow/tensorflow/python/util/tf_export.py,164,function,"Get a list of TF 1.* names for this symbol. Args: symbol: symbol to get API names for. Returns: List of all API names for this symbol including TensorFlow and Estimator names." 12696,get_v2_names,tensorflow/tensorflow/python/util/tf_export.py,190,function,"Get a list of TF 2.0 names for this symbol. Args: symbol: symbol to get API names for. Returns: List of all API names for this symbol including TensorFlow and Estimator names." 12697,get_v1_constants,tensorflow/tensorflow/python/util/tf_export.py,216,function,"Get a list of TF 1.* constants in this module. Args: module: TensorFlow module. 
Returns: List of all API constants under the given module including TensorFlow and Estimator constants." 12698,get_v2_constants,tensorflow/tensorflow/python/util/tf_export.py,237,function,"Get a list of TF 2.0 constants in this module. Args: module: TensorFlow module. Returns: List of all API constants under the given module including TensorFlow and Estimator constants." 12699,api_export,tensorflow/tensorflow/python/util/tf_export.py,258,class,Provides ways to export symbols to the TensorFlow API. 12700,kwarg_only,tensorflow/tensorflow/python/util/tf_export.py,394,function,A wrapper that throws away all non-kwarg arguments. 12701,_test_function,tensorflow/tensorflow/python/util/tf_export_test.py,29,function, 12702,_test_function2,tensorflow/tensorflow/python/util/tf_export_test.py,33,function, 12703,TestClassA,tensorflow/tensorflow/python/util/tf_export_test.py,37,class, 12704,TestClassB,tensorflow/tensorflow/python/util/tf_export_test.py,41,class, 12705,ValidateExportTest,tensorflow/tensorflow/python/util/tf_export_test.py,45,class,Tests for tf_export class. 12706,_convert_maybe_argspec_to_fullargspec,tensorflow/tensorflow/python/util/tf_inspect.py,40,function, 12707,_getargspec,tensorflow/tensorflow/python/util/tf_inspect.py,55,function,"A python3 version of getargspec. Calls `getfullargspec` and assigns args, varargs, varkw, and defaults to a python 2/3 compatible `ArgSpec`. The parameter name 'varkw' is changed to 'keywords' to fit the `ArgSpec` struct. Args: target: the target object to inspect. Returns: An ArgSpec with args, varargs, keywords, and defaults parameters from FullArgSpec." 12708,_getfullargspec,tensorflow/tensorflow/python/util/tf_inspect.py,81,function,"A python2 version of getfullargspec. Args: target: the target object to inspect. Returns: A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations." 12709,currentframe,tensorflow/tensorflow/python/util/tf_inspect.py,93,function,TFDecorator-aware replacement for inspect.currentframe. 12710,getargspec,tensorflow/tensorflow/python/util/tf_inspect.py,98,function,"TFDecorator-aware replacement for `inspect.getargspec`. Note: `getfullargspec` is recommended as the python 2/3 compatible replacement for this function. Args: obj: A function, partial function, or callable object, possibly decorated. Returns: The `ArgSpec` that describes the signature of the outermost decorator that changes the callable's signature, or the `ArgSpec` that describes the object if not decorated. Raises: ValueError: When callable's signature can not be expressed with ArgSpec. TypeError: For objects of unsupported types." 12711,_get_argspec_for_partial,tensorflow/tensorflow/python/util/tf_inspect.py,150,function,"Implements `getargspec` for `functools.partial` objects. Args: obj: The `functools.partial` object Returns: An `inspect.ArgSpec` Raises: ValueError: When callable's signature can not be expressed with ArgSpec." 12712,getfullargspec,tensorflow/tensorflow/python/util/tf_inspect.py,238,function,"TFDecorator-aware replacement for `inspect.getfullargspec`. This wrapper emulates `inspect.getfullargspec` in Python 2. Args: obj: A callable, possibly decorated. Returns: The `FullArgSpec` that describes the signature of the outermost decorator that changes the callable's signature. If the callable is not decorated, `inspect.getfullargspec()` will be called directly on the callable." 12713,getcallargs,tensorflow/tensorflow/python/util/tf_inspect.py,260,function,"TFDecorator-aware replacement for inspect.getcallargs. 
Args: *func_and_positional: A callable, possibly decorated, followed by any positional arguments that would be passed to `func`. **named: The named argument dictionary that would be passed to `func`. Returns: A dictionary mapping `func`'s named arguments to the values they would receive if `func(*positional, **named)` were called. `getcallargs` will use the argspec from the outermost decorator that provides it. If no attached decorators modify argspec, the final unwrapped target's argspec will be used." 12714,getframeinfo,tensorflow/tensorflow/python/util/tf_inspect.py,297,function, 12715,getdoc,tensorflow/tensorflow/python/util/tf_inspect.py,301,function,"TFDecorator-aware replacement for inspect.getdoc. Args: object: An object, possibly decorated. Returns: The docstring associated with the object. The outermost-decorated object is intended to have the most complete documentation, so the decorated parameter is not unwrapped." 12716,getfile,tensorflow/tensorflow/python/util/tf_inspect.py,316,function,TFDecorator-aware replacement for inspect.getfile. 12717,getmembers,tensorflow/tensorflow/python/util/tf_inspect.py,330,function,TFDecorator-aware replacement for inspect.getmembers. 12718,getmodule,tensorflow/tensorflow/python/util/tf_inspect.py,335,function,TFDecorator-aware replacement for inspect.getmodule. 12719,getmro,tensorflow/tensorflow/python/util/tf_inspect.py,340,function,TFDecorator-aware replacement for inspect.getmro. 12720,getsource,tensorflow/tensorflow/python/util/tf_inspect.py,345,function,TFDecorator-aware replacement for inspect.getsource. 12721,getsourcefile,tensorflow/tensorflow/python/util/tf_inspect.py,350,function,TFDecorator-aware replacement for inspect.getsourcefile. 12722,getsourcelines,tensorflow/tensorflow/python/util/tf_inspect.py,355,function,TFDecorator-aware replacement for inspect.getsourcelines. 12723,isbuiltin,tensorflow/tensorflow/python/util/tf_inspect.py,360,function,TFDecorator-aware replacement for inspect.isbuiltin. 12724,isclass,tensorflow/tensorflow/python/util/tf_inspect.py,365,function,TFDecorator-aware replacement for inspect.isclass. 12725,isfunction,tensorflow/tensorflow/python/util/tf_inspect.py,370,function,TFDecorator-aware replacement for inspect.isfunction. 12726,isframe,tensorflow/tensorflow/python/util/tf_inspect.py,375,function,TFDecorator-aware replacement for inspect.isframe. 12727,isgenerator,tensorflow/tensorflow/python/util/tf_inspect.py,380,function,TFDecorator-aware replacement for inspect.isgenerator. 12728,isgeneratorfunction,tensorflow/tensorflow/python/util/tf_inspect.py,385,function,TFDecorator-aware replacement for inspect.isgeneratorfunction. 12729,ismethod,tensorflow/tensorflow/python/util/tf_inspect.py,390,function,TFDecorator-aware replacement for inspect.ismethod. 12730,ismodule,tensorflow/tensorflow/python/util/tf_inspect.py,395,function,TFDecorator-aware replacement for inspect.ismodule. 12731,isroutine,tensorflow/tensorflow/python/util/tf_inspect.py,400,function,TFDecorator-aware replacement for inspect.isroutine. 12732,stack,tensorflow/tensorflow/python/util/tf_inspect.py,405,function,TFDecorator-aware replacement for inspect.stack. 12733,test_decorator,tensorflow/tensorflow/python/util/tf_inspect_test.py,31,function, 12734,test_undecorated_function,tensorflow/tensorflow/python/util/tf_inspect_test.py,39,function, 12735,test_decorated_function,tensorflow/tensorflow/python/util/tf_inspect_test.py,46,function,Test Decorated Function Docstring. 
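As a quick illustration of the `tf_inspect` wrappers above, `getcallargs` maps a call's arguments onto parameter names, applying defaults, while seeing through TFDecorators. A usage sketch (assumes a TensorFlow installation; `scale` is a hypothetical target function):

```python
from tensorflow.python.util import tf_inspect  # module path from the rows above

def scale(value, factor=2):  # hypothetical target function
    return value * factor

# Positional and named args are resolved against the signature, with defaults.
print(tf_inspect.getcallargs(scale, 3))
# {'value': 3, 'factor': 2}

print(tf_inspect.getfullargspec(scale).args)
# ['value', 'factor']
```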
12736,test_decorated_function_with_defaults,tensorflow/tensorflow/python/util/tf_inspect_test.py,52,function,Test Decorated Function With Defaults Docstring. 12737,TestDecoratedClass,tensorflow/tensorflow/python/util/tf_inspect_test.py,58,class,Test Decorated Class. 12738,TfInspectTest,tensorflow/tensorflow/python/util/tf_inspect_test.py,68,class, 12739,TfInspectGetCallArgsTest,tensorflow/tensorflow/python/util/tf_inspect_test.py,588,class, 12740,_TFShouldUseHelper,tensorflow/tensorflow/python/util/tf_should_use.py,32,class,"Object stored in TFShouldUse-wrapped objects. When it is deleted it will emit a warning or error if its `sate` method has not been called by time of deletion, and Tensorflow is not executing eagerly or inside a tf.function (which use autodeps and resolve the main issues this wrapper warns about)." 12741,_new__init__,tensorflow/tensorflow/python/util/tf_should_use.py,96,function, 12742,_new__setattr__,tensorflow/tensorflow/python/util/tf_should_use.py,102,function, 12743,_new__getattribute__,tensorflow/tensorflow/python/util/tf_should_use.py,110,function, 12744,_new_mark_used,tensorflow/tensorflow/python/util/tf_should_use.py,119,function, 12745,_get_wrapper,tensorflow/tensorflow/python/util/tf_should_use.py,133,function,"Create a wrapper for object x, whose class subclasses type(x). The wrapper will emit a warning if it is deleted without any of its properties being accessed or methods being called. Args: x: The instance to wrap. tf_should_use_helper: The object that tracks usage. Returns: An object wrapping `x`, of type `type(x)`." 12746,_add_should_use_warning,tensorflow/tensorflow/python/util/tf_should_use.py,175,function,"Wraps object x so that if it is never used, a warning is logged. Args: x: Python object. error_in_function: Python bool. If `True`, a `RuntimeError` is raised if the returned value is never used when created during `tf.function` tracing. warn_in_eager: Python bool. If `True` raise warning if in Eager mode as well as graph mode. Returns: An instance of `TFShouldUseWarningWrapper` which subclasses `type(x)` and is a very shallow wrapper for `x` which logs access into `x`." 12747,should_use_result,tensorflow/tensorflow/python/util/tf_should_use.py,216,function,"Function wrapper that ensures the function's output is used. If the output is not used, a `logging.error` is logged. If `error_in_function` is set, then a `RuntimeError` will be raised at the end of function tracing if the output is not used by that point. An output is marked as used if any of its attributes are read, modified, or updated. Examples when the output is a `Tensor` include: - Using it in any capacity (e.g. `y = t + 0`, `sess.run(t)`) - Accessing a property (e.g. getting `t.name` or `t.op`). - Calling `t.mark_used()`. Note, certain behaviors cannot be tracked - for these the object may not be marked as used. Examples include: - `t != 0`. In this case, comparison is done on types / ids. - `isinstance(t, tf.Tensor)`. Similar to above. Args: fn: The function to wrap. warn_in_eager: Whether to create warnings in Eager as well. error_in_function: Whether to raise an error when creating a tf.function. Returns: The wrapped function." 12748,reroute_error,tensorflow/tensorflow/python/util/tf_should_use_test.py,36,function,Temporarily reroute errors written to tf_logging.error into `captured`. 
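The `tf_should_use` machinery above is easier to grasp from a toy model. This is an illustrative re-implementation of the idea only, not TensorFlow's wrapper (which subclasses `type(x)` so that ordinary attribute access marks the value as used); all names here are hypothetical.

```python
import logging

class UsageGuard:
    # Warn if the wrapped value is deleted without ever being accessed,
    # mirroring the contract _TFShouldUseHelper documents above.
    def __init__(self, value, creation_note):
        self._value = value
        self._used = False
        self._note = creation_note

    def get(self):
        self._used = True  # any real access "sates" the guard
        return self._value

    def __del__(self):
        if not self._used:
            logging.error("Value created at %s was never used.", self._note)

guard = UsageGuard(42, "example.py:7")
del guard  # logs an error, since .get() was never called
```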
12749,TfShouldUseTest,tensorflow/tensorflow/python/util/tf_should_use_test.py,42,class, 12750,StackTraceTransform,tensorflow/tensorflow/python/util/tf_stack.py,47,class,Base class for stack trace transformation functions. 12751,StackTraceMapper,tensorflow/tensorflow/python/util/tf_stack.py,79,class,Allows remapping traceback information to different source code. 12752,StackTraceFilter,tensorflow/tensorflow/python/util/tf_stack.py,91,class,Allows filtering traceback information by removing superfluous frames. 12753,CurrentModuleFilter,tensorflow/tensorflow/python/util/tf_stack.py,102,class,Filters stack frames from the module where this is used (best effort). 12754,extract_stack,tensorflow/tensorflow/python/util/tf_stack.py,131,function,"A lightweight, extensible re-implementation of traceback.extract_stack. NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for each stack frame using linecache, which results in an abundance of stat() calls. This implementation does not retrieve the code, and any consumer should apply _convert_stack to the result to obtain a traceback that can be formatted etc. using traceback methods. Args: limit: A limit on the number of frames to return. Returns: A sequence of FrameSummary objects (filename, lineno, name, line) corresponding to the call stack of the current thread." 12755,TFStackTest,tensorflow/tensorflow/python/util/tf_stack_test.py,27,class, 12756,extract_stack,tensorflow/tensorflow/python/util/tf_stack_test.py,56,function, 12757,convert_stack_frame,tensorflow/tensorflow/python/util/tf_stack_test.py,61,function,Converts a TF stack frame into Python's. 12758,assertProtoEqual,tensorflow/tensorflow/python/util/protobuf/compare.py,77,function,"Fails with a useful error if a and b aren't equal. Comparison of repeated fields matches the semantics of unittest.TestCase.assertEqual(), ie order and extra duplicates fields matter. Args: self: googletest.TestCase a: proto2 PB instance, or text string representing one. b: proto2 PB instance -- message.Message or subclass thereof. check_initialized: boolean, whether to fail if either a or b isn't initialized. normalize_numbers: boolean, whether to normalize types and precision of numbers before comparison. msg: if specified, is used as the error message on failure." 12759,NormalizeNumberFields,tensorflow/tensorflow/python/util/protobuf/compare.py,121,function,"Normalizes types and precisions of number fields in a protocol buffer. Due to subtleties in the python protocol buffer implementation, it is possible for values to have different types and precision depending on whether they were set and retrieved directly or deserialized from a protobuf. This function normalizes integer values to ints and longs based on width, 32-bit floats to five digits of precision to account for python always storing them as 64-bit, and ensures doubles are floating point for when they're set to integers. Modifies pb in place. Recurses into nested objects. Args: pb: proto2 message. Returns: the given pb, modified in place." 12760,_IsMap,tensorflow/tensorflow/python/util/protobuf/compare.py,189,function, 12761,_IsRepeatedContainer,tensorflow/tensorflow/python/util/protobuf/compare.py,193,function, 12762,ProtoEq,tensorflow/tensorflow/python/util/protobuf/compare.py,203,function,"Compares two proto2 objects for equality. Recurses into nested messages. Uses list (not set) semantics for comparing repeated fields, ie duplicates and order matter. Args: a: A proto2 message or a primitive. b: A proto2 message or a primitive. 
Returns: `True` if the messages are equal." 12763,ProtoAssertions,tensorflow/tensorflow/python/util/protobuf/compare.py,258,class,"Mix this into a googletest.TestCase class to get proto2 assertions. Usage: class SomeTestCase(compare.ProtoAssertions, googletest.TestCase): ... def testSomething(self): ... self.assertProtoEqual(a, b) See module-level definitions for method documentation." 12764,LargePbs,tensorflow/tensorflow/python/util/protobuf/compare_test.py,34,function,Converts ASCII string Large PBs to messages. 12765,ProtoEqTest,tensorflow/tensorflow/python/util/protobuf/compare_test.py,45,class, 12766,NormalizeNumbersTest,tensorflow/tensorflow/python/util/protobuf/compare_test.py,211,class,Tests for NormalizeNumberFields(). 12767,AssertTest,tensorflow/tensorflow/python/util/protobuf/compare_test.py,270,class,Tests assertProtoEqual(). 12768,MixinTests,tensorflow/tensorflow/python/util/protobuf/compare_test.py,476,class, 12769,_SkipMember,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,79,function, 12770,_SkipMember,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,94,function, 12771,_NormalizeType,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,111,function, 12772,_NormalizeIsInstance,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,115,function, 12773,_SanitizedArgSpec,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,119,function,"Get an ArgSpec string that is free of addresses. We have callables as function arg defaults. This results in addresses in getargspec output. This function returns an argspec string with those addresses sanitized away. Args: obj: A python routine for which to create the sanitized argspec. Returns: string, a string representation of the argspec." 12774,_SanitizedMRO,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,157,function,"Get a list of superclasses with minimal amount of non-TF classes. Based on many parameters like python version, OS, protobuf implementation or changes in google core libraries, the list of superclasses of a class can change. We only return the first non-TF class to be robust to non-API-affecting changes. The Method Resolution Order returned by `tf_inspect.getmro` is still maintained in the return value. Args: obj: A python object for which to create the sanitized MRO. Returns: list of strings, string representation of the class names." 12775,_IsProtoClass,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,191,function,Returns whether the passed obj is a Protocol Buffer class. 12776,PythonObjectToProtoVisitor,tensorflow/tensorflow/tools/api/lib/python_object_to_proto_visitor.py,196,class,A visitor that summarizes given python objects as protobufs. 12777,_InitPathConstants,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,86,function, 12778,_KeyToFilePath,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,114,function,"From a given key, construct a filepath. Filepath will be inside golden folder for api_version. Args: key: a string used to determine the file path api_version: a number indicating the tensorflow API version, e.g. 1 or 2. Returns: A string of file path to the pbtxt file which describes the public API" 12779,_FileNameToKey,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,145,function,"From a given filename, construct a key we use for api objects." 
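The `ProtoEq` entry above stresses list (not set) semantics for repeated fields. Here is a self-contained sketch of just that comparison rule, with plain Python lists standing in for repeated proto fields; this is not the protobuf comparison code itself.

```python
def repeated_fields_equal(a, b):
    # Element-wise, order-sensitive comparison; duplicates are not collapsed.
    return len(a) == len(b) and all(x == y for x, y in zip(a, b))

print(repeated_fields_equal([1, 2, 2], [1, 2, 2]))  # True
print(repeated_fields_equal([1, 2, 2], [2, 1, 2]))  # False: order matters
print(repeated_fields_equal([1, 2], [1, 2, 2]))     # False: duplicates matter
```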
12780,_VerifyNoSubclassOfMessageVisitor,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,159,function,A Visitor that crashes on subclasses of generated proto classes. 12781,_FilterNonCoreGoldenFiles,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,173,function,Filter out non-core API pbtxt files. 12782,_FilterGoldenProtoDict,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,186,function,Filter out golden proto dict symbols that should be omitted. 12783,_GetTFNumpyGoldenPattern,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,209,function, 12784,ApiCompatibilityTest,tensorflow/tensorflow/tools/api/tests/api_compatibility_test.py,215,class, 12785,ModuleTest,tensorflow/tensorflow/tools/api/tests/module_test.py,30,class, 12786,write_build_info,tensorflow/tensorflow/tools/build_info/gen_build_info.py,32,function,"Writes a Python file that describes the build. Args: filename: filename to write to. key_value_list: A list of ""key=value"" strings that will be added to the module's ""build_info"" dictionary as additional entries." 12787,check_existence,tensorflow/tensorflow/tools/ci_build/copy_binary.py,40,function,Check the existence of file or dir. 12788,copy_binary,tensorflow/tensorflow/tools/ci_build/copy_binary.py,46,function,"Rename and copy binaries for different python versions. Arguments: directory: string of directory origin_tag: str of the old python version tag new_tag: str of the new tag version: the version of the package package: str, name of the package" 12789,main,tensorflow/tensorflow/tools/ci_build/copy_binary.py,94,function,"This script copies binaries. Requirements: filename: The path to the whl file AND new_py_ver: Create a nightly tag with current date Raises: RuntimeError: If the whl file was not found" 12790,check_existence,tensorflow/tensorflow/tools/ci_build/update_version.py,46,function,Check the existence of file or dir. 12791,check_all_files,tensorflow/tensorflow/tools/ci_build/update_version.py,53,function,Check all relevant files necessary for upgrade. 12792,replace_string_in_line,tensorflow/tensorflow/tools/ci_build/update_version.py,59,function,Replace with sed when regex is required. 12793,Version,tensorflow/tensorflow/tools/ci_build/update_version.py,67,class,Version class object that stores SemVer version information. 12794,get_current_semver_version,tensorflow/tensorflow/tools/ci_build/update_version.py,146,function,"Returns a Version object of current version. Returns: version: Version object of current SemVer string based on information from core/public/version.h" 12795,update_version_h,tensorflow/tensorflow/tools/ci_build/update_version.py,183,function,Update tensorflow/core/public/version.h. 12796,update_setup_dot_py,tensorflow/tensorflow/tools/ci_build/update_version.py,200,function,Update setup.py. 12797,update_readme,tensorflow/tensorflow/tools/ci_build/update_version.py,206,function,Update README. 12798,update_tensorflow_bzl,tensorflow/tensorflow/tools/ci_build/update_version.py,214,function,Update tensorflow.bzl. 12799,major_minor_change,tensorflow/tensorflow/tools/ci_build/update_version.py,224,function,Check if a major or minor change occurred. 12800,check_for_lingering_string,tensorflow/tensorflow/tools/ci_build/update_version.py,233,function,Check for given lingering strings. 12801,check_for_old_version,tensorflow/tensorflow/tools/ci_build/update_version.py,255,function,Check for old version references. 
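The `Version` class above stores SemVer version information for the release scripts. Here is a hypothetical mini-parser showing the fields involved; the real class also distinguishes regular from nightly version formats.

```python
import re

def parse_semver(version_string):
    # major.minor.patch plus an optional trailing identifier such as "-rc1".
    match = re.match(r"^(\d+)\.(\d+)\.(\d+)(.*)$", version_string)
    if match is None:
        raise ValueError("Not a SemVer string: %r" % version_string)
    major, minor, patch, identifier = match.groups()
    return int(major), int(minor), int(patch), identifier

print(parse_semver("2.3.0"))      # (2, 3, 0, '')
print(parse_semver("2.4.0-rc1"))  # (2, 4, 0, '-rc1')
```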
12802,main,tensorflow/tensorflow/tools/ci_build/update_version.py,265,function,"This script updates all instances of version in the tensorflow directory. Requirements: version: The version tag OR nightly: Create a nightly tag with current date Raises: RuntimeError: If the script is not being run from tf source dir" 12803,IntelPlatform,tensorflow/tensorflow/tools/ci_build/linux/mkl/set-build-env.py,34,class, 12804,NehalemPlatform,tensorflow/tensorflow/tools/ci_build/linux/mkl/set-build-env.py,89,class, 12805,SandyBridgePlatform,tensorflow/tensorflow/tools/ci_build/linux/mkl/set-build-env.py,105,class, 12806,HaswellPlatform,tensorflow/tensorflow/tools/ci_build/linux/mkl/set-build-env.py,121,class, 12807,SkylakePlatform,tensorflow/tensorflow/tools/ci_build/linux/mkl/set-build-env.py,140,class, 12808,CascadelakePlatform,tensorflow/tensorflow/tools/ci_build/linux/mkl/set-build-env.py,163,class, 12809,BuildEnvSetter,tensorflow/tensorflow/tools/ci_build/linux/mkl/set-build-env.py,183,class,Prepares the proper environment settings for various Intel platforms. 12810,PublicAPIVisitor,tensorflow/tensorflow/tools/common/public_api.py,29,class,Visitor to use with `traverse` to visit exactly the public TF API. 12811,PublicApiTest,tensorflow/tensorflow/tools/common/public_api_test.py,25,class, 12812,ModuleClass1,tensorflow/tensorflow/tools/common/test_module1.py,24,class, 12813,ModuleClass2,tensorflow/tensorflow/tools/common/test_module2.py,22,class, 12814,_traverse_internal,tensorflow/tensorflow/tools/common/traverse.py,32,function,Internal helper for traverse. 12815,traverse,tensorflow/tensorflow/tools/common/traverse.py,77,function,"Recursively enumerate all members of `root`. Similar to the Python library function `os.path.walk`. Traverses the tree of Python objects starting with `root`, depth first. Parent-child relationships in the tree are defined by membership in modules or classes. The function `visit` is called with arguments `(path, parent, children)` for each module or class `parent` found in the tree of python objects starting with `root`. `path` is a string containing the name with which `parent` is reachable from the current context. For example, if `root` is a local class called `X` which contains a class `Y`, `visit` will be called with `('Y', X.Y, children)`. If `root` is not a module or class, `visit` is never called. `traverse` never descends into built-in modules. `children`, a list of `(name, object)` pairs, is determined by `tf_inspect.getmembers`. To avoid visiting parts of the tree, `children` can be modified in place, using `del` or slice assignment. Cycles (determined by reference equality, `is`) stop the traversal. A stack of objects is kept to find cycles. Objects forming cycles may appear in `children`, but `visit` will not be called with any object as `parent` which is already in the stack. Traversing system modules can take a long time; it is advisable to pass a `visit` callable which denylists such modules. Args: root: A python object with which to start the traversal. visit: A function taking arguments `(path, parent, children)`. Will be called for each object found in the traversal." 12816,TestVisitor,tensorflow/tensorflow/tools/common/traverse_test.py,27,class, 12817,TraverseTest,tensorflow/tensorflow/tools/common/traverse_test.py,36,class, 12818,add_contrib_direct_import_support,tensorflow/tensorflow/tools/compatibility/all_renames_v2.py,560,function,Add support for `tf.contrib.*` alias `contrib_*`. Updates dict in place.
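The visitor contract `traverse` (12815) documents, including in-place pruning of `children` via slice assignment, can be sketched as follows; the visitor itself and `some_module` are hypothetical:

```python
# Hypothetical visitor for traverse(): prints each path and prunes private
# members in place via slice assignment, as the docstring above permits.
def print_and_prune(path, parent, children):
  print(path or "<root>", type(parent).__name__)
  children[:] = [(name, child) for name, child in children
                 if not name.startswith("_")]

# from tensorflow.tools.common import traverse
# traverse.traverse(some_module, print_and_prune)
```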
12819,AllRenamesV2Test,tensorflow/tensorflow/tools/compatibility/all_renames_v2_test.py,28,class, 12820,full_name_node,tensorflow/tensorflow/tools/compatibility/ast_edits.py,49,function,"Make an Attribute or Name node for name. Translate a qualified name into nested Attribute nodes (and a Name node). Args: name: The name to translate to a node. ctx: What context this name is used in. Defaults to Load() Returns: A Name or Attribute node." 12821,get_arg_value,tensorflow/tensorflow/tools/compatibility/ast_edits.py,72,function,"Get the value of an argument from an ast.Call node. This function goes through the positional and keyword arguments to check whether a given argument was used, and if so, returns its value (the node representing its value). This cannot introspect *args or **kwargs, but it safely handles *args in Python3.5+. Args: node: The ast.Call node to extract arg values from. arg_name: The name of the argument to extract. arg_pos: The position of the argument (in case it's passed as a positional argument). Returns: A tuple (arg_present, arg_value) containing a boolean indicating whether the argument is present, and its value in case it is." 12822,uses_star_args_in_call,tensorflow/tensorflow/tools/compatibility/ast_edits.py,111,function,"Check if an ast.Call node uses arbitrary-length positional *args. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not." 12823,uses_star_kwargs_in_call,tensorflow/tensorflow/tools/compatibility/ast_edits.py,135,function,"Check if an ast.Call node uses arbitrary-length **kwargs. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not." 12824,uses_star_args_or_kwargs_in_call,tensorflow/tensorflow/tools/compatibility/ast_edits.py,159,function,"Check if an ast.Call node uses arbitrary-length *args or **kwargs. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not." 12825,excluded_from_module_rename,tensorflow/tensorflow/tools/compatibility/ast_edits.py,175,function,"Check if this module import should not be renamed. Args: module: (string) module name. import_rename_spec: ImportRename instance. Returns: True if this import should not be renamed according to the import_rename_spec." 12826,APIChangeSpec,tensorflow/tensorflow/tools/compatibility/ast_edits.py,192,class,"This class defines the transformations that need to happen. This class must provide the following fields: * `function_keyword_renames`: maps function names to a map of old -> new argument names * `symbol_renames`: maps function names to new function names * `change_to_function`: a set of function names that have changed (for notifications) * `function_reorders`: maps functions whose argument order has changed to the list of arguments in the new order * `function_warnings`: maps full names of functions to warnings that will be printed out if the function is used. (e.g.
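The keyword-argument lookup that `get_arg_value` (12821) describes can be sketched with the standard library's `ast` module; this minimal helper is an illustration in the same spirit, not the module's implementation:

```python
import ast

# Parse a call expression and fetch its ast.Call node.
call = ast.parse("tf.foo(1, b=2)").body[0].value

def get_keyword_arg(node, arg_name):
  # Report (present, value_node) for a keyword argument on an ast.Call.
  for kw in node.keywords:
    if kw.arg == arg_name:
      return True, kw.value
  return False, None

present, value = get_keyword_arg(call, "b")
print(present, ast.dump(value))  # True, plus the AST of the literal 2
```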
tf.nn.convolution()) * `function_transformers`: maps function names to custom handlers * `module_deprecations`: maps module names to warnings that will be printed if the module is still used after all other transformations have run * `import_renames`: maps import name (must be a short name without '.') to ImportRename instance. For an example, see `TFAPIChangeSpec`." 12827,NoUpdateSpec,tensorflow/tensorflow/tools/compatibility/ast_edits.py,227,class,A specification of an API change which doesn't change anything. 12828,_PastaEditVisitor,tensorflow/tensorflow/tools/compatibility/ast_edits.py,242,class,"AST Visitor that processes function calls. Updates function calls from old API version to new API version using a given change spec." 12829,AnalysisResult,tensorflow/tensorflow/tools/compatibility/ast_edits.py,787,class,"This class represents an analysis result and how it should be logged. This class must provide the following fields: * `log_level`: The log level to which this detection should be logged * `log_message`: The message that should be logged for this detection For an example, see `VersionedTFImport`." 12830,APIAnalysisSpec,tensorflow/tensorflow/tools/compatibility/ast_edits.py,799,class,"This class defines how `AnalysisResult`s should be generated. It specifies how to map imports and symbols to `AnalysisResult`s. This class must provide the following fields: * `symbols_to_detect`: maps function names to `AnalysisResult`s * `imports_to_detect`: maps imports represented as (full module name, alias) tuples to `AnalysisResult`s. For an example, see `TFAPIImportAnalysisSpec`." 12831,PastaAnalyzeVisitor,tensorflow/tensorflow/tools/compatibility/ast_edits.py,815,class,"AST Visitor that looks for specific API usage without editing anything. This is used before any rewriting is done to detect if any symbols are used that require changing imports or disabling rewriting altogether." 12832,ASTCodeUpgrader,tensorflow/tensorflow/tools/compatibility/ast_edits.py,893,class,Handles upgrading a set of Python files using a given API change spec. 12833,ModuleDeprecationSpec,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,57,class,A specification which deprecates 'a.b'. 12834,RenameKeywordSpec,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,65,class,"A specification where kw2 gets renamed to kw3. The new API is def f(a, b, kw1, kw3): ..." 12835,ReorderKeywordSpec,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,82,class,"A specification where kw2 gets moved in front of kw1. The new API is def f(a, b, kw2, kw1): ..." 12836,ReorderAndRenameKeywordSpec,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,100,class,"A specification where kw2 gets moved in front of kw1 and is changed to kw3. The new API is def f(a, b, kw3, kw1): ..." 12837,RemoveDeprecatedAliasKeyword,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,116,class,"A specification where kw1_alias is removed in g. The new API is def g(a, b, kw1, c): ... def g2(a, b, kw1, c, d): ..." 12838,RemoveDeprecatedAliasAndReorderRest,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,132,class,"A specification where kw1_alias is removed in g. The new API is def g(a, b, c, kw1): ... def g2(a, b, c, d, kw1): ..." 12839,RemoveMultipleKeywordArguments,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,149,class,"A specification where both keyword aliases are removed from h. The new API is def h(a, kw1, kw2): ..."
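A hedged sketch of a tiny change spec built on `NoUpdateSpec` (12827), following the fields `APIChangeSpec` documents; the specific renames and warning text below are invented for illustration:

```python
from tensorflow.tools.compatibility import ast_edits

class MyChangeSpec(ast_edits.NoUpdateSpec):
  # Minimal spec: one symbol rename, one keyword rename, one warning.
  def __init__(self):
    ast_edits.NoUpdateSpec.__init__(self)
    self.symbol_renames = {"tf.old_name": "tf.new_name"}
    self.function_keyword_renames = {"tf.old_name": {"old_kw": "new_kw"}}
    self.function_warnings = {"tf.risky_fn": "WARNING: behavior changed."}
```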
12840,RenameImports,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,166,class,Specification for renaming imports. 12841,TestAstEdits,tensorflow/tensorflow/tools/compatibility/ast_edits_test.py,178,class, 12842,is_python,tensorflow/tensorflow/tools/compatibility/ipynb.py,33,function,Checks if the cell consists of Python code. 12843,process_file,tensorflow/tensorflow/tools/compatibility/ipynb.py,41,function,The function where we inject the support for ipynb upgrade. 12844,skip_magic,tensorflow/tensorflow/tools/compatibility/ipynb.py,71,function,"Checks if the cell has magic that is not Python-based. Args: code_line: A line of Python code magic_list: A list of jupyter ""magic"" exceptions Returns: True if the line is a jupyter ""magic"" line, not a Python line >>> skip_magic('!ls -laF', ['%', '!', '?']) True" 12845,check_line_split,tensorflow/tensorflow/tools/compatibility/ipynb.py,92,function,"Checks if a line was split with `\`. Args: code_line: A line of Python code Returns: True if the line was split with `\` >>> check_line_split(""!gcloud ml-engine models create ${MODEL} \\\n"") True" 12846,_get_code,tensorflow/tensorflow/tools/compatibility/ipynb.py,108,function,Loads the ipynb file and returns a list of CodeLines. 12847,_update_notebook,tensorflow/tensorflow/tools/compatibility/ipynb.py,155,function,"Updates notebook, once migration is done." 12848,TFAPIChangeSpec,tensorflow/tensorflow/tools/compatibility/tf_upgrade.py,29,class,List of maps that describe what changed in the API. 12849,TestUpgrade,tensorflow/tensorflow/tools/compatibility/tf_upgrade_test.py,31,class,"Test various APIs that have been changed in 1.0. We also test whether a converted file is executable. test_file_v0_11.py aims to exhaustively test that API changes are convertible and actually work when run with current TensorFlow." 12850,TestUpgradeFiles,tensorflow/tensorflow/tools/compatibility/tf_upgrade_test.py,138,class, 12851,UnaliasedTFImport,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,39,class, 12852,VersionedTFImport,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,48,class, 12853,TFAPIImportAnalysisSpec,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,61,class, 12854,CompatV1ImportReplacer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,72,class,"AST Visitor that replaces `import tensorflow.compat.v1 as tf`. Converts `import tensorflow.compat.v1 as tf` to `import tensorflow as tf`" 12855,TFAPIChangeSpec,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,92,class,List of maps that describe what changed in the API. 12856,_is_ast_str,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1678,function,Determine whether this node represents a string. 12857,_is_ast_true,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1690,function, 12858,_is_ast_false,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1697,function, 12859,_rename_if_arg_found_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1708,function,"Replaces the given call with tf.compat.v1 if the given arg is found. This requires the function to be called with all named args, so for using this transformer, the function should also be added to renames. If the arg is not found, the call site is left alone. If the arg is found, and if arg_ok_predicate is given, it is called with the ast Expression representing the argument value found. If it returns True, the function is left alone.
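A self-contained sketch matching the contract `skip_magic` (12844) documents, including its doctest case; this re-implementation is for illustration only:

```python
def skip_magic(code_line, magic_list):
  # A cell line starting with a jupyter magic prefix ('%', '!', '?')
  # is not Python and should be skipped by the upgrader.
  return any(code_line.startswith(magic) for magic in magic_list)

assert skip_magic("!ls -laF", ["%", "!", "?"])
assert not skip_magic("x = 1", ["%", "!", "?"])
```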
If the arg is found, arg_ok_predicate is not None and returns ok, and remove_if_ok is True, the argument is removed from the call. Otherwise, `compat.v1` is inserted between tf and the function name. Args: parent: Parent of node. node: ast.Call node to maybe modify. full_name: full name of function to modify name: name of function to modify logs: list of logs to append to arg_name: name of the argument to look for arg_ok_predicate: predicate callable with the ast of the argument value, returns whether the argument value is allowed. remove_if_ok: remove the argument if present and ok as determined by arg_ok_predicate. message: message to print if a non-ok arg is found (and hence, the function is renamed to its compat.v1 version). Returns: node, if it was modified, else None." 12860,_add_argument_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1777,function,Adds an argument (as a final kwarg arg_name=arg_value_ast). 12861,_iterator_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1789,function,Transform iterator methods to compat function calls. 12862,_dropout_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1815,function,Replace keep_prob with 1-rate. 12863,_cast_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1856,function,"Transforms to_int and to_float to cast(..., dtype=...)." 12864,_softmax_cross_entropy_with_logits_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1895,function,Wrap labels argument with stop_gradients. 12865,_image_resize_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1934,function,"Transforms image.resize_* to image.resize(..., method=*, ...)." 12866,_pool_seed_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,1982,function,"Removes seed2 and deterministic, and adds non-zero seed if needed." 12867,_extract_glimpse_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2032,function, 12868,_add_summary_step_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2069,function,"Adds a step argument to the summary API call if not specified. The inserted argument value is tf.compat.v1.train.get_or_create_global_step()." 12869,_add_summary_recording_cond_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2088,function,"Adds cond argument to tf.contrib.summary.xxx_record_summaries(). This is in anticipation of them being renamed to tf.summary.record_if(), which requires the cond argument." 12870,_add_loss_reduction_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2103,function,"Adds a loss_reduction argument if not specified. Default value for tf.estimator.*Classifier and tf.estimator.*Regressor loss_reduction argument changed to SUM_OVER_BATCH_SIZE. So, we update existing calls to use the old default value `tf.keras.losses.Reduction.SUM`. Note: to apply this transformation, symbol must be added to reordered_function_names above." 12871,_rename_if_any_arg_found_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2128,function,"Replaces the given call with tf.compat.v1 if any of the arg_names is found. Args: parent: Parent of node. node: ast.Call node to modify. full_name: full name of function to modify. name: name of function to modify. logs: list of logs to append to. arg_names: list of names of the argument to look for. 
arg_ok_predicate: predicate callable with the ast of the argument value, returns whether the argument value is allowed. remove_if_ok: remove the argument if present and ok as determined by arg_ok_predicate. message: message to print if a non-ok arg is found (and hence, the function is renamed to its compat.v1 version). Returns: node, if it was modified, else None." 12872,_rename_if_arg_found_and_add_loss_reduction_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2167,function,"Combination of _rename_if_arg_found and _add_loss_reduction transformers. Args: parent: Parent of node. node: ast.Call node to maybe modify. full_name: full name of function to modify name: name of function to modify logs: list of logs to append to arg_names: list of names of the argument to look for arg_ok_predicate: predicate callable with the ast of the argument value, returns whether the argument value is allowed. remove_if_ok: remove the argument if present and ok as determined by arg_ok_predicate. message: message to print if a non-ok arg is found (and hence, the function is renamed to its compat.v1 version). Returns: node, if it was modified, else None." 12873,_add_uniform_scaling_initializer_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2208,function,"Updates references to uniform_unit_scaling_initializer. Transforms: tf.uniform_unit_scaling_initializer(factor, seed, dtype) to tf.compat.v1.keras.initializers.VarianceScaling( scale=factor, distribution=""uniform"", seed=seed) Note: to apply this transformation, symbol must be added to reordered_function_names above." 12874,_contrib_layers_xavier_initializer_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2238,function,"Updates references to contrib.layers.xavier_initializer. Transforms: tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to tf.compat.v1.keras.initializers.VarianceScaling( scale=1.0, mode=""fan_avg"", distribution=(""uniform"" if uniform else ""truncated_normal""), seed=seed, dtype=dtype) Returns: The new node" 12875,_contrib_layers_variance_scaling_initializer_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2316,function,"Updates references to contrib.layers.variance_scaling_initializer. Transforms: tf.contrib.layers.variance_scaling_initializer( factor, mode, uniform, seed, dtype ) to tf.compat.v1.keras.initializers.VarianceScaling( scale=factor, mode=mode.lower(), distribution=(""uniform"" if uniform else ""truncated_normal""), seed=seed, dtype=dtype) And handles the case where no factor is provided and scale needs to be set to 2.0 to match contrib's default instead of tf.keras.initializer's default of 1.0" 12876,_contrib_layers_l1_regularizer_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2402,function,"Replace slim l1 regularizer with Keras one. This entails renaming the 'scale' arg to 'l' and dropping any provided scope arg." 12877,_contrib_layers_l2_regularizer_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2441,function,"Replace slim l2 regularizer with Keras one, with l=0.5*scale. Also drops the scope argument." 12878,_name_scope_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2501,function,Fix name scope invocation to use 'default_name' and omit 'values' args. 
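Paraphrased before/after pairs for two of the transformers above (`_dropout_transformer`, 12862, and `_cast_transformer`, 12863); these strings illustrate the rewrites described in the docstrings rather than captured tool output:

```python
examples = [
    ("tf.nn.dropout(x, keep_prob=0.75)",   # _dropout_transformer
     "tf.nn.dropout(x, rate=1 - (0.75))"),
    ("tf.to_float(x)",                     # _cast_transformer
     "tf.cast(x, dtype=tf.float32)"),
]
for before, after in examples:
  print(before, "->", after)
```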
12879,_rename_to_compat_v1,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2539,function, 12880,_rename_func,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2544,function, 12881,_string_split_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2553,function,"Update tf.string_split arguments: skip_empty, sep, result_type, source." 12882,_string_split_rtype_transformer,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2.py,2596,function,"Update tf.strings.split arguments: result_type, source." 12883,process_file,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_main.py,42,function,Process a file of type `.py` or `.ipynb`. 12884,main,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_main.py,58,function, 12885,TFAPIChangeSpec,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_safety.py,26,class,List of maps that describe what changed in the API. 12886,TfUpgradeV2SafetyTest,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_safety_test.py,28,class, 12887,testTensorFlowDontChangeContrib,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_safety_test.py,176,function, 12888,test_contrib_to_addons_move,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_safety_test.py,185,function, 12889,get_symbol_for_name,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_test.py,42,function, 12890,get_args,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_test.py,51,function, 12891,get_func_and_args_from_str,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_test.py,60,function,"Parse call string to get function and argument names. Args: call_str: Call string must be in the form: `tf.foo(arg1=val1, arg2=val2, ...)`. Returns: (function_name, list of arg names) tuple." 12892,TestUpgrade,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_test.py,81,class,"Test various APIs that have been changed in 2.0. We also test whether a converted file is executable. test_file_v1_10.py aims to exhaustively test that API changes are convertible and actually work when run with current TensorFlow." 12893,TestUpgradeFiles,tensorflow/tensorflow/tools/compatibility/tf_upgrade_v2_test.py,2388,class, 12894,TestUpgrade,tensorflow/tensorflow/tools/compatibility/testdata/test_file_v0_11.py,29,class,"Test various APIs that have been changed in 1.0. This test will not run in current TensorFlow, but did run in 0.11. This file is intended to be converted by a genrule() that uses the converter so that a 1.0 compatible version of this file is generated. That is run as a unit test if the converter is successful." 12895,TestUpgrade,tensorflow/tensorflow/tools/compatibility/testdata/test_file_v1_12.py,28,class,Test various APIs that have been changed in 2.0. 12896,get_canonical_name,tensorflow/tensorflow/tools/compatibility/update/generate_v2_renames_map.py,72,function, 12897,get_all_v2_names,tensorflow/tensorflow/tools/compatibility/update/generate_v2_renames_map.py,78,function,Get a set of function/class names available in TensorFlow 2.0. 12898,collect_constant_renames,tensorflow/tensorflow/tools/compatibility/update/generate_v2_renames_map.py,97,function,"Looks for constants that need to be renamed in TF 2.0. Returns: Set of tuples of the form (current name, new name)." 12899,collect_function_renames,tensorflow/tensorflow/tools/compatibility/update/generate_v2_renames_map.py,128,function,"Looks for functions/classes that need to be renamed in TF 2.0. Returns: Set of tuples of the form (current name, new name)." 
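A hedged sketch of driving the v2 upgrader from Python, mirroring what `tf_upgrade_v2_main.process_file` (12883) is described to do for `.py` inputs; the exact `process_file` signature is an assumption, so the call is left commented out:

```python
from tensorflow.tools.compatibility import ast_edits, tf_upgrade_v2

# Build an upgrader from the v2 change spec (both classes are listed above).
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
# upgrader.process_file("old_model.py", "old_model.py", "upgraded_model.py")
```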
12900,get_rename_line,tensorflow/tensorflow/tools/compatibility/update/generate_v2_renames_map.py,162,function, 12901,update_renames_v2,tensorflow/tensorflow/tools/compatibility/update/generate_v2_renames_map.py,166,function,"Writes a Python dictionary mapping deprecated to canonical API names. Args: output_file_path: File path to write output to. Any existing contents would be replaced." 12902,main,tensorflow/tensorflow/tools/compatibility/update/generate_v2_renames_map.py,191,function, 12903,collect_function_arg_names,tensorflow/tensorflow/tools/compatibility/update/generate_v2_reorders_map.py,69,function,"Determines argument names for reordered function signatures. Args: function_names: Functions to collect arguments for. Returns: Dictionary mapping function name to its arguments." 12904,get_reorder_line,tensorflow/tensorflow/tools/compatibility/update/generate_v2_reorders_map.py,111,function, 12905,update_reorders_v2,tensorflow/tensorflow/tools/compatibility/update/generate_v2_reorders_map.py,115,function,"Writes a Python dictionary mapping function name to argument order. Args: output_file_path: File path to write output to. Any existing contents would be replaced." 12906,main,tensorflow/tensorflow/tools/compatibility/update/generate_v2_reorders_map.py,137,function, 12907,TfDockerTagValidator,tensorflow/tensorflow/tools/dockerfiles/assembler.py,228,class,"Custom Cerberus validator for TF tag spec. Note: Each _validate_foo function's docstring must end with a segment describing its own validation schema, e.g. ""The rule's arguments are..."". If you add a new validator, you can copy/paste that section." 12908,eprint,tensorflow/tensorflow/tools/dockerfiles/assembler.py,272,function, 12909,aggregate_all_slice_combinations,tensorflow/tensorflow/tools/dockerfiles/assembler.py,276,function,Figure out all of the possible slice groupings for a tag spec. 12910,build_name_from_slices,tensorflow/tensorflow/tools/dockerfiles/assembler.py,289,function,Build the tag name (cpu-devel...) from a list of slices. 12911,update_args_dict,tensorflow/tensorflow/tools/dockerfiles/assembler.py,302,function,Update a dict of arg values with more values from a list or dict. 12912,get_slice_sets_and_required_args,tensorflow/tensorflow/tools/dockerfiles/assembler.py,315,function,"Extract used-slice-sets and required CLI arguments from a spec string. For example, {FOO}{bar}{bat} finds FOO, bar, and bat. Assuming bar and bat are both named slice sets, FOO must be specified on the command line. Args: slice_sets: Dict of named slice sets tag_spec: The tag spec string, e.g. {_FOO}{blep} Returns: (used_slice_sets, required_args), a tuple of lists" 12913,gather_tag_args,tensorflow/tensorflow/tools/dockerfiles/assembler.py,342,function,Build a dictionary of all the CLI and slice-specified args for a tag. 12914,gather_slice_list_items,tensorflow/tensorflow/tools/dockerfiles/assembler.py,360,function,"For a list of slices, get the flattened list of all of a certain key." 12915,find_first_slice_value,tensorflow/tensorflow/tools/dockerfiles/assembler.py,365,function,"For a list of slices, get the first value for a certain key." 12916,assemble_tags,tensorflow/tensorflow/tools/dockerfiles/assembler.py,373,function,"Gather all the tags based on our spec. Args: spec: Nested dict containing full Tag spec cli_args: List of ARG=foo arguments to pass along to Docker build enabled_releases: List of releases to parse. 
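The `{slice}` extraction that `get_slice_sets_and_required_args` (12912) describes for tag specs like `{_FOO}{blep}` can be sketched with a regex; the regex and helper name are assumptions, not the assembler's code:

```python
import re

def used_slice_names(tag_spec):
  # Pull every {name} token out of a tag spec string.
  return re.findall(r"{([^{}]+)}", tag_spec)

print(used_slice_names("{_FOO}{blep}"))  # ['_FOO', 'blep']
```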
Empty list = all. all_partials: Dict of every partial, for reference. Returns: Dict of tags and how to build them" 12917,merge_partials,tensorflow/tensorflow/tools/dockerfiles/assembler.py,426,function,Merge all partial contents with their header. 12918,upload_in_background,tensorflow/tensorflow/tools/dockerfiles/assembler.py,432,function,Upload a docker image (to be used by multiprocessing). 12919,mkdir_p,tensorflow/tensorflow/tools/dockerfiles/assembler.py,438,function,"Create a directory and its parents, even if it already exists." 12920,gather_existing_partials,tensorflow/tensorflow/tools/dockerfiles/assembler.py,447,function,"Find and read all available partials. Args: partial_path (string): read partials from this directory. Returns: Dict[string, string] of partial short names (like ""ubuntu/python"" or ""bazel"") to the full contents of that partial." 12921,main,tensorflow/tensorflow/tools/dockerfiles/assembler.py,473,function, 12922,get_base_dirs_and_prefixes,tensorflow/tensorflow/tools/docs/base_dir.py,29,function,Returns the base_dirs and code_prefixes for OSS TensorFlow api gen. 12923,main,tensorflow/tensorflow/tools/docs/build_java_api_docs.py,60,function, 12924,do_not_generate_docs,tensorflow/tensorflow/tools/docs/doc_controls.py,24,function,"A decorator: Do not generate docs for this object. For example the following classes: ``` class Parent(object): def method1(self): pass def method2(self): pass class Child(Parent): def method1(self): pass def method2(self): pass ``` Produce the following api_docs: ``` /Parent.md # method1 # method2 /Child.md # method1 # method2 ``` This decorator allows you to skip classes or methods: ``` @do_not_generate_docs class Parent(object): def method1(self): pass def method2(self): pass class Child(Parent): @do_not_generate_docs def method1(self): pass def method2(self): pass ``` This will only produce the following docs: ``` /Child.md # method2 ``` Note: This is implemented by adding a hidden attribute on the object, so it cannot be used on objects which do not allow new attributes to be added. So this decorator must go *below* `@property`, `@classmethod`, or `@staticmethod`: ``` class Example(object): @property @do_not_generate_docs def x(self): return self._x ``` Args: obj: The object to hide from the generated docs. Returns: obj" 12925,do_not_doc_inheritable,tensorflow/tensorflow/tools/docs/doc_controls.py,105,function,"A decorator: Do not generate docs for this method. This version of the decorator is ""inherited"" by subclasses. No docs will be generated for the decorated method in any subclass. Even if the sub-class overrides the method. For example, to ensure that `method1` is **never documented** use this decorator on the base-class: ``` class Parent(object): @do_not_doc_inheritable def method1(self): pass def method2(self): pass class Child(Parent): def method1(self): pass def method2(self): pass ``` This will produce the following docs: ``` /Parent.md # method2 /Child.md # method2 ``` When generating docs for a class's attributes, the `__mro__` is searched and the attribute will be skipped if this decorator is detected on the attribute on any class in the `__mro__`. Note: This is implemented by adding a hidden attribute on the object, so it cannot be used on objects which do not allow new attributes to be added.
So this decorator must go *below* `@property`, `@classmethod`, or `@staticmethod`: ``` class Example(object): @property @do_not_doc_inheritable def x(self): return self._x ``` Args: obj: The class-attribute to hide from the generated docs. Returns: obj" 12926,for_subclass_implementers,tensorflow/tensorflow/tools/docs/doc_controls.py,168,function,"A decorator: Only generate docs for this method in the defining class. Also group this method's docs with an `@abstractmethod` in the class's docs. No docs will be generated for this class attribute in sub-classes. The canonical use case for this is `tf.keras.layers.Layer.call`: It's a public method, essential for anyone implementing a subclass, but it should never be called directly. Works on methods and other class-attributes. When generating docs for a class's attributes, the `__mro__` is searched and the attribute will be skipped if this decorator is detected on the attribute on any **parent** class in the `__mro__`. For example: ``` class Parent(object): @for_subclass_implementers def method1(self): pass def method2(self): pass class Child1(Parent): def method1(self): pass def method2(self): pass class Child2(Parent): def method1(self): pass def method2(self): pass ``` This will produce the following docs: ``` /Parent.md # method1 # method2 /Child1.md # method2 /Child2.md # method2 ``` Note: This is implemented by adding a hidden attribute on the object, so it cannot be used on objects which do not allow new attributes to be added. So this decorator must go *below* `@property`, `@classmethod`, or `@staticmethod`: ``` class Example(object): @property @for_subclass_implementers def x(self): return self._x ``` Args: obj: The class-attribute to hide from the generated docs. Returns: obj" 12927,should_skip,tensorflow/tensorflow/tools/docs/doc_controls.py,246,function,"Returns true if docs generation should be skipped for this object. Checks for the `do_not_generate_docs` or `do_not_doc_inheritable` decorators. Args: obj: The object to document, or skip. Returns: True if the object should be skipped" 12928,should_skip_class_attr,tensorflow/tensorflow/tools/docs/doc_controls.py,264,class,"Returns true if docs should be skipped for this class attribute. Args: cls: The class the attribute belongs to. name: The name of the attribute. Returns: True if the attribute should be skipped." 12929,DocControlsTest,tensorflow/tensorflow/tools/docs/doc_controls_test.py,25,class, 12930,DocGeneratorVisitor,tensorflow/tensorflow/tools/docs/doc_generator_visitor.py,28,class,A visitor that generates docs for a python object when __call__ed. 12931,generate_raw_ops_doc,tensorflow/tensorflow/tools/docs/generate2.py,97,function,Generates docs for `tf.raw_ops`. 12932,TfExportAwareVisitor,tensorflow/tensorflow/tools/docs/generate2.py,134,class,"A `tf_export`, `keras_export` and `estimator_export` aware doc_visitor." 12933,_hide_layer_and_module_methods,tensorflow/tensorflow/tools/docs/generate2.py,154,function,Hide methods and properties defined in the base classes of keras layers. 12934,build_docs,tensorflow/tensorflow/tools/docs/generate2.py,176,function,"Build api docs for tensorflow v2. Args: output_dir: A string path, where to put the files. code_url_prefix: prefix for ""Defined in"" links. search_hints: Bool. Include meta-data search hints at the top of each file."
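The hidden-attribute mechanism these doc_controls decorators (12924-12928) describe can be sketched in a few lines; the attribute name below is invented for illustration:

```python
_DO_NOT_DOC = "_tf_docs_do_not_document"  # hypothetical attribute name

def do_not_generate_docs(obj):
  # Mark the object with a hidden attribute; this is why the decorator
  # cannot be applied to objects that forbid new attributes.
  setattr(obj, _DO_NOT_DOC, None)
  return obj

def should_skip(obj):
  return hasattr(obj, _DO_NOT_DOC)

@do_not_generate_docs
def internal_helper():
  pass

print(should_skip(internal_helper))  # True
```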
12935,main,tensorflow/tensorflow/tools/docs/generate2.py,283,function, 12936,Generate2Test,tensorflow/tensorflow/tools/docs/generate2_test.py,44,class, 12937,write_docs,tensorflow/tensorflow/tools/docs/generate_lib.py,40,function,"Write previously extracted docs to disk. Write a docs page for each symbol included in the indices of parser_config to a tree of docs at `output_dir`. Symbols with multiple aliases will have only one page written about them, which is referenced for all aliases. Args: output_dir: Directory to write documentation markdown files to. Will be created if it doesn't exist. parser_config: A `parser.ParserConfig` object, containing all the necessary indices. yaml_toc: Set to `True` to generate a ""_toc.yaml"" file. root_title: The title name for the root level index.md. search_hints: (bool) include meta-data search hints at the top of each output file. site_api_path: The output path relative to the site root. Used in the `_toc.yaml` and `_redirects.yaml` files. Raises: ValueError: if `output_dir` is not an absolute path" 12938,add_dict_to_dict,tensorflow/tensorflow/tools/docs/generate_lib.py,221,function, 12939,_get_default_private_map,tensorflow/tensorflow/tools/docs/generate_lib.py,230,function, 12940,_get_default_do_not_descend_map,tensorflow/tensorflow/tools/docs/generate_lib.py,239,function, 12941,DocControlsAwareCrawler,tensorflow/tensorflow/tools/docs/generate_lib.py,246,class,A `docs_controls` aware API-crawler. 12942,extract,tensorflow/tensorflow/tools/docs/generate_lib.py,255,function,Extract docs from tf namespace and write them to disk. 12943,_GetMarkdownTitle,tensorflow/tensorflow/tools/docs/generate_lib.py,278,class,Extract the title from a .md file. 12944,_DocInfo,tensorflow/tensorflow/tools/docs/generate_lib.py,290,class,A simple struct for holding a doc's url and title. 12945,build_doc_index,tensorflow/tensorflow/tools/docs/generate_lib.py,298,function,Build an index from a keyword designating a doc to _DocInfo objects. 12946,_GuideRef,tensorflow/tensorflow/tools/docs/generate_lib.py,332,class, 12947,_GenerateGuideIndex,tensorflow/tensorflow/tools/docs/generate_lib.py,344,class,Turn guide files into an index from symbol name to a list of _GuideRefs. 12948,_build_guide_index,tensorflow/tensorflow/tools/docs/generate_lib.py,378,function,Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`. 12949,_UpdateTags,tensorflow/tensorflow/tools/docs/generate_lib.py,387,class,"Rewrites a Python guide so that each section has an explicit id tag. ""section"" here refers to blocks delimited by second level headings." 12950,update_id_tags_inplace,tensorflow/tensorflow/tools/docs/generate_lib.py,397,function,"Set explicit ids on all second-level headings to ensure back-links work. Args: src_dir: The directory of md-files to convert (inplace)." 12951,replace_refs,tensorflow/tensorflow/tools/docs/generate_lib.py,421,function,"Fix @{} references in all files under `src_dir` matching `file_pattern`. A matching directory structure, with the modified files is written to `output_dir`. `{""__init__.py"",""OWNERS"",""README.txt""}` are skipped. Files not matching `file_pattern` (using `fnmatch`) are copied with no change. Also, files in the `api_guides/python` directory get explicit ids set on all heading-2s to ensure back-links work. Args: src_dir: The directory to convert files from. output_dir: The root directory to write the resulting files to. reference_resolver: A `parser.ReferenceResolver` to make the replacements. 
file_pattern: Only replace references in files matching file_pattern, using fnmatch. Non-matching files are copied unchanged. api_docs_relpath: Relative-path string to the api_docs, from the src_dir." 12952,DocGenerator,tensorflow/tensorflow/tools/docs/generate_lib.py,483,class,Main entry point for generating docs. 12953,is_free_function,tensorflow/tensorflow/tools/docs/parser.py,40,function,"Check if input is a free function (and not a class- or static method). Args: py_object: The object in question. full_name: The full name of the object, like `tf.module.symbol`. index: The {full_name:py_object} dictionary for the public API. Returns: True if the object is a stand-alone function, and not part of a class definition." 12954,TFDocsError,tensorflow/tensorflow/tools/docs/parser.py,66,class, 12955,_Errors,tensorflow/tensorflow/tools/docs/parser.py,70,class,A collection of errors. 12956,documentation_path,tensorflow/tensorflow/tools/docs/parser.py,101,function,"Returns the file path for the documentation for the given API symbol. Given the fully qualified name of a library symbol, compute the path to which to write the documentation for that symbol (relative to a base directory). Documentation files are organized into directories that mirror the python module/class structure. Args: full_name: Fully qualified name of a library symbol. is_fragment: If `False` produce a direct markdown link (`tf.a.b.c` --> `tf/a/b/c.md`). If `True` produce fragment link, `tf.a.b.c` --> `tf/a/b.md#c` Returns: The file path to which to write the documentation for `full_name`." 12957,_get_raw_docstring,tensorflow/tensorflow/tools/docs/parser.py,129,function,"Get the docs for a given python object. Args: py_object: A python object to retrieve the docs for (class, function/method, or module). Returns: The docstring, or the empty string if no docstring was found." 12958,ReferenceResolver,tensorflow/tensorflow/tools/docs/parser.py,164,class,"Class for replacing @{...} references with Markdown links. Attributes: current_doc_full_name: A string (or None) indicating the name of the document currently being processed, so errors can reference the broken doc." 12959,_handle_compatibility,tensorflow/tensorflow/tools/docs/parser.py,477,function,"Parse and remove compatibility blocks from the main docstring. Args: doc: The docstring that contains compatibility notes. Returns: a tuple of the modified doc string and a hash that maps from compatibility note type to the text of the note." 12960,_gen_pairs,tensorflow/tensorflow/tools/docs/parser.py,496,function,"Given a list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...]. Args: items: A list of items (length must be even) Yields: The original items, in pairs" 12961,_FunctionDetail,tensorflow/tensorflow/tools/docs/parser.py,514,class,"A simple class to contain function details. Composed of a ""keyword"", a possibly empty ""header"" string, and a possibly empty list of key-value pair ""items""." 12962,_parse_function_details,tensorflow/tensorflow/tools/docs/parser.py,535,function,"Given a docstring, split off the header and parse the function details. For example the docstring of tf.nn.relu: '''Computes rectified linear: `max(features, 0)`. Args: features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `features`.
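The `tf.a.b.c -> tf/a/b/c.md` mapping that `documentation_path` (12956) documents can be sketched directly; this stand-alone version is illustrative, not the parser's code:

```python
import posixpath

def documentation_path(full_name, is_fragment=False):
  # tf.a.b.c -> tf/a/b/c.md, or tf/a/b.md#c when is_fragment=True.
  parts = full_name.split(".")
  if is_fragment:
    parts, fragment = parts[:-1], parts[-1]
    return posixpath.join(*parts) + ".md#" + fragment
  return posixpath.join(*parts) + ".md"

print(documentation_path("tf.a.b.c"))        # tf/a/b/c.md
print(documentation_path("tf.a.b.c", True))  # tf/a/b.md#c
```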
''' This is parsed, and returned as: ``` ('Computes rectified linear: `max(features, 0)`.\n\n', [ _FunctionDetail( keyword='Args', header='', items=[ ('features', ' A `Tensor`. Must be ...'), ('name', ' A name for the operation (optional).\n\n')]), _FunctionDetail( keyword='Returns', header=' A `Tensor`. Has the same type as `features`.', items=[]) ]) ``` Args: docstring: The docstring to parse Returns: A (header, function_details) pair, where header is a string and function_details is a (possibly empty) list of `_FunctionDetail` objects." 12963,_parse_md_docstring,tensorflow/tensorflow/tools/docs/parser.py,607,function,"Parse the object's docstring and return a `_DocstringInfo`. This function clears @@'s from the docstring, and replaces @{} references with markdown links. For links within the same set of docs, the `relative_path_to_root` for a docstring on the page for `full_name` can be set to: ```python relative_path_to_root = os.path.relpath( path='.', start=os.path.dirname(documentation_path(full_name)) or '.') ``` Args: py_object: A python object to retrieve the docs for (class, function/method, or module). relative_path_to_root: The relative path from the location of the current document to the root of the Python API documentation. This is used to compute links for ""@{symbol}"" references. reference_resolver: An instance of ReferenceResolver. Returns: A _DocstringInfo object, all fields will be empty if no docstring was found." 12964,_get_arg_spec,tensorflow/tensorflow/tools/docs/parser.py,653,function,"Extracts signature information from a function or functools.partial object. For functions, uses `tf_inspect.getfullargspec`. For `functools.partial` objects, corrects the signature of the underlying function to take into account the removed arguments. Args: func: A function whose signature to extract. Returns: A `FullArgSpec` namedtuple `(args, varargs, varkw, defaults, etc.)`, as returned by `tf_inspect.getfullargspec`." 12965,_remove_first_line_indent,tensorflow/tensorflow/tools/docs/parser.py,704,function, 12966,_generate_signature,tensorflow/tensorflow/tools/docs/parser.py,713,function,"Given a function, returns a list of strings representing its args. This function produces a list of strings representing the arguments to a python function. It uses tf_inspect.getfullargspec, which does not generalize well to Python 3.x, which is more flexible in how *args and **kwargs are handled. This is not a problem in TF, since we have to remain compatible with Python 2.7 anyway. This function uses `__name__` for callables if it is available. This can lead to poor results for functools.partial and other callable objects. The returned string is Python code, so if it is included in a Markdown document, it should be typeset as code (using backticks), or escaped. Args: func: A function, method, or functools.partial to extract the signature for. reverse_index: A map from object ids to canonical full names to use. Returns: A list of strings representing the argument signature of `func` as python code." 12967,_get_guides_markdown,tensorflow/tensorflow/tools/docs/parser.py,812,function, 12968,_get_defining_class,tensorflow/tensorflow/tools/docs/parser.py,824,function, 12969,_LinkInfo,tensorflow/tensorflow/tools/docs/parser.py,831,class, 12970,_OtherMemberInfo,tensorflow/tensorflow/tools/docs/parser.py,841,class, 12971,_FunctionPageInfo,tensorflow/tensorflow/tools/docs/parser.py,859,class,Collects docs for a function page.
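An illustrative stand-in for the signature extraction `_generate_signature` (12966) performs: the real code builds signatures via `tf_inspect.getfullargspec`, while `inspect.signature` is used here for brevity:

```python
import inspect

def example(a, b=2, *args, **kwargs):
  pass

# Each parameter renders as the Python-code fragment the docs would show.
print([str(p) for p in inspect.signature(example).parameters.values()])
# ['a', 'b=2', '*args', '**kwargs']
```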
12972,_ClassPageInfo,tensorflow/tensorflow/tools/docs/parser.py,947,class,"Collects docs for a class page. Attributes: full_name: The fully qualified name of the object at the master location. Aka `master_name`. For example: `tf.nn.sigmoid`. short_name: The last component of the `full_name`. For example: `sigmoid`. defined_in: The path to the file where this object is defined. aliases: The list of all fully qualified names for the locations where the object is visible in the public api. This includes the master location. doc: A `_DocstringInfo` object representing the object's docstring (can be created with `_parse_md_docstring`). guides: A markdown string, of back links pointing to the api_guides that reference this object. bases: A list of `_LinkInfo` objects pointing to the docs for the parent classes. properties: A list of `_PropertyInfo` objects documenting the class' properties (attributes that use `@property`). methods: A list of `_MethodInfo` objects documenting the class' methods. classes: A list of `_LinkInfo` objects pointing to docs for any nested classes. other_members: A list of `_OtherMemberInfo` objects documenting any other objects defined inside the class object (mostly enum style fields)." 12973,_ModulePageInfo,tensorflow/tensorflow/tools/docs/parser.py,1313,class,Collects docs for a module page. 12974,ParserConfig,tensorflow/tensorflow/tools/docs/parser.py,1459,class,Stores all indexes required to parse the docs. 12975,docs_for_object,tensorflow/tensorflow/tools/docs/parser.py,1501,function,"Return a PageInfo object describing a given object from the TF API. This function uses _parse_md_docstring to parse the docs pertaining to `object`. This function resolves '@{symbol}' references in the docstrings into links to the appropriate location. It also adds a list of alternative names for the symbol automatically. It assumes that the docs for each object live in a file given by `documentation_path`, and that relative links to files within the documentation are resolvable. Args: full_name: The fully qualified name of the symbol to be documented. py_object: The Python object to be documented. Its documentation is sourced from `py_object`'s docstring. parser_config: A ParserConfig object. Returns: Either a `_FunctionPageInfo`, `_ClassPageInfo`, or a `_ModulePageInfo` depending on the type of the python object being documented. Raises: RuntimeError: If an object is encountered for which we don't know how to make docs." 12976,_PythonBuiltin,tensorflow/tensorflow/tools/docs/parser.py,1570,class,"This class indicates that the object in question is a python builtin. This can be used for the `defined_in` slot of the `PageInfo` objects." 12977,_PythonFile,tensorflow/tensorflow/tools/docs/parser.py,1589,class,"This class indicates that the object is defined in a regular python file. This can be used for the `defined_in` slot of the `PageInfo` objects." 12978,_ProtoFile,tensorflow/tensorflow/tools/docs/parser.py,1615,class,"This class indicates that the object is defined in a .proto file. This can be used for the `defined_in` slot of the `PageInfo` objects." 12979,_GeneratedFile,tensorflow/tensorflow/tools/docs/parser.py,1641,class,"This class indicates that the object is defined in a generated python file. Generated files should not be linked to directly. This can be used for the `defined_in` slot of the `PageInfo` objects." 12980,_get_defined_in,tensorflow/tensorflow/tools/docs/parser.py,1666,function,"Returns a description of where the passed in python object was defined.
Args: py_object: The Python object. parser_config: A ParserConfig object. Returns: Either a `_PythonBuiltin`, `_PythonFile`, or a `_GeneratedFile`" 12981,generate_global_index,tensorflow/tensorflow/tools/docs/parser.py,1711,function,"Given a dict of full names to python objects, generate an index page. The index page generated contains a list of links for all symbols in `index` that have their own documentation page. Args: library_name: The name for the documented library to use in the title. index: A dict mapping full names to python objects. reference_resolver: An instance of ReferenceResolver. Returns: A string containing an index page as Markdown." 12982,_Metadata,tensorflow/tensorflow/tools/docs/parser.py,1749,class,"A class for building a page's Metadata block. Attributes: name: The name of the page being described by the Metadata block. version: The source version." 12983,build_md_page,tensorflow/tensorflow/tools/docs/pretty_docs.py,36,function,"Given a PageInfo object, return markdown for the page. Args: page_info: must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or `parser.ModulePageInfo` Returns: Markdown for the page Raises: ValueError: if `page_info` is an instance of an unrecognized class" 12984,_build_function_page,tensorflow/tensorflow/tools/docs/pretty_docs.py,61,function,"Given a FunctionPageInfo object, return the page as an md string." 12985,_build_class_page,tensorflow/tensorflow/tools/docs/pretty_docs.py,82,function,"Given a ClassPageInfo object, return the page as an md string." 12986,_build_method_section,tensorflow/tensorflow/tools/docs/pretty_docs.py,170,function,"Generates a markdown section for a method. Args: method_info: A `MethodInfo` object. heading_level: An Int, which HTML heading level to use. Returns: A markdown string." 12987,_build_module_page,tensorflow/tensorflow/tools/docs/pretty_docs.py,197,function,"Given a ModulePageInfo object, return the page as an md string." 12988,_build_signature,tensorflow/tensorflow/tools/docs/pretty_docs.py,260,function,Returns a md code block showing the function signature. 12989,_build_compatibility,tensorflow/tensorflow/tools/docs/pretty_docs.py,292,function,Return the compatibility section as an md string. 12990,_build_function_details,tensorflow/tensorflow/tools/docs/pretty_docs.py,306,function,Return the function details section as an md string. 12991,_build_aliases,tensorflow/tensorflow/tools/docs/pretty_docs.py,320,function, 12992,md_files_in_dir,tensorflow/tensorflow/tools/docs/py_guide_parser.py,29,function,"Returns a list of filename (full_path, base) pairs for guide files." 12993,PyGuideParser,tensorflow/tensorflow/tools/docs/py_guide_parser.py,38,class,"Simple parsing of a guide .md file. Descendants can override the process_*() functions (called by process()) to either record information from the guide, or call replace_line() to affect the return value of process()." 12994,recursive_import,tensorflow/tensorflow/tools/docs/tf_doctest.py,58,function,"Recursively imports all the sub-modules under a root package. Args: root: A python package." 12995,find_modules,tensorflow/tensorflow/tools/docs/tf_doctest.py,72,function,"Finds all the modules in the core package imported. Returns: A list containing all the modules in tensorflow.python." 12996,filter_on_submodules,tensorflow/tensorflow/tools/docs/tf_doctest.py,87,function,"Filters all the modules based on the modules flag. The module flag has to be relative to the core package imported.
For example, if `module=keras.layers` then this function will return all the modules in the submodule. Args: all_modules: All the modules in the core package. submodules: Submodules to filter from all the modules. Returns: All the modules in the submodule." 12997,get_module_and_inject_docstring,tensorflow/tensorflow/tools/docs/tf_doctest.py,109,function,"Replaces the docstring of the module with the changed file's content. Args: file_path: Path to the file Returns: A list containing the module changed by the file." 12998,setup_gpu,tensorflow/tensorflow/tools/docs/tf_doctest.py,132,function,"Sets up the GPU devices. If there are more available GPUs than needed, it hides the additional ones. If there are fewer, it creates logical devices. This is to make sure the tests see a fixed number of GPUs regardless of the environment. Args: required_gpus: an integer. The number of GPUs required. Raises: ValueError: if num_gpus is larger than zero but no GPU is available." 12999,TfTestCase,tensorflow/tensorflow/tools/docs/tf_doctest.py,164,class, 13000,load_tests,tensorflow/tensorflow/tools/docs/tf_doctest.py,173,function,Loads all the tests in the docstrings and runs them. 13001,setUpModule,tensorflow/tensorflow/tools/docs/tf_doctest.py,219,function, 13002,_FloatExtractor,tensorflow/tensorflow/tools/docs/tf_doctest_lib.py,29,class,"Class for extracting floats from a string. For example: >>> text_parts, floats = _FloatExtractor()(""Text 1.0 Text"") >>> text_parts [""Text "", "" Text""] >>> floats np.array([1.0])" 13003,TfDoctestOutputChecker,tensorflow/tensorflow/tools/docs/tf_doctest_lib.py,104,class,"Changes the `want` and `got` strings. This allows it to be customized before they are compared." 13004,TfDoctestOutputCheckerTest,tensorflow/tensorflow/tools/docs/tf_doctest_test.py,30,class, 13005,create_examples,tensorflow/tensorflow/tools/gcs_test/python/gcs_smoke.py,39,function,Create ExampleProtos containing data. 13006,create_dir_test,tensorflow/tensorflow/tools/gcs_test/python/gcs_smoke.py,54,function,Verifies file_io directory handling methods. 13007,create_object_test,tensorflow/tensorflow/tools/gcs_test/python/gcs_smoke.py,128,function,Verifies file_io's object manipulation methods. 13008,main,tensorflow/tensorflow/tools/gcs_test/python/gcs_smoke.py,192,function, 13009,parse_branch_ref,tensorflow/tensorflow/tools/git/gen_git_source.py,39,function,"Given a filename of a .git/HEAD file return ref path. In particular, if git is in detached head state, this will return None. If git is in attached head, it will return the branch reference. E.g. if on 'master', the HEAD will contain 'ref: refs/heads/master' so 'refs/heads/master' will be returned. Example: parse_branch_ref("".git/HEAD"") Args: filename: file to treat as a git HEAD file Returns: None if detached head, otherwise ref subpath Raises: RuntimeError: if the HEAD file is unparseable." 13010,configure,tensorflow/tensorflow/tools/git/gen_git_source.py,67,function,Configure `src_base_path` to embed git hashes if available. 13011,get_git_version,tensorflow/tensorflow/tools/git/gen_git_source.py,144,function,"Get the git version from the repository. This function runs `git describe ...` in the path given as `git_base_path`. This will return a string of the form: <base-tag>-<number of commits since tag>-<shortened sha hash> For example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when compiled. 1585 commits are after that commit tag, and we can get back to this version by running `git checkout bb717a6`.
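The HEAD parsing that `parse_branch_ref` (13009) documents can be sketched on the file's text: an attached head stores "ref: <path>", a detached head stores a raw commit sha. The text-based helper below is illustrative; the real function takes a filename:

```python
def parse_branch_ref_text(head_contents):
  data = head_contents.strip()
  if data.startswith("ref: "):
    return data[len("ref: "):]
  return None  # detached head

print(parse_branch_ref_text("ref: refs/heads/master"))  # refs/heads/master
```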
Args: git_base_path: where the .git directory is located git_tag_override: Override the value for the git tag. This is useful for releases where we want to build the release before the git tag is created. Returns: A bytestring representing the git version" 13012,write_version_info,tensorflow/tensorflow/tools/git/gen_git_source.py,190,function,"Write a c file that defines the version functions. Args: filename: filename to write to. git_version: the result of a git describe." 13013,generate,tensorflow/tensorflow/tools/git/gen_git_source.py,229,function,"Generate version_info.cc as given `destination_file`. Args: arglist: should be a sequence that contains spec, head_symlink, ref_symlink, destination_file. `destination_file` is the filename where version_info.cc will be written `spec` is a filename where the file contains a JSON dictionary 'git' bool that is true if the source is in a git repo 'path' base path of the source code 'branch' the name of the ref specification of the current branch/tag `head_symlink` is a filename to HEAD that is cross-referenced against what is contained in the json branch designation. `ref_symlink` is unused in this script but passed, because the build system uses that file to detect when commits happen. git_tag_override: Override the value for the git tag. This is useful for releases where we want to build the release before the git tag is created. Raises: RuntimeError: If ./configure needs to be run, RuntimeError will be raised." 13014,raw_generate,tensorflow/tensorflow/tools/git/gen_git_source.py,274,function,"Simple generator used for cmake/make build systems. This does not create any symlinks. It requires the build system to build unconditionally. Args: output_file: Output filename for the version info cc source_dir: Base path of the source code git_tag_override: Override the value for the git tag. This is useful for releases where we want to build the release before the git tag is created." 13015,TransformGraph,tensorflow/tensorflow/tools/graph_transforms/__init__.py,26,function,"Python wrapper for the Graph Transform Tool. Gives access to all graph transforms available through the command line tool. See documentation at https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md for full details of the options available. Args: input_graph_def: GraphDef object containing a model to be transformed. inputs: List of node names for the model inputs. outputs: List of node names for the model outputs. transforms: List of strings containing transform names and parameters. Returns: New GraphDef with transforms applied." 13016,TransformGraphTest,tensorflow/tensorflow/tools/graph_transforms/python/transform_graph_test.py,29,class, 13017,check_output_despite_error,tensorflow/tensorflow/tools/pip_package/check_load_py_test.py,28,function,"Get output of args from command line, even if there are errors. Args: args: a list of command line args. Returns: output as string." 13018,main,tensorflow/tensorflow/tools/pip_package/check_load_py_test.py,44,function, 13019,GetBuild,tensorflow/tensorflow/tools/pip_package/pip_smoke_test.py,44,function,Get the list of all BUILD file targets recursively starting at dir_base. 13020,BuildPyTestDependencies,tensorflow/tensorflow/tools/pip_package/pip_smoke_test.py,54,function, 13021,main,tensorflow/tensorflow/tools/pip_package/pip_smoke_test.py,102,function,"This script runs the pip smoke test. Raises: RuntimeError: If any dependencies for py_tests exist in subSet Prerequisites: 1. Bazel is installed.
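A usage sketch for the `TransformGraph` wrapper (13015); the transform names are documented in the tool's README, while `graph_def` and the node names are placeholders, so the call is left commented out:

```python
from tensorflow.tools.graph_transforms import TransformGraph

# new_graph_def = TransformGraph(
#     input_graph_def=graph_def,
#     inputs=["input_node"],
#     outputs=["output_node"],
#     transforms=["strip_unused_nodes",
#                 "fold_constants(ignore_errors=true)"])
```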
2. Running in the github repo of tensorflow. 3. Configure has been run." 13022,BinaryDistribution,tensorflow/tensorflow/tools/pip_package/setup.py,132,class, 13023,InstallCommand,tensorflow/tensorflow/tools/pip_package/setup.py,138,class,Override the dir where the headers go. 13024,InstallHeaders,tensorflow/tensorflow/tools/pip_package/setup.py,149,class,"Override how headers are copied. The install_headers that comes with setuptools copies all files to the same directory. But we need the files to be in a specific directory hierarchy for -I to work correctly." 13025,find_files,tensorflow/tensorflow/tools/pip_package/setup.py,217,function,Return all the files matching pattern below root dir. 13026,main,tensorflow/tensorflow/tools/pip_package/simple_console.py,26,function,Run an interactive console. 13027,main,tensorflow/tensorflow/tools/pip_package/simple_console_for_windows.py,26,function,Run an interactive console. 13028,_compare_versions,tensorflow/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py,35,function,"Compare two versions and return information on which is smaller vs. larger. Args: v1: String that is a version to be compared against `v2`. v2: String that is a version to be compared against `v1`. Returns: Dict that stores the larger version with key `larger` and the smaller version with key `smaller`. e.g. {`larger`: `1.5.0`, `smaller`: `1.2.0`} Raises: RuntimeError: If asked to compare `inf` to `inf`." 13029,_list_to_string,tensorflow/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py,96,function,"Concatenates list items into a single string separated by `s`. Args: l: List with items to be concatenated into a single string. s: String or char that will be concatenated in between each item. Returns: String that has all items in list `l` concatenated with `s` separator." 13030,_get_func_name,tensorflow/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py,110,function,"Get the name of the current function. Returns: String that is the name of the current function." 13031,ConfigCompatChecker,tensorflow/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py,119,class,"Class that checks configuration versions and dependency compatibilities. `ConfigCompatChecker` checks a given set of configurations and their versions against supported versions and dependency rules defined in an `.ini` config file. For project `TensorFlow Builder`, it functions as a sub-module for the builder service that validates requested build configurations from a client prior to initiating a TensorFlow build." 13032,CompatCheckerTest,tensorflow/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker_test.py,70,class, 13033,run_shell_cmd,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,135,function,"Executes shell commands and returns output. Args: args: String of shell commands to run. Returns: Tuple output (stdoutdata, stderrdata) from running the shell commands." 13034,get_platform,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,153,function,"Retrieves platform information. Currently the script only supports Linux. If another platform such as Windows or macOS is detected, it throws an error and terminates. Returns: String that is the platform type. e.g. 'linux'" 13035,get_cpu_type,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,179,function,"Retrieves CPU (type) information. Returns: String that is the name of the CPU. e.g.
'GenuineIntel'" 13036,get_cpu_arch,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,195,function,"Retrieves processor architecture type (32-bit or 64-bit). Returns: String that is CPU architecture. e.g. 'x86_64'" 13037,get_distrib,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,210,function,"Retrieves distribution name of the operating system. Returns: String that is the name of distribution. e.g. 'Ubuntu'" 13038,get_distrib_version,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,225,function,"Retrieves distribution version of the operating system. Returns: String that is the distribution version. e.g. '14.04'" 13039,get_gpu_type,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,242,function,"Retrieves GPU type. Returns: String that is the name of the detected NVIDIA GPU. e.g. 'Tesla K80' 'unknown' will be returned if detected GPU type is an unknown name. Unknown name refers to any GPU name that is not specified in this page: https://developer.nvidia.com/cuda-gpus" 13040,get_gpu_count,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,280,function,"Retrieves total number of GPU's available in the system. Returns: Integer that is the total # of GPU's found." 13041,get_cuda_version_all,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,294,function,"Retrieves all additional CUDA versions available (other than default). For retrieving default CUDA version, use `get_cuda_version` function. stderr is silenced by default. Setting FLAGS.debug mode will not enable it. Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable stderr. Returns: List of all CUDA versions found (except default version). e.g. ['10.1', '10.2']" 13042,get_cuda_version_default,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,327,function,"Retrieves default CUDA version. Default version is the version found in `/usr/local/cuda/` installation. stderr is silenced by default. Setting FLAGS.debug mode will not enable it. Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable stderr. It iterates through two types of version retrieval method: 1) Using `nvcc`: If `nvcc` is not available, then it uses next method. 2) Read version file (`version.txt`) found in CUDA install directory. Returns: String that is the default CUDA version. e.g. '10.1'" 13043,get_cuda_compute_capability,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,368,function,"Retrieves CUDA compute capability based on the detected GPU type. This function uses the `cuda_compute_capability` module to retrieve the corresponding CUDA compute capability for the given GPU type. Args: source_from_url: Boolean deciding whether to source compute capability from NVIDIA website or from a local golden file. Returns: List of all supported CUDA compute capabilities for the given GPU type. e.g. ['3.5', '3.7']" 13044,get_cudnn_version,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,402,function,"Retrieves the version of cuDNN library detected. Returns: String that is the version of cuDNN library detected. e.g. '7.5.0'" 13045,get_gcc_version,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,426,function,"Retrieves version of GCC detected. Returns: String that is the version of GCC. e.g. 
'7.3.0'" 13046,get_glibc_version,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,441,function,"Retrieves version of GLIBC detected. Returns: String that is the version of GLIBC. e.g. '2.24'" 13047,get_libstdcpp_version,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,456,function,"Retrieves version of libstdc++ detected. Returns: String that is the version of libstdc++. e.g. '3.4.25'" 13048,get_cpu_isa_version,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,472,function,"Retrieves all Instruction Set Architecture(ISA) available. Required ISA(s): 'avx', 'avx2', 'avx512f', 'sse4', 'sse4_1' Returns: Tuple (list of available ISA, list of missing ISA)" 13049,get_python_version,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,500,function,"Retrieves default Python version. Returns: String that is the version of default Python. e.g. '2.7.4'" 13050,get_all_configs,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,512,function,"Runs all functions for detecting user machine configurations. Returns: Tuple (List of all configurations found, List of all missing configurations, List of all configurations found with warnings, Dict of all configurations)" 13051,print_all_configs,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,581,function,"Prints the status and info on all configurations in a table format. Args: configs: List of all configurations found. missing: List of all configurations that are missing. warning: List of all configurations found with warnings." 13052,save_to_file,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,624,function,"Saves all detected configuration(s) into a JSON file. Args: json_data: Dict of all configurations found. filename: String that is the name of the output JSON file." 13053,manage_all_configs,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,641,function,"Manages configuration detection and retrieval based on user input. Args: save_results: Boolean indicating whether to save the results to a file. filename: String that is the name of the output JSON file." 13054,main,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py,657,function, 13055,retrieve_from_web,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py,55,function,"Retrieves list of all CUDA compute capability from NVIDIA webpage. Args: generate_csv: Boolean for generating an output file containing the results. Returns: OrderedDict that is a list of all CUDA compute capability listed on the NVIDIA page. Order goes from top to bottom of the webpage content (.html)." 13056,retrieve_from_golden,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py,91,function,"Retrieves list of all CUDA compute capability from a golden file. 
The following file is set as the default: `./golden/compute_capability_golden.csv` Returns: Dictionary that lists all CUDA compute capabilities in the following format: {'<gpu_name>': ['<major>.<minor>', ...], ...} If there are multiple versions available for a given GPU, then it appends all supported versions to the value list (in the key-value pair)." 13057,create_gpu_capa_map,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py,118,function,"Generates a map between GPU types and their corresponding compute capability. This method is used for retrieving CUDA compute capability from the web only. Args: match_list: List of all CUDA compute capabilities detected from the webpage. generate_csv: Boolean for creating a csv file to store results. filename: String that is the name of the csv file (without the `.csv` ending). Returns: OrderedDict that lists, in the incoming order of `match_list`, all CUDA compute capabilities." 13058,write_csv_from_dict,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py,172,function,"Writes out a `.csv` file from an input dictionary. After writing out the file, it checks the new list against the golden to make sure the golden file is up to date. Args: filename: String that is the output file name. input_dict: Dictionary that is to be written out to a `.csv` file." 13059,check_with_golden,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py,195,function,"Checks the newly created CUDA compute capability file against the golden. If differences are found, it prints a list of all mismatches as a `WARNING`. The golden file must reside in the `golden/` directory. Args: filename: String that is the name of the newly created file." 13060,print_dict,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py,227,function,"Prints a dictionary with formatting (2-column table). Args: py_dict: Dictionary that is to be printed out in a table format." 13061,main,tensorflow/tensorflow/tools/tensorflow_builder/config_detector/data/cuda_compute_capability.py,237,function, 13062,check_file,tensorflow/tensorflow/tools/test/check_futures_test.py,57,function, 13063,main,tensorflow/tensorflow/tools/test/check_futures_test.py,88,function, 13064,main,tensorflow/tensorflow/tools/test/file_name_test.py,31,function, 13065,_gather_gpu_devices_proc,tensorflow/tensorflow/tools/test/gpu_info_lib.py,33,function,Try to gather NVidia GPU device information via /proc/driver. 13066,CUDADeviceProperties,tensorflow/tensorflow/tools/test/gpu_info_lib.py,51,class, 13067,_gather_gpu_devices_cudart,tensorflow/tensorflow/tools/test/gpu_info_lib.py,123,function,Try to gather NVidia GPU device information via libcudart. 13068,gather_gpu_devices,tensorflow/tensorflow/tools/test/gpu_info_lib.py,167,function,"Gather gpu device info. Returns: A list of test_log_pb2.GPUInfo messages." 13069,gather_build_configuration,tensorflow/tensorflow/tools/test/run_and_gather_logs.py,58,function, 13070,main,tensorflow/tensorflow/tools/test/run_and_gather_logs.py,69,function, 13071,MissingLogsError,tensorflow/tensorflow/tools/test/run_and_gather_logs_lib.py,37,class, 13072,get_git_commit_sha,tensorflow/tensorflow/tools/test/run_and_gather_logs_lib.py,41,function,"Get the git commit SHA for this build. Attempt to get the SHA from the environment variable GIT_COMMIT, which should be available on Jenkins build agents.
Returns: SHA hash of the git commit used for the build, if available" 13073,process_test_logs,tensorflow/tensorflow/tools/test/run_and_gather_logs_lib.py,54,function,"Gather test information and put it in a TestResults proto. Args: name: Benchmark target identifier. test_name: A unique bazel target, e.g. ""//path/to:test"" test_args: A string containing all arguments to run the target with. benchmark_type: A string representing the BenchmarkType enum; the benchmark type for this target. start_time: Test starting time (epoch). run_time: Wall time that the test ran for. log_files: Paths to the log files. Returns: A TestResults proto" 13074,process_benchmarks,tensorflow/tensorflow/tools/test/run_and_gather_logs_lib.py,92,function, 13075,run_and_gather_logs,tensorflow/tensorflow/tools/test/run_and_gather_logs_lib.py,101,function,"Run the bazel test given by test_name; gather and return the logs. Args: name: Benchmark target identifier. test_name: A unique bazel target, e.g. ""//path/to:test"" test_args: A string containing all arguments to run the target with. benchmark_type: A string representing the BenchmarkType enum; the benchmark type for this target. Returns: A tuple (test_results, test_adjusted_name), where test_results: A test_log_pb2.TestResults proto test_adjusted_name: Unique benchmark name that consists of the benchmark name optionally followed by the GPU type. Raises: ValueError: If the test_name is not a valid target. subprocess.CalledProcessError: If the target itself fails. IOError: If there are problems gathering test log output from the test. MissingLogsError: If we couldn't find benchmark logs." 13076,main,tensorflow/tensorflow/tools/test/system_info.py,25,function, 13077,gather_machine_configuration,tensorflow/tensorflow/tools/test/system_info_lib.py,44,function,Gather machine configuration. This is the top-level function of this library. 13078,gather_hostname,tensorflow/tensorflow/tools/test/system_info_lib.py,66,function, 13079,gather_memory_info,tensorflow/tensorflow/tools/test/system_info_lib.py,70,function,Gather memory info. 13080,gather_cpu_info,tensorflow/tensorflow/tools/test/system_info_lib.py,79,function,Gather CPU information. Assumes all CPUs are the same. 13081,gather_available_device_info,tensorflow/tensorflow/tools/test/system_info_lib.py,126,function,"Gather the list of devices available to TensorFlow. Returns: A list of test_log_pb2.AvailableDeviceInfo messages." 13082,gather_platform_info,tensorflow/tensorflow/tools/test/system_info_lib.py,146,function,Gather platform info. 13083,is_real_file,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,94,function, 13084,get_mtime,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,99,function, 13085,list_files_by_mtime,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,104,function,"Return a list of files in the directory, sorted in increasing ""mtime"". Return a list of files in the given directory, sorted from older to newer file according to their modification times. Only return actual files, skipping directories, symbolic links, pipes, etc. Args: dirpath: directory pathname Returns: A list of file names relative to the given directory path."
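The list_files_by_mtime entry above describes a specific contract: return only regular files (skipping directories, symbolic links, pipes, etc.), as names relative to the directory, sorted from oldest to newest modification time. A minimal Python sketch of that contract follows; it illustrates the documented behavior and is not the actual upload_test_benchmarks.py implementation.

import os
import stat

def list_files_by_mtime(dirpath):
    """Return regular-file names in dirpath, sorted oldest to newest by mtime."""
    entries = []
    for name in os.listdir(dirpath):
        st = os.lstat(os.path.join(dirpath, name))  # lstat: do not follow symlinks
        if stat.S_ISREG(st.st_mode):  # skip directories, links, pipes, sockets
            entries.append((st.st_mtime, name))
    return [name for _, name in sorted(entries)]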
13086,lock,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,125,function, 13087,unlock,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,129,function, 13088,trylock,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,133,function, 13089,upload_benchmark_data,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,141,function,"Parse benchmark data and use the client to upload it to the datastore. Parse the given benchmark data from the serialized JSON format used to write the test results file. Create the different datastore Entities from that data and upload them to the datastore in a batch using the client connection. Args: client: datastore client connection data: JSON-encoded benchmark data" 13090,upload_benchmark_files,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,190,function,"Find benchmark files, process them, and upload their data to the datastore. Locate benchmark files in the data directory, process them, and upload their data to the datastore. After processing each file, move it to the archive directory for safekeeping. Each file is locked for processing, which allows multiple uploader instances to run concurrently if needed, each one handling different benchmark files, skipping those already locked by another. Args: opts: command line options object Note: To use locking, the file is first opened, then its descriptor is used to lock and read it. The lock is released when the file is closed. Do not open that same file a second time while the lock is already held, because when that second file descriptor is closed, the lock will be released prematurely." 13091,parse_cmd_line,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,221,function,"Parse command line options. Returns: The parsed arguments object." 13092,main,tensorflow/tensorflow/tools/test/upload_test_benchmarks.py,242,function, 13093,ConfigError,tensorflow/third_party/gpus/check_cuda_libs.py,38,class, 13094,_is_windows,tensorflow/third_party/gpus/check_cuda_libs.py,42,function, 13095,check_cuda_lib,tensorflow/third_party/gpus/check_cuda_libs.py,46,function,"Tests if a library exists on disk and whether its soname matches the filename. Args: path: the path to the library. check_soname: whether to check the soname as well. Raises: ConfigError: If the library does not exist or if its soname does not match the filename." 13096,main,tensorflow/third_party/gpus/check_cuda_libs.py,69,function, 13097,main,tensorflow/third_party/gpus/compress_find_cuda_config.py,24,function, 13098,ConfigError,tensorflow/third_party/gpus/find_cuda_config.py,72,class, 13099,_is_linux,tensorflow/third_party/gpus/find_cuda_config.py,76,function, 13100,_is_windows,tensorflow/third_party/gpus/find_cuda_config.py,80,function, 13101,_is_macos,tensorflow/third_party/gpus/find_cuda_config.py,84,function, 13102,_matches_version,tensorflow/third_party/gpus/find_cuda_config.py,88,function,"Checks whether some version meets the requirements. All elements of the required_version need to be present in the actual_version. For example: required 1 vs. actual 1.1 -> True; required 1.2 vs. actual 1 -> False; required 1.2 vs. actual 1.3 -> False; required 1 vs. actual 1 -> True. Args: required_version: The version specified by the user. actual_version: The version detected from the CUDA installation. Returns: Whether the actual version matches the required one."
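The _matches_version entry above amounts to a component-wise prefix check: every dotted component of required_version must appear, in order, at the start of actual_version. The short sketch below reproduces the docstring's example table; it is an assumed reading of the rule, not the code from find_cuda_config.py.

def matches_version(required_version, actual_version):
    """True if required_version is a component-wise prefix of actual_version."""
    required = required_version.split(".")
    actual = actual_version.split(".")
    if len(required) > len(actual):
        return False  # e.g. required 1.2 vs. actual 1
    return actual[:len(required)] == required

assert matches_version("1", "1.1")        # True
assert not matches_version("1.2", "1")    # False
assert not matches_version("1.2", "1.3")  # False
assert matches_version("1", "1")          # True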
13103,_at_least_version,tensorflow/third_party/gpus/find_cuda_config.py,115,function, 13104,_get_header_version,tensorflow/third_party/gpus/find_cuda_config.py,121,function,Returns preprocessor defines in C header file. 13105,_cartesian_product,tensorflow/third_party/gpus/find_cuda_config.py,130,function,Returns all path combinations of first and second. 13106,_get_ld_config_paths,tensorflow/third_party/gpus/find_cuda_config.py,135,function,Returns all directories from 'ldconfig -p'. 13107,_get_default_cuda_paths,tensorflow/third_party/gpus/find_cuda_config.py,153,function, 13108,_header_paths,tensorflow/third_party/gpus/find_cuda_config.py,170,function,Returns hard-coded set of relative paths to look for header files. 13109,_library_paths,tensorflow/third_party/gpus/find_cuda_config.py,182,function,Returns hard-coded set of relative paths to look for library files. 13110,_not_found_error,tensorflow/third_party/gpus/find_cuda_config.py,194,function, 13111,_find_file,tensorflow/third_party/gpus/find_cuda_config.py,202,function, 13112,_find_library,tensorflow/third_party/gpus/find_cuda_config.py,209,function,Returns first valid path to the requested library. 13113,_find_versioned_file,tensorflow/third_party/gpus/find_cuda_config.py,222,function,Returns first valid path to a file that matches the requested version. 13114,_find_header,tensorflow/third_party/gpus/find_cuda_config.py,238,function,Returns first valid path to a header that matches the requested version. 13115,_find_cuda_config,tensorflow/third_party/gpus/find_cuda_config.py,244,function, 13116,_find_cublas_config,tensorflow/third_party/gpus/find_cuda_config.py,307,function, 13117,_find_cusolver_config,tensorflow/third_party/gpus/find_cuda_config.py,340,function, 13118,_find_curand_config,tensorflow/third_party/gpus/find_cuda_config.py,370,function, 13119,_find_cufft_config,tensorflow/third_party/gpus/find_cuda_config.py,400,function, 13120,_find_cudnn_config,tensorflow/third_party/gpus/find_cuda_config.py,429,function, 13121,_find_cusparse_config,tensorflow/third_party/gpus/find_cuda_config.py,452,function, 13122,_find_nccl_config,tensorflow/third_party/gpus/find_cuda_config.py,482,function, 13123,_find_tensorrt_config,tensorflow/third_party/gpus/find_cuda_config.py,504,function, 13124,_list_from_env,tensorflow/third_party/gpus/find_cuda_config.py,538,function,Returns comma-separated list from environment variable. 13125,_get_legacy_path,tensorflow/third_party/gpus/find_cuda_config.py,545,function,"Returns a path specified by a legacy environment variable. CUDNN_INSTALL_PATH, NCCL_INSTALL_PATH, TENSORRT_INSTALL_PATH set to '/usr/lib/x86_64-linux-gnu' would previously find both library and header paths. Detect those and return '/usr', otherwise forward to _list_from_env()." 13126,_normalize_path,tensorflow/third_party/gpus/find_cuda_config.py,559,function,"Returns normalized path, with forward slashes on Windows." 13127,find_cuda_config,tensorflow/third_party/gpus/find_cuda_config.py,567,function,Returns a dictionary of CUDA library and header file paths. 13128,main,tensorflow/third_party/gpus/find_cuda_config.py,638,function, 13129,_parse_args,tensorflow/third_party/llvm/expand_cmake_vars.py,30,function,Parses arguments with the form KEY=VALUE into a dictionary. 13130,_expand_variables,tensorflow/third_party/llvm/expand_cmake_vars.py,39,function,"Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'. Args: input_str: the string containing ${VARIABLE} expressions to expand. 
cmake_vars: a dictionary mapping variable names to their values. Returns: The expanded string." 13131,_expand_cmakedefines,tensorflow/third_party/llvm/expand_cmake_vars.py,56,function,"Expands #cmakedefine declarations, using a dictionary 'cmake_vars'." 13132,main,tensorflow/third_party/llvm/expand_cmake_vars.py,81,function, 13133,Log,tensorflow/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py,41,function, 13134,GetOptionValue,tensorflow/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py,45,function,"Extract the list of values for option from options. Args: option: The option whose value to extract. Returns: 1. A list of values, either directly following the option (e.g., /opt val1 val2), or values collected from multiple occurrences of the option (e.g., /opt val1 /opt val2). 2. The leftover options." 13135,_update_options,tensorflow/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py,66,function, 13136,GetNvccOptions,tensorflow/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py,74,function,"Collect the -nvcc_options values from argv. Args: argv: A list of strings, possibly the argv passed to main(). Returns: 1. The string that can be passed directly to nvcc. 2. The leftover options." 13137,InvokeNvcc,tensorflow/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py,96,function,"Call nvcc with arguments assembled from argv. Args: argv: A list of strings, possibly the argv passed to main(). log: True if logging is requested. Returns: The return value of calling os.system('nvcc ' + args)" 13138,main,tensorflow/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py,190,function,
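The GetOptionValue entry above describes collecting an option's values from MSVC-style arguments, whether they follow a single occurrence (/opt val1 val2) or are spread across repeats (/opt val1 /opt val2), while returning the leftover options. A hedged sketch of that parsing rule follows; the function is illustrative only and is not the wrapper's actual code.

def get_option_value(argv, option):
    """Collect values attached to `option`; return (values, leftover args)."""
    values, leftover = [], []
    collecting = False
    for arg in argv:
        if arg == option:
            collecting = True  # values may follow this occurrence
        elif arg.startswith("/"):  # a different option ends collection
            collecting = False
            leftover.append(arg)
        elif collecting:
            values.append(arg)
        else:
            leftover.append(arg)
    return values, leftover

# Both forms yield (['val1', 'val2'], ['/other']):
print(get_option_value(["/opt", "val1", "val2", "/other"], "/opt"))
print(get_option_value(["/opt", "val1", "/opt", "val2", "/other"], "/opt"))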