      1 # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 #
      3 # Licensed under the Apache License, Version 2.0 (the "License");
      4 # you may not use this file except in compliance with the License.
      5 # You may obtain a copy of the License at
      6 #
      7 #     http://www.apache.org/licenses/LICENSE-2.0
      8 #
      9 # Unless required by applicable law or agreed to in writing, software
     10 # distributed under the License is distributed on an "AS IS" BASIS,
     11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 # See the License for the specific language governing permissions and
     13 # limitations under the License.
     14 # ==============================================================================
     15 
     16 """Generate a series of TensorFlow graphs that become tflite test cases.
     17 
     18 Usage:
     19 
     20 generate_examples <output directory> zipped
     21 
     22 bazel run //tensorflow/contrib/lite/testing:generate_examples
     23     third_party/tensorflow/contrib/lite/testing/generated_examples zipped
     24 """
     25 from __future__ import absolute_import
     26 from __future__ import division
     27 from __future__ import print_function
     28 
     29 import argparse
     30 import itertools
     31 import os
     32 import re
     33 import sys
     34 import tempfile
     35 import traceback
     36 import zipfile
     37 import numpy as np
     38 from six import StringIO
     39 
     40 # TODO(aselle): Disable GPU for now
     41 os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
     42 
     43 # pylint: disable=g-import-not-at-top
     44 import tensorflow as tf
     45 from google.protobuf import text_format
     46 # TODO(aselle): switch to TensorFlow's resource_loader
     47 from tensorflow.contrib.lite.testing import generate_examples_report as report_lib
     48 from tensorflow.python.framework import graph_util as tf_graph_util
     49 
     50 parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
     51 parser.add_argument("output_path",
     52                     help="Directory where the outputs will go.")
     53 # TODO(ahentz): remove this flag
     54 parser.add_argument("type", help="zipped")
     55 parser.add_argument("--zip_to_output",
     56                     type=str,
     57                     help="Particular zip to output.",
     58                     required=False)
     59 parser.add_argument("--toco",
     60                     type=str,
     61                     help="Path to toco tool.",
     62                     required=True)
     63 parser.add_argument(
     64     "--known_bugs_are_errors",
     65     action="store_true",
     66     help=("If a particular model is affected by a known bug,"
     67           " count it as a toco error."))
     68 parser.add_argument(
     69     "--ignore_toco_errors",
     70     action="store_true",
     71     help="Ignore toco errors encountered while generating tests.")
     72 parser.add_argument(
     73     "--save_graphdefs",
     74     action="store_true",
     75     help="Include intermediate graphdefs in the output zip files.")
     76 
     77 
     78 RANDOM_SEED = 342
     79 TEST_INPUT_DEPTH = 3
     80 
     81 
     82 # A map from regular expression to bug number. Any test failure with label
     83 # matching the expression will be considered due to the corresponding bug.
     84 KNOWN_BUGS = {
     85     # TOCO doesn't support scalars as input.
     86     r"relu.*input_shape=\[\]": "67587484",
     87     r"sigmoid.*input_shape=\[\]": "67645668",
     88     # Concat doesn't work with a single input tensor
     89     r"concat.*num_tensors=1": "67378344",
     90     # Transposition in MatMul is not supported.
     91     r"fully_connected.*transpose_.=True": "67586970",
     92     # Softmax graphs are too complex.
     93     r"softmax.*dim=0": "67749831",
     94     r"softmax.*input_shape=\[1,3,4,3\]": "67749831",
     95     # SpaceToDepth only supports float32.
     96     r"space_to_depth.*(float16|int32|uint8|int64)": "68018134",
     97     # BatchToSpaceND doesn't support cropping. This catches test cases with
     98     # const tensors as crops.
     99     r"batch_to_space_nd.*crops=\[\[1,1\],\[1,1\]\]": "70594634",
    100     # BatchToSpaceND only supports 4D tensors.
    101     r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733",
    102     # Div will use floordiv.
    103     r"div.*int32": "72051395",
    104     # TOCO requires matching dimensions in strided_slice.
    105     r"strided_slice.*begin=\[0\].*end=\[1\].*": "73170889",
    106     # No support for SplitV
    107     r"split.*num_or_size_splits=\[2,2\]": "73377559",
    108 }
    109 
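# A hedged illustration (not used directly here) of how KNOWN_BUGS is applied
# in make_zip_of_tests below: each generated test label is checked against the
# regular expressions, and a match downgrades a toco failure to a known bug.
# The label string in this sketch is hypothetical.
#
#   label = "relu,input_shape=[]"
#   for pattern, bug_number in KNOWN_BUGS.items():
#     if re.search(pattern, label):
#       print("Ignored TOCO error due to bug %s" % bug_number)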
    110 
    111 def toco_options(data_types,
    112                  input_arrays,
    113                  output_arrays,
    114                  shapes,
    115                  drop_control_dependency):
    116   """Create TOCO options to process a model.
    117 
    118   Args:
    119     data_types: input and inference types used by TOCO.
    120     input_arrays: names of the input tensors
    121     output_arrays: names of the output tensors
    122     shapes: shapes of the input tensors
    123     drop_control_dependency: whether to ignore control dependency nodes.
    124 
    125   Returns:
    126     The options as a single string.
    127   """
    128   shape_str = ":".join([",".join(str(y) for y in x) for x in shapes])
    129   inference_type = "FLOAT"
    130   # TODO(ahentz): if we get multi-input quantization to work we need this
    131   # to change
    132   if data_types[0] == "QUANTIZED_UINT8":
    133     inference_type = "QUANTIZED_UINT8"
    134   s = (" --input_data_types=%s" % ",".join(data_types) +
    135        " --inference_type=%s" % inference_type +
    136        " --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
    137        " --input_arrays=%s" % ",".join(input_arrays) +
    138        " --input_shapes=%s" % shape_str +
    139        " --output_arrays=%s" % ",".join(output_arrays))
    140   if drop_control_dependency:
    141     s += " --drop_control_dependency"
    142   return s
    143 
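# For reference, a minimal sketch (with hypothetical tensor names) of the flag
# string that toco_options() assembles for a single float input:
#
#   toco_options(data_types=["FLOAT"], input_arrays=["input"],
#                output_arrays=["output"], shapes=[[1, 3, 4, 3]],
#                drop_control_dependency=False)
#   # -> " --input_data_types=FLOAT --inference_type=FLOAT"
#   #    " --input_format=TENSORFLOW_GRAPHDEF --output_format=TFLITE"
#   #    " --input_arrays=input --input_shapes=1,3,4,3 --output_arrays=output"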
    144 
    145 def write_toco_options(filename,
    146                        data_types,
    147                        input_arrays,
    148                        output_arrays,
    149                        shapes,
    150                        drop_control_dependency=False):
    151   """Create TOCO options to process a model.
    152 
    153   Args:
    154     filename: Filename to write the options to.
    155     data_types: input and inference types used by TOCO.
    156     input_arrays: names of the input tensors
    157     output_arrays: names of the output tensors
    158     shapes: shapes of the input tensors
    159     drop_control_dependency: whether to ignore control dependency nodes.
    160   """
    161   with open(filename, "w") as fp:
    162     fp.write(
    163         toco_options(
    164             data_types=data_types,
    165             input_arrays=input_arrays,
    166             output_arrays=output_arrays,
    167             shapes=shapes,
    168             drop_control_dependency=drop_control_dependency))
    169 
    170 
    171 def write_examples(fp, examples):
    172   """Given a list `examples`, write a text format representation.
    173 
    174   The file format is CSV-like with a simple repeated pattern. We would like
    175   to use proto here, but we can't yet due to interfacing with the Android
    176   team using this format.
    177 
    178   Args:
    179     fp: File-like object to write to.
    180     examples: Example dictionaries consisting of keys "inputs" and "outputs".
    181   """
    182 
    183   def write_tensor(fp, x):
    184     """Write tensor in file format supported by TFLITE example."""
    185     fp.write("dtype,%s\n" % x.dtype)
    186     fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
    187     # Output 9 digits after the point to ensure the precision is good enough.
    188     values = ["{:.9f}".format(value) for value in list(x.flatten())]
    189     fp.write("values," + ",".join(values) + "\n")
    190 
    191   fp.write("test_cases,%d\n" % len(examples))
    192   for example in examples:
    193     fp.write("inputs,%d\n" % len(example["inputs"]))
    194     for i in example["inputs"]:
    195       write_tensor(fp, i)
    196     fp.write("outputs,%d\n" % len(example["outputs"]))
    197     for i in example["outputs"]:
    198       write_tensor(fp, i)
    199 
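# For reference, a single example with one float32 input of shape [2] and one
# output is serialized by write_examples() roughly as follows (the values are
# made up):
#
#   test_cases,1
#   inputs,1
#   dtype,float32
#   shape,2
#   values,0.100000000,0.200000000
#   outputs,1
#   dtype,float32
#   shape,2
#   values,0.100000000,0.200000000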
    200 
    201 def write_test_cases(fp, model_name, examples):
    202   """Given a list of `examples`, write a text format representation.
    203 
    204   The file format is protocol-buffer-like, even though we don't use proto due
    205   to the needs of the Android team.
    206 
    207   Args:
    208     fp: File-like object to write to.
    209     model_name: Filename where the model was written to, relative to this file.
    210     examples: Example dictionaries consisting of keys "inputs" and "outputs".
    211   """
    212 
    213   fp.write("load_model: %s\n" % os.path.basename(model_name))
    214   for example in examples:
    215     fp.write("reshape {\n")
    216     for t in example["inputs"]:
    217       fp.write("  input: \"" + ",".join(map(str, t.shape)) + "\"\n")
    218     fp.write("}\n")
    219     fp.write("invoke {\n")
    220 
    221     for t in example["inputs"]:
    222       values = ["{:.9f}".format(value) for value in list(t.flatten())]
    223       fp.write("  input: \"" + ",".join(values) + "\"\n")
    224     for t in example["outputs"]:
    225       values = ["{:.9f}".format(value) for value in list(t.flatten())]
    226       fp.write("  output: \"" + ",".join(values) + "\"\n")
    227     fp.write("}\n")
    228 
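# For reference, write_test_cases() emits a protocol-buffer-like text such as
# the following for a hypothetical model "foo.bin" with one input of shape [1]
# (the values are made up):
#
#   load_model: foo.bin
#   reshape {
#     input: "1"
#   }
#   invoke {
#     input: "0.500000000"
#     output: "0.500000000"
#   }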
    229 
    230 _TF_TYPE_INFO = {
    231     tf.float32: (np.float32, "FLOAT"),
    232     tf.float16: (np.float16, "FLOAT"),
    233     tf.int32: (np.int32, "INT32"),
    234     tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
    235     tf.int64: (np.int64, "INT64"),
    236 }
    237 
    238 
    239 def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
    240   """Build random tensor data spanning the range [min_value, max_value)."""
    241 
    242   if dtype in _TF_TYPE_INFO:
    243     dtype = _TF_TYPE_INFO[dtype][0]
    244 
    245   if dtype in (tf.float32, tf.float16):
    246     value = (max_value-min_value)*np.random.random_sample(shape)+min_value
    247   elif dtype in (tf.int32, tf.uint8, tf.int64):
    248     value = np.random.randint(min_value, max_value+1, shape)
    249   return value.astype(dtype)
    250 
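
# A hedged usage sketch (not part of the test generation flow); shapes and
# ranges below are arbitrary.
def _example_tensor_data():
  """Illustrative sketch only; shows typical create_tensor_data calls.

  Floats are drawn from [min_value, max_value) and integer types from
  [min_value, max_value].
  """
  floats = create_tensor_data(tf.float32, [1, 3, 4, 3], min_value=-4,
                              max_value=10)
  ints = create_tensor_data(tf.int32, [5])
  return floats, ints
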
    251 
    252 def freeze_graph(session, outputs):
    253   """Freeze the current graph.
    254 
    255   Args:
    256     session: TensorFlow session containing the graph
    257     outputs: List of output tensors
    258 
    259   Returns:
    260     The frozen graph_def.
    261   """
    262   return tf_graph_util.convert_variables_to_constants(
    263       session, session.graph.as_graph_def(), [x.op.name for x in outputs])
    264 
    265 
    266 def make_control_dep_tests(zip_path):
    267   """Make a set of tests that use control dependencies."""
    268 
    269   test_parameters = [{
    270       "input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
    271   }]
    272 
    273   def build_graph(parameters):
    274     input_tensor = tf.placeholder(
    275         dtype=tf.float32, name="input", shape=parameters["input_shape"])
    276     filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
    277     assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
    278     with tf.control_dependencies([assert_op]):
    279       out = tf.nn.conv2d(input_tensor, filter_value,
    280                          strides=(1, 1, 1, 1), padding="SAME")
    281       return [input_tensor], [out]
    282 
    283   def build_inputs(parameters, sess, inputs, outputs):
    284     input_values = create_tensor_data(tf.float32, parameters["input_shape"])
    285     return [input_values], sess.run(
    286         outputs, feed_dict=dict(zip(inputs, [input_values])))
    287 
    288   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs,
    289                     drop_control_dependency=True)
    290 
    291 
    292 def toco_convert(graph_def_str, input_tensors, output_tensors,
    293                  drop_control_dependency=False):
    294   """Convert a model's graph def into a tflite model.
    295 
    296   NOTE: this currently shells out to the toco binary, but we would like to
    297   switch to Python API tooling in the future.
    298 
    299   Args:
    300     graph_def_str: Graph def proto in serialized string format.
    301     input_tensors: List of input tensor tuples `(name, shape, type)`
    302     output_tensors: List of output tensors (names)
    303     drop_control_dependency: whether to ignore control dependency nodes.
    304 
    305   Returns:
    306     A tuple (tflite model, log_txt) from the conversion, or
    307     (None, log_txt) if it did not convert properly.
    308   """
    309   data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]
    310   opts = toco_options(
    311       data_types=data_types,
    312       input_arrays=[x[0] for x in input_tensors],
    313       shapes=[x[1] for x in input_tensors],
    314       output_arrays=output_tensors,
    315       drop_control_dependency=drop_control_dependency)
    316 
    317   with tempfile.NamedTemporaryFile() as graphdef_file, \
    318        tempfile.NamedTemporaryFile() as output_file, \
    319        tempfile.NamedTemporaryFile("w+") as stdout_file:
    320     graphdef_file.write(graph_def_str)
    321     graphdef_file.flush()
    322 
    323     # TODO(aselle): Switch this to subprocess at some point.
    324     cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
    325            (bin_path, graphdef_file.name, output_file.name, opts,
    326             stdout_file.name))
    327     exit_code = os.system(cmd)
    328     log = (
    329         cmd + " exited with code %d" % exit_code + "\n------------------\n" +
    330         stdout_file.read())
    331     return (None if exit_code != 0 else output_file.read()), log
    332 
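# A hedged sketch of how toco_convert is driven from make_zip_of_tests below:
# the current graph is serialized from a live session, and either a flatbuffer
# string or None comes back together with the toco log. The tensor names here
# are hypothetical.
#
#   tflite_model, log = toco_convert(
#       sess.graph_def.SerializeToString(),
#       [("input", [1, 3, 4, 3], tf.float32)],
#       ["output"])
#   if tflite_model is None:
#     print(log)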
    333 
    334 def normalize_output_name(output_name):
    335   """Remove :0 suffix from tensor names."""
    336   return output_name.split(":")[0] if output_name.endswith(
    337       ":0") else output_name
    338 
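# For example, normalize_output_name("add:0") returns "add", while
# normalize_output_name("add:1") is returned unchanged.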
    339 
    340 def make_zip_of_tests(zip_path,
    341                       test_parameters,
    342                       make_graph,
    343                       make_test_inputs,
    344                       drop_control_dependency=False):
    345   """Helper to make a zip file of a bunch of TensorFlow models.
    346 
    347   This does a cartesian product of the dictionary of test_parameters and
    348   calls make_graph() for each item in the cartesian product set.
    349   If the graph is built successfully, then make_test_inputs() is called to
    350   build expected input/output value pairs. The model is then converted to tflite
    351   with toco, and the examples are serialized with the tflite model into a zip
    352   file (2 files per item in the cartesian product set).
    353 
    354   Args:
    355     zip_path: Path of zip file to write
    356     test_parameters: Dictionary mapping to lists for each parameter.
    357       e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
    358     make_graph: function that takes current parameters and returns tuple
    359       `[input1, input2, ...], [output1, output2, ...]`
    360     make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
    361       `output_tensors` and returns tuple `(input_values, output_values)`.
    362     drop_control_dependency: whether to ignore control dependency nodes.
    363   Raises:
    364     RuntimeError: if there are toco errors that can't be ignored.
    365   """
    366 
    367   # TODO(aselle): Make this allow multiple inputs and outputs.
    368   archive = zipfile.PyZipFile(zip_path, "w")
    369   zip_manifest = []
    370   convert_report = []
    371   toco_errors = 0
    372   for parameters in test_parameters:
    373     keys = parameters.keys()
    374     for curr in itertools.product(*parameters.values()):
    375       label = zip_path.replace(".zip", "") + (",".join(
    376           "%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
    377       if label[0] == "/":
    378         label = label[1:]
    379       param_dict = dict(zip(keys, curr))
    380 
    381       def build_example(label, param_dict_real):
    382         """Build the model with parameter values set in param_dict_real.
    383 
    384         Args:
    385           label: Label of the model (i.e. the filename in the zip).
    386           param_dict_real: Parameter dictionary (arguments to the factories
    387             make_graph and make_test_inputs)
    388         Returns:
    389           (tflite_model_binary, report) where tflite_model_binary is the
    390           serialized flatbuffer as a string and report is a dictionary with
    391           keys `toco_log` (log of toco conversion), `tf_log` (log of tf
    392           conversion), `toco` (success status of the toco conversion), and
    393           `tf` (success status of the tf conversion).
    394         """
    395 
    396         np.random.seed(RANDOM_SEED)
    397         report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
    398 
    399         # Build graph
    400         report["tf_log"] = ""
    401         report["toco_log"] = ""
    402         tf.reset_default_graph()
    403 
    404         with tf.device("/cpu:0"):
    405           try:
    406             inputs, outputs = make_graph(param_dict_real)
    407           except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
    408                   ValueError):
    409             report["tf_log"] += traceback.format_exc()
    410             return None, report
    411 
    412         sess = tf.Session()
    413         try:
    414           baseline_inputs, baseline_outputs = (make_test_inputs(
    415               param_dict_real, sess, inputs, outputs))
    416         except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
    417                 ValueError):
    418           report["tf_log"] += traceback.format_exc()
    419           return None, report
    420         report["toco"] = report_lib.FAILED
    421         report["tf"] = report_lib.SUCCESS
    422 
    423         # Convert graph to tflite with toco
    424         tflite_model_binary, toco_log = toco_convert(
    425             sess.graph_def.SerializeToString(),
    426             [(input_tensor.name.split(":")[0], input_tensor.get_shape(),
    427               input_tensor.dtype) for input_tensor in inputs],
    428             [normalize_output_name(out.name) for out in outputs],
    429             drop_control_dependency)
    430         report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None
    431                           else report_lib.FAILED)
    432         report["toco_log"] = toco_log
    433 
    434         if FLAGS.save_graphdefs:
    435           archive.writestr(label + ".pb",
    436                            text_format.MessageToString(sess.graph_def),
    437                            zipfile.ZIP_DEFLATED)
    438 
    439         if tflite_model_binary:
    440           archive.writestr(label + ".bin", tflite_model_binary,
    441                            zipfile.ZIP_DEFLATED)
    442           example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
    443 
    444           example_fp = StringIO()
    445           write_examples(example_fp, [example])
    446           archive.writestr(label + ".inputs",
    447                            example_fp.getvalue(), zipfile.ZIP_DEFLATED)
    448 
    449           example_fp2 = StringIO()
    450           write_test_cases(example_fp2, label + ".bin", [example])
    451           archive.writestr(label + "_tests.txt",
    452                            example_fp2.getvalue(), zipfile.ZIP_DEFLATED)
    453 
    454           zip_manifest.append(label + "\n")
    455 
    456         return tflite_model_binary, report
    457 
    458       _, report = build_example(label, param_dict)
    459 
    460       if report["toco"] == report_lib.FAILED:
    461         ignore_error = False
    462         if not FLAGS.known_bugs_are_errors:
    463           for pattern, bug_number in KNOWN_BUGS.items():
    464             if re.search(pattern, label):
    465               print("Ignored TOCO error due to bug %s" % bug_number)
    466               ignore_error = True
    467         if not ignore_error:
    468           toco_errors += 1
    469           print("-----------------\ntoco error!\n%s\n-----------------\n" %
    470                 report["toco_log"])
    471 
    472       convert_report.append((param_dict, report))
    473   report_io = StringIO()
    474   report_lib.make_report_table(report_io, zip_path, convert_report)
    475   archive.writestr("report.html", report_io.getvalue())
    476 
    477   archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED)
    478 
    479   # Log statistics of what succeeded
    480   total_conversions = len(convert_report)
    481   tf_success = sum(1 for x in convert_report
    482                    if x[1]["tf"] == report_lib.SUCCESS)
    483   toco_success = sum(1 for x in convert_report
    484                      if x[1]["toco"] == report_lib.SUCCESS)
    485   percent = 0
    486   if tf_success > 0:
    487     percent = float(toco_success) / float(tf_success) * 100.
    488   tf.logging.info(("Archive %s: considered %d graphs, %d TF evaluated graphs "
    489                    "and %d TOCO converted graphs (%.1f%%)."), zip_path,
    490                   total_conversions, tf_success, toco_success, percent)
    491 
    492   if not FLAGS.ignore_toco_errors and toco_errors > 0:
    493     raise RuntimeError(
    494         "Found %d errors while generating toco models" % toco_errors)
    495 
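
# The following is a hedged sketch (not registered or run anywhere) of what a
# new generator built on make_zip_of_tests looks like; the op choice
# (tf.identity) and the parameter values are arbitrary.
def _example_identity_tests(zip_path):
  """Illustrative sketch of a make_*_tests style generator."""

  test_parameters = [{
      "input_shape": [[1, 3, 4, 3], [5]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.identity(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(tf.float32, parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
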
    496 
    497 def make_pool_tests(pool_op_in):
    498   """Make a set of tests to do pooling.
    499 
    500   Args:
    501     pool_op_in: TensorFlow pooling operation to test, e.g. `tf.nn.avg_pool`.
    502 
    503   Returns:
    504     A function that generates the examples (with pool_op_in curried in).
    505   """
    506 
    507   pool_op = pool_op_in
    508 
    509   def f(zip_path):
    510     """Actual function that generates examples.
    511 
    512     Args:
    513       zip_path: path to write zip to.
    514     """
    515 
    516     # Choose a set of parameters
    517     test_parameters = [{
    518         "ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
    519         "strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
    520         # TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]).
    521         "input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
    522         "padding": ["SAME", "VALID"],
    523         "data_format": ["NHWC"],  # TODO(aselle): NCHW  would be good
    524     }]
    525 
    526     def build_graph(parameters):
    527       input_tensor = tf.placeholder(
    528           dtype=tf.float32, name="input", shape=parameters["input_shape"])
    529       out = pool_op(
    530           input_tensor,
    531           ksize=parameters["ksize"],
    532           strides=parameters["strides"],
    533           data_format=parameters["data_format"],
    534           padding=parameters["padding"])
    535       return [input_tensor], [out]
    536 
    537     def build_inputs(parameters, sess, inputs, outputs):
    538       input_values = create_tensor_data(tf.float32, parameters["input_shape"])
    539       return [input_values], sess.run(
    540           outputs, feed_dict=dict(zip(inputs, [input_values])))
    541 
    542     make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    543   return f
    544 
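# For instance (illustrative only; the output path is hypothetical), max
# pooling tests could be generated with:
#
#   make_pool_tests(tf.nn.max_pool)("/tmp/max_pool.zip")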
    545 
    546 def make_relu_tests(zip_path):
    547   """Make a set of tests to do relu."""
    548 
    549   # Choose a set of parameters
    550   test_parameters = [{
    551       "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
    552                       [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
    553   }]
    554 
    555   def build_graph(parameters):
    556     input_tensor = tf.placeholder(
    557         dtype=tf.float32, name="input", shape=parameters["input_shape"])
    558     out = tf.nn.relu(input_tensor)
    559     return [input_tensor], [out]
    560 
    561   def build_inputs(parameters, sess, inputs, outputs):
    562     input_values = create_tensor_data(
    563         np.float32, parameters["input_shape"], min_value=-4, max_value=10)
    564     return [input_values], sess.run(
    565         outputs, feed_dict=dict(zip(inputs, [input_values])))
    566 
    567   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    568 
    569 
    570 def make_relu1_tests(zip_path):
    571   """Make a set of tests to do relu1."""
    572 
    573   # Choose a set of parameters
    574   test_parameters = [{
    575       "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
    576                       [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
    577   }]
    578 
    579   def build_graph(parameters):
    580     input_tensor = tf.placeholder(
    581         dtype=tf.float32, name="input", shape=parameters["input_shape"])
    582     # Note that the following is not supported:
    583     #   out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
    584     out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
    585     return [input_tensor], [out]
    586 
    587   def build_inputs(parameters, sess, inputs, outputs):
    588     input_values = create_tensor_data(
    589         np.float32, parameters["input_shape"], min_value=-3, max_value=10)
    590     return [input_values], sess.run(
    591         outputs, feed_dict=dict(zip(inputs, [input_values])))
    592 
    593   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    594 
    595 
    596 def make_relu6_tests(zip_path):
    597   """Make a set of tests to do relu6."""
    598 
    599   # Choose a set of parameters
    600   test_parameters = [{
    601       "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
    602                       [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
    603   }]
    604 
    605   def build_graph(parameters):
    606     input_tensor = tf.placeholder(
    607         dtype=tf.float32, name="input", shape=parameters["input_shape"])
    608     out = tf.nn.relu6(input_tensor)
    609     return [input_tensor], [out]
    610 
    611   def build_inputs(parameters, sess, inputs, outputs):
    612     input_values = create_tensor_data(
    613         np.float32, parameters["input_shape"], min_value=-3, max_value=10)
    614     return [input_values], sess.run(
    615         outputs, feed_dict=dict(zip(inputs, [input_values])))
    616 
    617   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    618 
    619 
    620 # This function tests various TensorFlow functions that generate Const ops,
    621 # including `tf.ones`, `tf.zeros` and random functions.
    622 def make_constant_tests(zip_path):
    623   """Make a set of tests to do constant ops."""
    624 
    625   test_parameters = [{
    626       "dtype": [tf.float32, tf.int32],
    627       "input_shape": [[1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
    628   }]
    629 
    630   def build_graph(parameters):
    631     # Since Toco & Tflite can't have a single constant op in the entire graph,
    632     # this test adds the constant op tensor to a zero-valued input tensor.
    633     input1 = tf.placeholder(dtype=parameters["dtype"], name="input1",
    634                             shape=parameters["input_shape"])
    635     out = tf.ones(parameters["input_shape"], dtype=parameters["dtype"]) + input1
    636     return [input1], [out]
    637 
    638   def build_inputs(parameters, sess, inputs, outputs):
    639     input1 = np.zeros(parameters["input_shape"],
    640                       dtype=_TF_TYPE_INFO[parameters["dtype"]][0])
    641     return [input1], sess.run(outputs, feed_dict={inputs[0]: input1})
    642 
    643   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    644 
    645 
    646 def make_binary_op_tests(zip_path, binary_operator):
    647   """Make a set of tests to do binary ops with and without broadcast."""
    648 
    649   # These parameters are split because we don't support broadcasting.
    650   test_parameters = [{
    651       "dtype": [tf.float32, tf.int32],
    652       "input_shape_1": [[1, 3, 4, 3]],
    653       "input_shape_2": [[1, 3, 4, 3]],
    654       "activation": [True]
    655   }, {
    656       "dtype": [tf.float32],
    657       "input_shape_1": [[5]],
    658       "input_shape_2": [[5]],
    659       "activation": [False, True]
    660   }, {
    661       "dtype": [tf.float32],
    662       "input_shape_1": [[1, 3, 4, 3]],
    663       "input_shape_2": [[3]],
    664       "activation": [True]
    665   }]
    666 
    667   def build_graph(parameters):
    668     """Builds the graph given the current parameters."""
    669     input1 = tf.placeholder(
    670         dtype=parameters["dtype"],
    671         name="input1",
    672         shape=parameters["input_shape_1"])
    673     input2 = tf.placeholder(
    674         dtype=parameters["dtype"],
    675         name="input2",
    676         shape=parameters["input_shape_2"])
    677     out = binary_operator(input1, input2)
    678     if parameters["activation"]:
    679       out = tf.nn.relu(out)
    680     return [input1, input2], [out]
    681 
    682   def build_inputs(parameters, sess, inputs, outputs):
    683     """Builds operand inputs for op."""
    684     input1 = create_tensor_data(parameters["dtype"],
    685                                 parameters["input_shape_1"])
    686     input2 = create_tensor_data(parameters["dtype"],
    687                                 parameters["input_shape_2"])
    688     return [input1, input2], sess.run(
    689         outputs, feed_dict={
    690             inputs[0]: input1,
    691             inputs[1]: input2
    692         })
    693 
    694   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    695 
    696 
    697 def make_mean_tests(zip_path):
    698   """Make a set of tests to do mean."""
    699 
    700   test_parameters = [{
    701       "input_dtype": [tf.float32, tf.int32, tf.int64],
    702       "input_shape": [[3, 2, 4]],
    703       "axis": [
    704           None, 0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
    705           [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0],
    706           [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
    707       ],
    708       "const_axis": [True, False],
    709       "keep_dims": [True, False],
    710   }, {
    711       "input_dtype": [tf.float32, tf.int32, tf.int64],
    712       "input_shape": [[1, 224, 224, 3]],
    713       "axis": [
    714           None, 0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],
    715           [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2,
    716           -3, -4, [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
    717           [2, 2, 3], [-3, -3, -4], [-3, 2, 1]
    718       ],
    719       "const_axis": [True, False],
    720       "keep_dims": [True, False],
    721   }]
    722 
    723   def build_graph(parameters):
    724     """Build the mean op testing graph."""
    725     input_tensor = tf.placeholder(
    726         dtype=parameters["input_dtype"],
    727         name="input",
    728         shape=parameters["input_shape"])
    729 
    730     # Get axis as either a placeholder or constants.
    731     if parameters["const_axis"]:
    732       axis = parameters["axis"]
    733       input_tensors = [input_tensor]
    734     else:
    735       if isinstance(parameters["axis"], list):
    736         shape = [len(parameters["axis"])]
    737       else:
    738         shape = [0]  # shape for None or integers.
    739       axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape)
    740       input_tensors = [input_tensor, axis]
    741 
    742     out = tf.reduce_mean(
    743         input_tensor, axis=axis, keep_dims=parameters["keep_dims"])
    744     return input_tensors, [out]
    745 
    746   def build_inputs(parameters, sess, inputs, outputs):
    747     values = [
    748         create_tensor_data(parameters["input_dtype"], parameters["input_shape"])
    749     ]
    750     if not parameters["const_axis"]:
    751       if parameters["axis"]:
    752         values.append(np.array(parameters["axis"]))
    753     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
    754 
    755   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    756 
    757 
    758 def make_exp_tests(zip_path):
    759   """Make a set of tests to do exp."""
    760 
    761   test_parameters = [{
    762       "input_dtype": [tf.float32],
    763       "input_shape": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
    764   }]
    765 
    766   def build_graph(parameters):
    767     """Build the exp op testing graph."""
    768     input_tensor = tf.placeholder(
    769         dtype=parameters["input_dtype"],
    770         name="input",
    771         shape=parameters["input_shape"])
    772 
    773     out = tf.exp(input_tensor)
    774     return [input_tensor], [out]
    775 
    776   def build_inputs(parameters, sess, inputs, outputs):
    777     values = [
    778         create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
    779                            min_value=-100, max_value=9)
    780     ]
    781     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
    782 
    783   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    784 
    785 
    786 def make_binary_op_tests_func(binary_operator):
    787   """Return a function that does a test on a binary operator."""
    788   return lambda zip_path: make_binary_op_tests(zip_path, binary_operator)
    789 
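# For instance (illustrative only; the name and path are hypothetical), an
# elementwise add generator can be obtained and run as:
#
#   make_add_tests = make_binary_op_tests_func(tf.add)
#   make_add_tests("/tmp/add.zip")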
    790 
    791 def make_gather_tests(zip_path):
    792   """Make a set of tests to do gather."""
    793 
    794   test_parameters = [{
    795       # TODO(mgubin): add string tests when they are supported by Toco.
    796       # TODO(mgubin): add tests for Nd indices when they are supported by
    797       # TfLite.
    798       # TODO(mgubin): add tests for axis != 0 when it is supported by TfLite.
    799       "params_dtype": [tf.float32, tf.int32],
    800       "params_shape": [[10], [1, 2, 20]],
    801       "indices_dtype": [tf.int32],
    802       "indices_shape": [[3], [5]],
    803       "axis": [0],  # axis!=0 is GatherV2
    804   }]
    805 
    806   def build_graph(parameters):
    807     """Build the gather op testing graph."""
    808     params = tf.placeholder(
    809         dtype=parameters["params_dtype"],
    810         name="params",
    811         shape=parameters["params_shape"])
    812     indices = tf.placeholder(
    813         dtype=parameters["indices_dtype"],
    814         name="indices",
    815         shape=parameters["indices_shape"])
    816     out = tf.gather(params, indices, axis=parameters["axis"])
    817     return [params, indices], [out]
    818 
    819   def build_inputs(parameters, sess, inputs, outputs):
    820     params = create_tensor_data(parameters["params_dtype"],
    821                                 parameters["params_shape"])
    822     indices = create_tensor_data(parameters["indices_dtype"],
    823                                  parameters["indices_shape"], 0,
    824                                  parameters["params_shape"][0] - 1)
    825     return [params, indices], sess.run(
    826         outputs, feed_dict=dict(zip(inputs, [params, indices])))
    827 
    828   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    829 
    830 
    831 def make_global_batch_norm_tests(zip_path):
    832   """Make a set of tests to do batch_norm_with_global_normalization."""
    833 
    834   test_parameters = [{
    835       "dtype": [tf.float32],
    836       "input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
    837       "epsilon": [0.1, 0.0001],
    838       "scale_after": [True, False],
    839   }]
    840 
    841   def build_graph(parameters):
    842     """Build the global batch norm testing graph."""
    843     input_shape = parameters["input_shape"]
    844     scale_shape = input_shape[3]
    845 
    846     scale = create_tensor_data(parameters["dtype"], scale_shape)
    847     offset = create_tensor_data(parameters["dtype"], scale_shape)
    848     mean = create_tensor_data(parameters["dtype"], scale_shape)
    849     variance = create_tensor_data(parameters["dtype"], scale_shape)
    850 
    851     x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    852     x_norm = tf.nn.batch_norm_with_global_normalization(
    853         x, mean, variance, scale, offset,
    854         parameters["epsilon"], parameters["scale_after"])
    855 
    856     input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
    857                                   shape=parameters["input_shape"])
    858     out = tf.add(input_tensor, x_norm)
    859     return [input_tensor], [out]
    860 
    861   def build_inputs(parameters, sess, inputs, outputs):
    862     input_value = create_tensor_data(parameters["dtype"],
    863                                      parameters["input_shape"])
    864     return [input_value], sess.run(
    865         outputs, feed_dict=dict(zip(inputs, [input_value])))
    866 
    867   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    868 
    869 
    870 def make_fused_batch_norm_tests(zip_path):
    871   """Make a set of tests to do fused_batch_norm."""
    872 
    873   test_parameters = [{
    874       "dtype": [tf.float32],
    875       "input_shape": [[1, 1, 6, 2]],
    876       "epsilon": [0.001, 0.1],
    877   }]
    878 
    879   def build_graph(parameters):
    880     """Build the testing graph for fused batch normalization."""
    881     input_shape = parameters["input_shape"]
    882     scale_shape = input_shape[3]
    883 
    884     scale = create_tensor_data(parameters["dtype"], scale_shape)
    885     offset = create_tensor_data(parameters["dtype"], scale_shape)
    886     mean = create_tensor_data(parameters["dtype"], scale_shape)
    887     variance = create_tensor_data(parameters["dtype"], scale_shape)
    888 
    889     x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    890     [x_norm, _, _] = tf.nn.fused_batch_norm(
    891         x, scale, offset, mean, variance,
    892         parameters["epsilon"], data_format="NHWC", is_training=False)
    893 
    894     input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
    895                                   shape=parameters["input_shape"])
    896     out = tf.add(input_tensor, x_norm)
    897     return [input_tensor], [out]
    898 
    899   def build_inputs(parameters, sess, inputs, outputs):
    900     input_value = create_tensor_data(parameters["dtype"],
    901                                      parameters["input_shape"])
    902     return [input_value], sess.run(
    903         outputs, feed_dict=dict(zip(inputs, [input_value])))
    904 
    905   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    906 
    907 
    908 def make_conv_tests(zip_path):
    909   """Make a set of tests to do convolution."""
    910 
    911   test_parameters = [
    912       {
    913           "input_shape": [[1, 3, 4, 3]],
    914           "filter_shape": [[1, 1, 3, 2]],
    915           "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
    916           "padding": ["SAME", "VALID"],
    917           "data_format": ["NHWC"],  # TODO(aselle): NCHW  would be good
    918           "constant_filter": [True, False],
    919       },
    920       {
    921           "input_shape": [[2, 14, 14, 2]],
    922           "filter_shape": [[6, 6, 2, 2]],
    923           "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
    924           "padding": ["SAME", "VALID"],
    925           "data_format": ["NHWC"],  # TODO(aselle): NCHW  would be good
    926           "constant_filter": [True, False],
    927       }
    928   ]
    929 
    930   def build_graph(parameters):
    931     """Build a conv graph given `parameters`."""
    932     input_tensor = tf.placeholder(
    933         dtype=tf.float32, name="input", shape=parameters["input_shape"])
    934 
    935     # Get filter input either as a placeholder or constants. Also get a list of
    936     # the input tensors that are represented as placeholders.
    937     if parameters["constant_filter"]:
    938       filter_input = create_tensor_data(np.float32, parameters["filter_shape"])
    939       input_tensors = [input_tensor]
    940     else:
    941       filter_input = tf.placeholder(
    942           dtype=tf.float32, name="filter", shape=parameters["filter_shape"])
    943       input_tensors = [input_tensor, filter_input]
    944 
    945     out = tf.nn.conv2d(
    946         input_tensor,
    947         filter_input,
    948         strides=parameters["strides"],
    949         padding=parameters["padding"],
    950         data_format=parameters["data_format"])
    951     return input_tensors, [out]
    952 
    953   def build_inputs(parameters, sess, inputs, outputs):
    954     # Build list of input values either containing 1 tensor (input) or 2 tensors
    955     # (input, filter) based on whether filter is constant or variable input.
    956     values = [create_tensor_data(np.float32, parameters["input_shape"])]
    957     if not parameters["constant_filter"]:
    958       values.append(create_tensor_data(np.float32, parameters["filter_shape"]))
    959     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
    960 
    961   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
    962 
    963 
    964 def make_depthwiseconv_tests(zip_path):
    965   """Make a set of tests to do depthwise convolution."""
    966 
    967   # TensorFlow only supports equal strides
    968   test_parameters = [
    969       {
    970           "input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
    971           "filter_size": [[1, 1], [1, 2], [3, 3]],
    972           "strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
    973           "channel_multiplier": [1, 2],
    974           "rate": [[1, 1]],
    975           "padding": ["SAME", "VALID"],
    976           "data_format": ["NHWC"],
    977           "constant_filter": [True, False],
    978       },
    979       {
    980           "input_shape": [[1, 3, 4, 3]],
    981           "filter_size": [[1, 1]],
    982           "strides": [[1, 1, 2, 1]],  # TF needs [1, x, x, 1]
    983           "channel_multiplier": [2],
    984           "rate": [[2, 2]],  #  Only [1, 1] is supported
    985           "padding": ["SAME"],
    986           "data_format": ["NHWC"],
    987           "constant_filter": [True, False],
    988       }
    989   ]
    990 
    991   def get_tensor_shapes(parameters):
    992     input_shape = parameters["input_shape"]
    993     filter_size = parameters["filter_size"]
    994     filter_shape = filter_size + [
    995         input_shape[3], parameters["channel_multiplier"]
    996     ]
    997     return [input_shape, filter_shape]
    998 
    999   def build_graph(parameters):
   1000     """Build a depthwise conv graph given `parameters`."""
   1001     input_shape, filter_shape = get_tensor_shapes(parameters)
   1002     input_tensor = tf.placeholder(
   1003         dtype=tf.float32, name="input", shape=input_shape)
   1004 
   1005     # Get filter input either as a placeholder or constants. Also get a list of
   1006     # the input tensors that are represented as placeholders.
   1007     if parameters["constant_filter"]:
   1008       filter_input = create_tensor_data(np.float32, filter_shape)
   1009       input_tensors = [input_tensor]
   1010     else:
   1011       filter_input = tf.placeholder(
   1012           dtype=tf.float32, name="filter", shape=filter_shape)
   1013       input_tensors = [input_tensor, filter_input]
   1014 
   1015     out = tf.nn.depthwise_conv2d(
   1016         input_tensor,
   1017         filter_input,
   1018         strides=parameters["strides"],
   1019         rate=parameters["rate"],
   1020         padding=parameters["padding"],
   1021         data_format=parameters["data_format"])
   1022     return input_tensors, [out]
   1023 
   1024   def build_inputs(parameters, sess, inputs, outputs):
   1025     # Build list of input values either containing 1 tensor (input) or 2 tensors
   1026     # (input, filter) based on whether filter is constant or variable input.
   1027     input_shape, filter_shape = get_tensor_shapes(parameters)
   1028     values = [create_tensor_data(np.float32, input_shape)]
   1029     if not parameters["constant_filter"]:
   1030       values.append(create_tensor_data(np.float32, filter_shape))
   1031     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1032 
   1033   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1034 
   1035 
   1036 def make_split_tests(zip_path):
   1037   """Make a set of tests to do tf.split."""
   1038 
   1039   test_parameters = [{
   1040       "input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
   1041       "num_or_size_splits": [1, 2, 3, 4, 5, [2, 2]],
   1042       "axis": [0, 1, 2, 3, -4, -3, -2, -1],
   1043   }]
   1044 
   1045   def build_graph(parameters):
   1046     input_tensor = tf.placeholder(
   1047         dtype=tf.float32, name="input", shape=parameters["input_shape"])
   1048     out = tf.split(
   1049         input_tensor, parameters["num_or_size_splits"], parameters["axis"])
   1050     return [input_tensor], out
   1051 
   1052   def build_inputs(parameters, sess, inputs, outputs):
   1053     values = [create_tensor_data(np.float32, parameters["input_shape"])]
   1054     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1055 
   1056   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1057 
   1058 
   1059 def make_concatenation_tests(zip_path):
   1060   """Make a set of tests to do concatenation."""
   1061 
   1062   test_parameters = [{
   1063       "base_shape": [[1, 3, 4, 3], [3, 4]],
   1064       "num_tensors": [1, 2, 3, 4, 5, 6],
   1065       "axis": [0, 1, 2, 3, -3, -2, -1],
   1066   }]
   1067 
   1068   def get_shape(parameters, delta):
   1069     """Return a tweaked version of 'base_shape'."""
   1070     axis = parameters["axis"]
   1071     shape = parameters["base_shape"][:]
   1072     if axis < 0:
   1073       axis += len(shape)
   1074     if axis < len(shape):
   1075       shape[axis] += delta
   1076     return shape
   1077 
   1078   def build_graph(parameters):
   1079     all_tensors = []
   1080     for n in range(0, parameters["num_tensors"]):
   1081       input_tensor = tf.placeholder(dtype=tf.float32, name=("input%d" % n),
   1082                                     shape=get_shape(parameters, n))
   1083       all_tensors.append(input_tensor)
   1084     out = tf.concat(all_tensors, parameters["axis"])
   1085     return all_tensors, [out]
   1086 
   1087   def build_inputs(parameters, sess, inputs, outputs):
   1088     all_values = []
   1089     for n in range(0, parameters["num_tensors"]):
   1090       input_values = create_tensor_data(np.float32,
   1091                                         get_shape(parameters, n))
   1092       all_values.append(input_values)
   1093     return all_values, sess.run(
   1094         outputs, feed_dict=dict(zip(inputs, all_values)))
   1095 
   1096   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1097 
   1098 
   1099 def make_fully_connected_tests(zip_path):
   1100   """Make a set of tests to do fully_connected."""
   1101 
   1102   test_parameters = [{
   1103       "shape1": [[3, 3]],
   1104       "shape2": [[3, 3]],
   1105       "transpose_a": [True, False],
   1106       "transpose_b": [True, False],
   1107       "constant_filter": [True, False],
   1108   }, {
   1109       "shape1": [[4, 4], [1, 4], [4]],
   1110       "shape2": [[4, 4], [4, 1], [4]],
   1111       "transpose_a": [False],
   1112       "transpose_b": [False],
   1113       "constant_filter": [True, False],
   1114   }, {
   1115       "shape1": [[40, 37]],
   1116       "shape2": [[37, 40]],
   1117       "transpose_a": [False],
   1118       "transpose_b": [False],
   1119       "constant_filter": [True, False],
   1120   }]
   1121 
   1122   def build_graph(parameters):
   1123     """Build a matmul graph given `parameters`."""
   1124     input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
   1125                                    shape=parameters["shape1"])
   1126 
   1127     # Get input_tensor2 either as a placeholder or constants. Also get a list of
   1128     # the input tensors that are represented as placeholders.
   1129     if parameters["constant_filter"]:
   1130       input_tensor2 = create_tensor_data(np.float32, parameters["shape2"])
   1131       input_tensors = [input_tensor1]
   1132     else:
   1133       input_tensor2 = tf.placeholder(
   1134           dtype=tf.float32, name="input2", shape=parameters["shape2"])
   1135       input_tensors = [input_tensor1, input_tensor2]
   1136 
   1137     out = tf.matmul(input_tensor1, input_tensor2,
   1138                     transpose_a=parameters["transpose_a"],
   1139                     transpose_b=parameters["transpose_b"])
   1140     return input_tensors, [out]
   1141 
   1142   def build_inputs(parameters, sess, inputs, outputs):
   1143     # Build list of input values either containing 1 tensor (input_values1) or 2
   1144     # tensors (input_values1, input_values2) based on whether the second input
   1145     # is a constant or variable input.
   1146     values = [create_tensor_data(np.float32, shape=parameters["shape1"])]
   1147     if not parameters["constant_filter"]:
   1148       values.append(create_tensor_data(np.float32, parameters["shape2"]))
   1149     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1150 
   1151   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1152 
   1153 
   1154 def make_l2norm_tests(zip_path):
   1155   """Make a set of tests to do l2norm."""
   1156 
   1157   # Choose a set of parameters
   1158   test_parameters = [{
   1159       "input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
   1160                       [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
   1161       "dim": [0, 1, 2, 3, [2, 3], -2],
   1162       "epsilon": [None, 1e-12, 1e-3],
   1163   }]
   1164 
   1165   def build_graph(parameters):
   1166     input_tensor = tf.placeholder(
   1167         dtype=tf.float32, name="input", shape=parameters["input_shape"])
   1168     if parameters["epsilon"]:
   1169       out = tf.nn.l2_normalize(
   1170           input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
   1171     else:
   1172       out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
   1173     return [input_tensor], [out]
   1174 
   1175   def build_inputs(parameters, sess, inputs, outputs):
   1176     input_values = create_tensor_data(
   1177         np.float32, parameters["input_shape"], min_value=-4, max_value=10)
   1178     return [input_values], sess.run(
   1179         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1180 
   1181   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1182 
   1183 
   1184 def make_local_response_norm_tests(zip_path):
   1185   """Make a set of tests to do local_response_norm."""
   1186 
   1187   # Choose a set of parameters
   1188   test_parameters = [{
   1189       "input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
   1190       "depth_radius": [None, 0, 1, 3, 4, 5],
   1191       "bias": [None, 0.1, 0.3, -0.1],
   1192       "alpha": [None, 1, 2, -3],
   1193       "beta": [None, 0.5, 0.25, 2],
   1194   }]
   1195 
   1196   def build_graph(parameters):
   1197     input_tensor = tf.placeholder(
   1198         dtype=tf.float32, name="input", shape=parameters["input_shape"])
   1199     out = tf.nn.local_response_normalization(
   1200         input_tensor, depth_radius=parameters["depth_radius"],
   1201         bias=parameters["bias"], alpha=parameters["alpha"],
   1202         beta=parameters["beta"])
   1203     return [input_tensor], [out]
   1204 
   1205   def build_inputs(parameters, sess, inputs, outputs):
   1206     input_values = create_tensor_data(
   1207         np.float32, parameters["input_shape"], min_value=-4, max_value=10)
   1208     return [input_values], sess.run(
   1209         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1210 
   1211   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1212 
   1213 
   1214 def make_pad_tests(zip_path):
   1215   """Make a set of tests to do pad."""
   1216 
   1217   # TODO(nupurgarg): Add test for tf.uint8.
   1218   test_parameters = [
   1219       {
   1220           "dtype": [tf.int32, tf.int64, tf.float32],
   1221           "input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
   1222           "paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
   1223                                                           [0, 0], [2, 3]]],
   1224           "constant_paddings": [True, False],
   1225       },
   1226       # Non-4D use case.
   1227       {
   1228           "dtype": [tf.int32, tf.int64, tf.float32],
   1229           "input_shape": [[1, 2], [0, 1, 2]],
   1230           "paddings": [[[0, 1], [2, 3]]],
   1231           "constant_paddings": [True, False],
   1232       },
   1233   ]
   1234 
   1235   def build_graph(parameters):
   1236     """Build a pad graph given `parameters`."""
   1237     input_tensor = tf.placeholder(
   1238         dtype=parameters["dtype"],
   1239         name="input",
   1240         shape=parameters["input_shape"])
   1241 
   1242     # Get paddings as either a placeholder or constants.
   1243     if parameters["constant_paddings"]:
   1244       paddings = parameters["paddings"]
   1245       input_tensors = [input_tensor]
   1246     else:
   1247       shape = [len(parameters["paddings"]), 2]
   1248       paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
   1249       input_tensors = [input_tensor, paddings]
   1250 
   1251     out = tf.pad(input_tensor, paddings=paddings)
   1252     return input_tensors, [out]
   1253 
   1254   def build_inputs(parameters, sess, inputs, outputs):
   1255     values = [
   1256         create_tensor_data(parameters["dtype"], parameters["input_shape"])
   1257     ]
   1258     if not parameters["constant_paddings"]:
   1259       values.append(np.array(parameters["paddings"]))
   1260     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1261 
   1262   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1263 
   1264 
   1265 def make_reshape_tests(zip_path):
   1266   """Make a set of tests to do reshape."""
   1267 
   1268   # All shapes below are suitable for tensors with 420 elements.
   1269   test_parameters = [{
   1270       "dtype": [tf.float32, tf.int32],
   1271       "input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
   1272       "output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
   1273   }]
   1274 
   1275   def build_graph(parameters):
   1276     input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
   1277                                   shape=parameters["input_shape"])
   1278     out = tf.reshape(input_tensor, shape=parameters["output_shape"])
   1279     return [input_tensor], [out]
   1280 
   1281   def build_inputs(parameters, sess, inputs, outputs):
   1282     input_values = create_tensor_data(parameters["dtype"],
   1283                                       parameters["input_shape"])
   1284     return [input_values], sess.run(
   1285         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1286 
   1287   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1288 
   1289 
   1290 def make_resize_bilinear_tests(zip_path):
   1291   """Make a set of tests to do resize_bilinear."""
   1292 
   1293   test_parameters = [{
   1294       "dtype": [tf.float32, tf.int32],
   1295       "input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
   1296       "size": [[1, 1], [4, 3], [2, 2], [5, 6]],
   1297       "align_corners": [None, True, False],
   1298   }]
   1299 
   1300   def build_graph(parameters):
   1301     input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
   1302                                   shape=parameters["input_shape"])
   1303     out = tf.image.resize_bilinear(input_tensor, size=parameters["size"],
   1304                                    align_corners=parameters["align_corners"])
   1305     return [input_tensor], [out]
   1306 
   1307   def build_inputs(parameters, sess, inputs, outputs):
   1308     input_values = create_tensor_data(parameters["dtype"],
   1309                                       parameters["input_shape"])
   1310     return [input_values], sess.run(
   1311         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1312 
   1313   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1314 
   1315 
   1316 def make_sigmoid_tests(zip_path):
   1317   """Make a set of tests to do sigmoid."""
   1318 
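           # Input shapes include a scalar ([]) and a rank-6 tensor.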
   1319   test_parameters = [{
   1320       "dtype": [tf.float32],
   1321       "input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
   1322   }]
   1323 
   1324   def build_graph(parameters):
   1325     input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
   1326                                   shape=parameters["input_shape"])
   1327     out = tf.sigmoid(input_tensor)
   1328     return [input_tensor], [out]
   1329 
   1330   def build_inputs(parameters, sess, inputs, outputs):
   1331     input_values = create_tensor_data(parameters["dtype"],
   1332                                       parameters["input_shape"])
   1333     return [input_values], sess.run(
   1334         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1335 
   1336   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1337 
   1338 
   1339 def make_softmax_tests(zip_path):
   1340   """Make a set of tests to do softmax."""
   1341 
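           # dim is the axis along which softmax is computed; -1 means the last axis.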
   1342   test_parameters = [{
   1343       "dtype": [tf.float32],
   1344       "input_shape": [[1, 3, 4, 3], [2, 3]],
   1345       "dim": [-1, 0],
   1346   }, {
   1347       "dtype": [tf.float32],
   1348       "input_shape": [[4, 7]],
   1349       "dim": [-1, 1],
   1350   }]
   1351 
   1352   def build_graph(parameters):
   1353     input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
   1354                                   shape=parameters["input_shape"])
   1355     out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
   1356     return [input_tensor], [out]
   1357 
   1358   def build_inputs(parameters, sess, inputs, outputs):
   1359     input_values = create_tensor_data(parameters["dtype"],
   1360                                       parameters["input_shape"])
   1361     return [input_values], sess.run(
   1362         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1363 
   1364   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1365 
   1366 
   1367 def make_space_to_depth_tests(zip_path):
   1368   """Make a set of tests to do space_to_depth."""
   1369 
   1370   test_parameters = [{
   1371       "dtype": [tf.float32, tf.float16, tf.int32, tf.uint8, tf.int64],
   1372       "input_shape": [[2, 12, 24, 1]],
   1373       "block_size": [2, 3, 4],
   1374   }]
   1375 
   1376   def build_graph(parameters):
   1377     input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
   1378                                   shape=parameters["input_shape"])
   1379     out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"])
   1380     return [input_tensor], [out]
   1381 
   1382   def build_inputs(parameters, sess, inputs, outputs):
   1383     input_values = create_tensor_data(parameters["dtype"],
   1384                                       parameters["input_shape"])
   1385     return [input_values], sess.run(
   1386         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1387 
   1388   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1389 
   1390 
   1391 def make_space_to_batch_nd_tests(zip_path):
   1392   """Make a set of tests to do space_to_batch_nd."""
   1393 
   1394   # TODO(nupurgarg): Add test for uint8.
   1395   test_parameters = [
   1396       {
   1397           "dtype": [tf.int32, tf.int64, tf.float32],
   1398           "input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
   1399           "block_shape": [[1, 3], [2, 2]],
   1400           "paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
   1401           "constant_block_shape": [True, False],
   1402           "constant_paddings": [True, False],
   1403       },
   1404       {
   1405           "dtype": [tf.float32],
   1406           "input_shape": [[2, 3, 7, 3]],
   1407           "block_shape": [[1, 3], [2, 2]],
   1408           "paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
   1409           "constant_block_shape": [True, False],
   1410           "constant_paddings": [True, False],
   1411       },
    1412       # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
   1413       {
   1414           "dtype": [tf.float32],
   1415           "input_shape": [[1, 4, 4, 4, 1, 1]],
   1416           "block_shape": [[2, 2, 2]],
   1417           "paddings": [[[0, 0], [0, 0], [0, 0]]],
   1418           "constant_block_shape": [True, False],
   1419           "constant_paddings": [True, False],
   1420       },
   1421   ]
   1422 
   1423   def build_graph(parameters):
   1424     """Build a space_to_batch graph given `parameters`."""
   1425     input_tensor = tf.placeholder(
   1426         dtype=parameters["dtype"],
   1427         name="input",
   1428         shape=parameters["input_shape"])
   1429     input_tensors = [input_tensor]
   1430 
   1431     # Get block_shape either as a const or as a placeholder (tensor).
   1432     if parameters["constant_block_shape"]:
   1433       block_shape = parameters["block_shape"]
   1434     else:
   1435       shape = [len(parameters["block_shape"])]
   1436       block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
   1437       input_tensors.append(block_shape)
   1438 
   1439     # Get paddings either as a const or as a placeholder (tensor).
   1440     if parameters["constant_paddings"]:
   1441       paddings = parameters["paddings"]
   1442     else:
   1443       shape = [len(parameters["paddings"]), 2]
   1444       paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
   1445       input_tensors.append(paddings)
   1446 
   1447     out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
   1448     return input_tensors, [out]
   1449 
   1450   def build_inputs(parameters, sess, inputs, outputs):
   1451     values = [
   1452         create_tensor_data(parameters["dtype"], parameters["input_shape"])
   1453     ]
   1454     if not parameters["constant_block_shape"]:
   1455       values.append(np.array(parameters["block_shape"]))
   1456     if not parameters["constant_paddings"]:
   1457       values.append(np.array(parameters["paddings"]))
   1458     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1459 
   1460   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1461 
   1462 
   1463 def make_batch_to_space_nd_tests(zip_path):
   1464   """Make a set of tests to do batch_to_space_nd."""
   1465 
   1466   test_parameters = [
   1467       {
   1468           "dtype": [tf.float32, tf.int64, tf.int32],
   1469           "input_shape": [[12, 2, 2, 1]],
   1470           "block_shape": [[1, 4], [2, 2], [3, 4]],
   1471           "crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
   1472           "constant_block_shape": [True, False],
   1473           "constant_crops": [True, False],
   1474       },
    1475       # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
   1476       {
   1477           "dtype": [tf.float32],
   1478           "input_shape": [[8, 2, 2, 2, 1, 1]],
   1479           "block_shape": [[2, 2, 2]],
   1480           "crops": [[[0, 0], [0, 0], [0, 0]]],
   1481           "constant_block_shape": [True, False],
   1482           "constant_crops": [True, False],
   1483       },
   1484   ]
   1485 
   1486   def build_graph(parameters):
   1487     """Build a batch_to_space graph given `parameters`."""
   1488     input_tensor = tf.placeholder(
   1489         dtype=parameters["dtype"],
   1490         name="input",
   1491         shape=parameters["input_shape"])
   1492     input_tensors = [input_tensor]
   1493 
   1494     # Get block_shape either as a const or as a placeholder (tensor).
   1495     if parameters["constant_block_shape"]:
   1496       block_shape = parameters["block_shape"]
   1497     else:
   1498       shape = [len(parameters["block_shape"])]
   1499       block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
   1500       input_tensors.append(block_shape)
   1501 
   1502     # Get crops either as a const or as a placeholder (tensor).
   1503     if parameters["constant_crops"]:
   1504       crops = parameters["crops"]
   1505     else:
   1506       shape = [len(parameters["crops"]), 2]
   1507       crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape)
   1508       input_tensors.append(crops)
   1509 
   1510     out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
   1511     return input_tensors, [out]
   1512 
   1513   def build_inputs(parameters, sess, inputs, outputs):
   1514     values = [
   1515         create_tensor_data(parameters["dtype"], parameters["input_shape"])
   1516     ]
   1517     if not parameters["constant_block_shape"]:
   1518       values.append(np.array(parameters["block_shape"]))
   1519     if not parameters["constant_crops"]:
   1520       values.append(np.array(parameters["crops"]))
   1521     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1522 
   1523   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1524 
   1525 
   1526 def make_transpose_tests(zip_path):
   1527   """Make a set of tests to do transpose."""
   1528 
   1529   # TODO(nupurgarg): Add test for uint8.
   1530   test_parameters = [{
   1531       "dtype": [tf.int32, tf.int64, tf.float32],
   1532       "input_shape": [[2, 2, 3]],
   1533       "perm": [[0, 1, 2], [0, 2, 1]],
   1534       "constant_perm": [True, False],
   1535   }, {
   1536       "dtype": [tf.float32],
   1537       "input_shape": [[1, 2, 3, 4]],
   1538       "perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
   1539       "constant_perm": [True, False],
   1540   }, {
   1541       "dtype": [tf.float32],
   1542       "input_shape": [[1, 2, 3, 4, 5]],
   1543       "perm": [[0, 1, 2, 3, 4]],
   1544       "constant_perm": [True, False],
   1545   }]
   1546 
   1547   def build_graph(parameters):
   1548     """Build a transpose graph given `parameters`."""
   1549     input_tensor = tf.placeholder(
   1550         dtype=parameters["dtype"],
   1551         name="input",
   1552         shape=parameters["input_shape"])
   1553 
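             # Get perm either as a constant or as a placeholder (tensor).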
   1554     if parameters["constant_perm"]:
   1555       perm = parameters["perm"]
   1556       input_tensors = [input_tensor]
   1557     else:
   1558       shape = [len(parameters["perm"]), 2]
   1559       perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
   1560       input_tensors = [input_tensor, perm]
   1561 
   1562     out = tf.transpose(input_tensor, perm=perm)
   1563     return input_tensors, [out]
   1564 
   1565   def build_inputs(parameters, sess, inputs, outputs):
   1566     values = [
   1567         create_tensor_data(parameters["dtype"], parameters["input_shape"])
   1568     ]
   1569     if not parameters["constant_perm"]:
   1570       values.append(np.array(parameters["perm"]))
   1571     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1572 
   1573   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1574 
   1575 
   1576 def make_squeeze_tests(zip_path):
   1577   """Make a set of tests to do squeeze."""
   1578 
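           # Axis lists include negative indices, repeated entries, and the empty list.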
   1579   test_parameters = [{
   1580       "dtype": [tf.int32, tf.float32, tf.int64],
   1581       "input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
   1582       "axis": [
   1583           None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
   1584           [-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
   1585           [0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
   1586       ],
   1587   }, {
   1588       "dtype": [tf.int32, tf.float32, tf.int64],
   1589       "input_shape": [[1]],
   1590       "axis": [None, [], [0], [-1]],
   1591   }, {
   1592       "dtype": [tf.int32, tf.float32, tf.int64],
   1593       "input_shape": [[1, 1, 1, 1, 1]],
   1594       "axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
   1595   }]
   1596 
   1597   def build_graph(parameters):
   1598     input_tensor = tf.placeholder(
   1599         dtype=parameters["dtype"],
   1600         name="input",
   1601         shape=parameters["input_shape"])
   1602     out = tf.squeeze(input_tensor, axis=parameters["axis"])
   1603     return [input_tensor], [out]
   1604 
   1605   def build_inputs(parameters, sess, inputs, outputs):
   1606     input_values = create_tensor_data(parameters["dtype"],
   1607                                       parameters["input_shape"])
   1608     return [input_values], sess.run(
   1609         outputs, feed_dict=dict(zip(inputs, [input_values])))
   1610 
   1611   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1612 
   1613 
   1614 def make_strided_slice_tests(zip_path):
   1615   """Make a set of tests to do strided_slice."""
   1616 
   1617   # TODO(soroosh): add test/support for uint8.
   1618   test_parameters = [
   1619       # 4-D
   1620       {
   1621           "dtype": [tf.float32, tf.int32, tf.int64],
   1622           "index_type": [tf.int32],
   1623           "input_shape": [[12, 2, 2, 5]],
   1624           "begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
   1625           "end": [[8, 2, 2, 3], [12, 2, 2, 5]],
   1626           "strides": [None, [2, 1, 3, 1]],
   1627           "begin_mask": [None, 1, 8],
   1628           "end_mask": [None, 1, 8],
   1629           "shrink_axis_mask": [None, 1, 8, 11, 15, -1],
   1630           "constant_indices": [False, True],
   1631       },
    1632       # 4-D input with begin/end/strides given for the first dimension only.
   1633       {
   1634           "dtype": [tf.float32],
   1635           "index_type": [tf.int32],
   1636           "input_shape": [[12, 2, 2, 5]],
   1637           "begin": [[0]],
   1638           "end": [[1]],
   1639           "strides": [[1]],
   1640           "begin_mask": [0],
   1641           "end_mask": [0],
   1642           "shrink_axis_mask": [1],
   1643           "constant_indices": [True],
   1644       },
   1645       # 2-D
   1646       {
   1647           "dtype": [tf.float32, tf.int32, tf.int64],
   1648           "index_type": [tf.int32],
   1649           "input_shape": [[2, 3]],
   1650           "begin": [[0, 0], [1, 0]],
   1651           "end": [[2, 3], [2, 2]],
   1652           "strides": [None, [2, 2]],
   1653           "begin_mask": [None, 1, 2],
   1654           "end_mask": [None, 1, 2],
   1655           "shrink_axis_mask": [None, 1, 2, 3, -1],
   1656           "constant_indices": [False, True],
   1657       },
   1658       # Negative strides
   1659       {
   1660           "dtype": [tf.float32],
   1661           "index_type": [tf.int32],
   1662           "input_shape": [[2, 3]],
   1663           "begin": [[0, -1]],
   1664           "end": [[2, -3]],
   1665           "strides": [[1, -1]],
   1666           "begin_mask": [None, 1, 2],
   1667           "end_mask": [None, 1, 2],
   1668           "shrink_axis_mask": [None, 1, 2, 3, -1],
   1669           "constant_indices": [False],
   1670       },
   1671   ]
   1672 
   1673   def build_graph(parameters):
   1674     """Build graph for stride_slice test."""
   1675     input_tensor = tf.placeholder(
   1676         dtype=parameters["dtype"],
   1677         name="input",
   1678         shape=parameters["input_shape"])
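             # Get begin/end/strides either as constants or as placeholders (tensors).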
   1679     if parameters["constant_indices"]:
   1680       begin = parameters["begin"]
   1681       end = parameters["end"]
   1682       strides = parameters["strides"]
   1683       tensors = [input_tensor]
   1684     else:
   1685       begin = tf.placeholder(
   1686           dtype=parameters["index_type"],
   1687           name="begin",
   1688           shape=[len(parameters["input_shape"])])
   1689       end = tf.placeholder(
   1690           dtype=parameters["index_type"],
   1691           name="end",
   1692           shape=[len(parameters["input_shape"])])
   1693       strides = (
   1694           tf.placeholder(
   1695               dtype=parameters["index_type"],
   1696               name="strides",
   1697               shape=[len(parameters["input_shape"])])
   1698           if parameters["strides"] is not None else None)
   1699       tensors = [input_tensor, begin, end]
   1700       if strides is not None:
   1701         tensors.append(strides)
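             # Only begin_mask and end_mask are applied below; shrink_axis_mask from the
             # parameters is not passed to tf.strided_slice.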
   1702     out = tf.strided_slice(
   1703         input_tensor,
   1704         begin,
   1705         end,
   1706         strides,
   1707         begin_mask=parameters["begin_mask"],
   1708         end_mask=parameters["end_mask"])
   1709     return tensors, [out]
   1710 
   1711   def build_inputs(parameters, sess, inputs, outputs):
   1712     """Build inputs for stride_slice test."""
   1713     input_values = create_tensor_data(parameters["dtype"],
   1714                                       parameters["input_shape"])
   1715     index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
   1716     values = [input_values]
   1717     if not parameters["constant_indices"]:
   1718       begin_values = np.array(parameters["begin"]).astype(index_type)
   1719       end_values = np.array(parameters["end"]).astype(index_type)
   1720       stride_values = (
   1721           np.array(parameters["strides"]).astype(index_type)
   1722           if parameters["strides"] is not None else None)
   1723       values.append(begin_values)
   1724       values.append(end_values)
   1725       if stride_values is not None:
   1726         values.append(stride_values)
   1727 
   1728     return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
   1729 
   1730   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1731 
   1732 
   1733 def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
   1734   """Given an input perform a sequence of TensorFlow ops to produce l2pool."""
   1735   return tf.sqrt(tf.nn.avg_pool(
   1736       tf.square(input_tensor), ksize=ksize, strides=strides,
   1737       padding=padding, data_format=data_format))
   1738 
   1739 
   1740 def make_topk_tests(zip_path):
   1741   """Make a set of tests to do gather."""
   1742 
   1743   test_parameters = [{
   1744       "input_dtype": [tf.float32, tf.int32],
   1745       "input_shape": [[10], [5, 20]],
   1746   }]
   1747 
   1748   def build_graph(parameters):
   1749     """Build the gather op testing graph."""
   1750     input_value = tf.placeholder(
   1751         dtype=parameters["input_dtype"],
   1752         name="input",
   1753         shape=parameters["input_shape"])
   1754     k = tf.constant(3, name="k")
   1755     out = tf.nn.top_k(input_value, k)
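             # tf.nn.top_k returns (values, indices); only the indices are used as the output.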
   1756     return [input_value], [out[1]]
   1757 
   1758   def build_inputs(parameters, sess, inputs, outputs):
   1759     input_value = create_tensor_data(parameters["input_dtype"],
   1760                                      parameters["input_shape"])
   1761     return [input_value], sess.run(
   1762         outputs, feed_dict=dict(zip(inputs, [input_value])))
   1763 
   1764   make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
   1765 
   1766 # Toco binary path provided by the generate rule.
   1767 bin_path = None
   1768 
   1769 
   1770 def main(unused_args):
   1771   global bin_path
   1772   def mkdir_if_not_exist(x):
   1773     if not os.path.isdir(x):
   1774       os.mkdir(x)
   1775       if not os.path.isdir(x):
   1776         raise RuntimeError("Failed to create dir %r" % x)
   1777 
   1778   if FLAGS.type == "zipped":
   1779     opstest_path = os.path.join(FLAGS.output_path)
   1780     mkdir_if_not_exist(opstest_path)
   1781     def _path(filename):
   1782       return os.path.join(opstest_path, filename)
   1783 
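             # Map each output zip file name to the function that generates its test cases.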
   1784     dispatch = {
   1785         "control_dep.zip": make_control_dep_tests,
   1786         "add.zip": make_binary_op_tests_func(tf.add),
   1787         "space_to_batch_nd.zip": make_space_to_batch_nd_tests,
   1788         "div.zip": make_binary_op_tests_func(tf.div),
   1789         "sub.zip": make_binary_op_tests_func(tf.subtract),
   1790         "batch_to_space_nd.zip": make_batch_to_space_nd_tests,
   1791         "conv.zip": make_conv_tests,
   1792         "constant.zip": make_constant_tests,
   1793         "depthwiseconv.zip": make_depthwiseconv_tests,
   1794         "concat.zip": make_concatenation_tests,
   1795         "fully_connected.zip": make_fully_connected_tests,
   1796         "global_batch_norm.zip": make_global_batch_norm_tests,
   1797         "gather.zip": make_gather_tests,
   1798         "fused_batch_norm.zip": make_fused_batch_norm_tests,
   1799         "l2norm.zip": make_l2norm_tests,
   1800         "local_response_norm.zip": make_local_response_norm_tests,
   1801         "mul.zip": make_binary_op_tests_func(tf.multiply),
   1802         "relu.zip": make_relu_tests,
   1803         "relu1.zip": make_relu1_tests,
   1804         "relu6.zip": make_relu6_tests,
   1805         "l2_pool.zip": make_pool_tests(make_l2_pool),
   1806         "avg_pool.zip": make_pool_tests(tf.nn.avg_pool),
   1807         "max_pool.zip": make_pool_tests(tf.nn.max_pool),
   1808         "pad.zip": make_pad_tests,
   1809         "reshape.zip": make_reshape_tests,
   1810         "resize_bilinear.zip": make_resize_bilinear_tests,
   1811         "sigmoid.zip": make_sigmoid_tests,
   1812         "softmax.zip": make_softmax_tests,
   1813         "space_to_depth.zip": make_space_to_depth_tests,
   1814         "topk.zip": make_topk_tests,
   1815         "split.zip": make_split_tests,
   1816         "transpose.zip": make_transpose_tests,
   1817         "mean.zip": make_mean_tests,
   1818         "squeeze.zip": make_squeeze_tests,
   1819         "strided_slice.zip": make_strided_slice_tests,
   1820         "exp.zip": make_exp_tests,
   1821     }
   1822     out = FLAGS.zip_to_output
   1823     bin_path = FLAGS.toco
   1824     if out in dispatch:
   1825       dispatch[out](_path(out))
   1826     else:
   1827       raise RuntimeError("Invalid zip to output %r" % out)
   1828 
   1829   else:
   1830     raise RuntimeError("Invalid argument for type of generation.")
   1831 
   1832 
   1833 if __name__ == "__main__":
   1834   FLAGS, unparsed = parser.parse_known_args()
   1835 
   1836   if unparsed:
   1837     print("Usage: %s <path out> zipped <zip file to generate>")
   1838   else:
   1839     tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
   1840