// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT
// This file was machine generated by github.com/tensorflow/tensorflow/tensorflow/go/genop/internal
//
// WARNING: This generation of wrapper functions for TensorFlow ops is in an
// experimental state. The generated API can change without notice.

package op

import tf "github.com/tensorflow/tensorflow/tensorflow/go"

// optionalAttr is an intentionally un-exported type to hide
// details of how optional attributes to operations are implemented.
type optionalAttr map[string]interface{}

func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, int, error) {
	size, err := op.OutputListSize(output)
	if err != nil {
		return nil, start, err
	}
	list := make([]tf.Output, size)
	for i := 0; i < size; i++ {
		list[i] = op.Output(start + i)
	}
	return list, start + size, nil
}
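
// applyAttrs is an editorial sketch, not part of the generated API: it shows how
// the Attr option functions defined throughout this file fold into the optionalAttr
// map, mirroring the loop that each generated wrapper inlines before building its
// tf.OpSpec.
func applyAttrs(optional ...func(optionalAttr)) optionalAttr {
	attrs := optionalAttr{}
	for _, a := range optional {
		a(attrs) // each option mutates the map, e.g. setting "max_images"
	}
	return attrs
}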

// WriteImageSummaryAttr is an optional argument to WriteImageSummary.
type WriteImageSummaryAttr func(optionalAttr)

// WriteImageSummaryMaxImages sets the optional max_images attribute to value.
//
// value: Max number of batch elements to generate images for.
// If not specified, defaults to 3
//
// REQUIRES: value >= 1
func WriteImageSummaryMaxImages(value int64) WriteImageSummaryAttr {
	return func(m optionalAttr) {
		m["max_images"] = value
	}
}

// Writes a `Summary` protocol buffer with images.
//
// The summary has up to `max_images` summary values containing images. The
// images are built from `tensor` which must be 4-D with shape `[batch_size,
// height, width, channels]` and where `channels` can be:
//
// *  1: `tensor` is interpreted as Grayscale.
// *  3: `tensor` is interpreted as RGB.
// *  4: `tensor` is interpreted as RGBA.
//
// The images have the same number of channels as the input tensor. For float
// input, the values are normalized one image at a time to fit in the range
// `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
// normalization algorithms:
//
// *  If the input values are all positive, they are rescaled so the largest one
//    is 255.
//
// *  If any input value is negative, the values are shifted so input value 0.0
//    is at 127.  They are then rescaled so that either the smallest value is 0,
//    or the largest one is 255.
//
// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
// build the `tag` of the summary values:
//
// *  If `max_images` is 1, the summary value tag is '*tag*/image'.
// *  If `max_images` is greater than 1, the summary value tags are
//    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
//
// The `bad_color` argument is the color to use in the generated images for
// non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
// Each element must be in the range `[0, 255]` (It represents the value of a
// pixel in the output image).  Non-finite values in the input tensor are
// replaced by this tensor in the output image.  The default value is the color
// red.
//
// Arguments:
//	writer: A handle to a summary writer.
//	step: The step to write the summary for.
//	tag: Scalar. Used to build the `tag` attribute of the summary values.
//	tensor: 4-D of shape `[batch_size, height, width, channels]` where
// `channels` is 1, 3, or 4.
//	bad_color: Color to use for pixels with non-finite values.
//
// Returns the created operation.
func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, bad_color tf.Output, optional ...WriteImageSummaryAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "WriteImageSummary",
		Input: []tf.Input{
			writer, step, tag, tensor, bad_color,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
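
// exampleWriteImageSummary is an illustrative sketch, not generated code. It wires
// WriteImageSummary into a graph with the max_images attribute capped at 1; the
// writer, step, tag, tensor and badColor outputs are assumed to be built elsewhere.
func exampleWriteImageSummary(scope *Scope, writer, step, tag, tensor, badColor tf.Output) *tf.Operation {
	// Optional attributes are passed as trailing functional options.
	return WriteImageSummary(scope, writer, step, tag, tensor, badColor,
		WriteImageSummaryMaxImages(1))
}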

// Outputs a `tf.Event` protocol buffer.
//
// When CreateSummaryDbWriter is being used, this op can be useful for
// importing data from event logs.
//
// Arguments:
//	writer: A handle to a summary writer.
//	event: A string containing a binary-encoded tf.Event proto.
//
// Returns the created operation.
func ImportEvent(scope *Scope, writer tf.Output, event tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ImportEvent",
		Input: []tf.Input{
			writer, event,
		},
	}
	return scope.AddOperation(opspec)
}

// Outputs a `Summary` protocol buffer with a tensor.
//
// Arguments:
//	writer: A handle to a summary writer.
//	step: The step to write the summary for.
//	tensor: A tensor to serialize.
//	tag: The summary's tag.
//	summary_metadata: Serialized SummaryMetadata protocol buffer containing
// plugin-related metadata for this summary.
//
// Returns the created operation.
func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, summary_metadata tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "WriteSummary",
		Input: []tf.Input{
			writer, step, tensor, tag, summary_metadata,
		},
	}
	return scope.AddOperation(opspec)
}

// Creates summary database writer accessible by given resource handle.
//
// This can be used to write tensors from the execution graph directly
// to a database. Only SQLite is supported right now. This function
// will create the schema if it doesn't exist. Entries in the Users,
// Experiments, and Runs tables will be created automatically if they
// don't already exist.
//
// Arguments:
//	writer: Handle to SummaryWriter resource to overwrite.
//	db_uri: For example "file:/tmp/foo.sqlite".
//	experiment_name: Can't contain ASCII control characters or <>. Case
// sensitive. If empty, then the Run will not be associated with any
// Experiment.
//	run_name: Can't contain ASCII control characters or <>. Case sensitive.
// If empty, then each Tag will not be associated with any Run.
//	user_name: Must be valid as both a DNS label and Linux username. If
// empty, then the Experiment will not be associated with any User.
//
// Returns the created operation.
func CreateSummaryDbWriter(scope *Scope, writer tf.Output, db_uri tf.Output, experiment_name tf.Output, run_name tf.Output, user_name tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CreateSummaryDbWriter",
		Input: []tf.Input{
			writer, db_uri, experiment_name, run_name, user_name,
		},
	}
	return scope.AddOperation(opspec)
}

// Creates a summary file writer accessible by the given resource handle.
//
// Arguments:
//	writer: A handle to the summary writer resource
//	logdir: Directory where the event file will be written.
//	max_queue: Size of the queue of pending events and summaries.
//	flush_millis: How often, in milliseconds, to flush the pending events and
// summaries to disk.
//	filename_suffix: Every event file's name is suffixed with this suffix.
//
// Returns the created operation.
func CreateSummaryFileWriter(scope *Scope, writer tf.Output, logdir tf.Output, max_queue tf.Output, flush_millis tf.Output, filename_suffix tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "CreateSummaryFileWriter",
		Input: []tf.Input{
			writer, logdir, max_queue, flush_millis, filename_suffix,
		},
	}
	return scope.AddOperation(opspec)
}

// FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)

// FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization; between 2 and 8, inclusive.
// If not specified, defaults to 8
func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
//
// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
//
// Arguments:
//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVarsPerChannel
// operation, shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVarsPerChannel
// operation, shape same as `gradients`.
//	min, max: Quantization interval, floats of shape `[d]`.
//
// Returns Backpropagated gradients w.r.t. inputs, shape same as `inputs`:
// `gradients * (inputs >= min && inputs <= max)`; backpropagated gradients
// w.r.t. min parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs < min))`; and backpropagated gradients
// w.r.t. max parameter, shape `[d]`:
// `sum_per_d(gradients * (inputs > max))`.
func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "FakeQuantWithMinMaxVarsPerChannelGradient",
		Input: []tf.Input{
			gradients, inputs, min, max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
//
// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
// becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
// are placed in `outputs[i]` in lexicographic order of `js`, and the first
// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
// In detail,
//
// ```python
//     outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
//
//     outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
// ```
//
// `data.shape` must start with `partitions.shape`.
//
// For example:
//
// ```python
//     # Scalar partitions.
//     partitions = 1
//     num_partitions = 2
//     data = [10, 20]
//     outputs[0] = []  # Empty with shape [0, 2]
//     outputs[1] = [[10, 20]]
//
//     # Vector partitions.
//     partitions = [0, 0, 1, 1, 0]
//     num_partitions = 2
//     data = [10, 20, 30, 40, 50]
//     outputs[0] = [10, 20, 50]
//     outputs[1] = [30, 40]
// ```
//
// See `dynamic_stitch` for an example on how to merge partitions back.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
// </div>
//
// Arguments:
//
//	partitions: Any shape.  Indices in the range `[0, num_partitions)`.
//	num_partitions: The number of partitions to output.
func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_partitions": num_partitions}
	opspec := tf.OpSpec{
		Type: "DynamicPartition",
		Input: []tf.Input{
			data, partitions,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
		scope.UpdateErr("DynamicPartition", err)
		return
	}
	return outputs
}
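
// exampleDynamicPartition is an illustrative sketch, not generated code. It mirrors
// the vector example in the doc comment above, using this package's Const wrapper
// (defined elsewhere in the generated file) to build the inputs.
func exampleDynamicPartition(scope *Scope) []tf.Output {
	data := Const(scope.SubScope("data"), []int32{10, 20, 30, 40, 50})
	partitions := Const(scope.SubScope("partitions"), []int32{0, 0, 1, 1, 0})
	// Once the graph runs: outputs[0] == [10, 20, 50] and outputs[1] == [30, 40].
	return DynamicPartition(scope, data, partitions, 2)
}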

// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
type MutableHashTableOfTensorsV2Attr func(optionalAttr)

// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
// If not specified, defaults to false
func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
// If not specified, defaults to <>
func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
	return func(m optionalAttr) {
		m["value_shape"] = value
	}
}

// Creates an empty hash table.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a vector. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableHashTableOfTensorsV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
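
// exampleMutableHashTableOfTensorsV2 is an illustrative sketch, not generated code.
// It creates a string-to-int64-vector table shared across sessions under a fixed
// name; the shared_name value here is purely illustrative.
func exampleMutableHashTableOfTensorsV2(scope *Scope) tf.Output {
	return MutableHashTableOfTensorsV2(scope, tf.String, tf.Int64,
		MutableHashTableOfTensorsV2SharedName("example_table"))
}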

// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
type ResourceApplyProximalAdagradAttr func(optionalAttr)

// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var and accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
//
// accum += grad * grad
// prox_v = var - lr * grad * (1 / sqrt(accum))
// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyProximalAdagrad",
		Input: []tf.Input{
			var_, accum, lr, l1, l2, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
type MutableHashTableV2Attr func(optionalAttr)

// MutableHashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func MutableHashTableV2Container(value string) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MutableHashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
// value: If true and shared_name is empty, the table is shared
// using the node name.
// If not specified, defaults to false
func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// Creates an empty hash table.
//
// This op creates a mutable hash table, specifying the type of its keys and
// values. Each value must be a scalar. Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MutableHashTableV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
type MapUnstageNoKeyAttr func(optionalAttr)

// MapUnstageNoKeyCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapUnstageNoKeyContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapUnstageNoKeySharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes and returns a random (key, value)
//
// from the underlying container.   If the underlying container
// does not contain elements, the op will block until it does.
func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapUnstageNoKey",
		Input: []tf.Input{
			indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	key = op.Output(idx)
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("MapUnstageNoKey", err)
		return
	}
	return key, values
}

// HashTableV2Attr is an optional argument to HashTableV2.
type HashTableV2Attr func(optionalAttr)

// HashTableV2Container sets the optional container attribute to value.
//
// value: If non-empty, this table is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func HashTableV2Container(value string) HashTableV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// HashTableV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this table is shared under the given name across
// multiple sessions.
// If not specified, defaults to ""
func HashTableV2SharedName(value string) HashTableV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
// value: If true and shared_name is empty, the table is shared
// using the node name.
// If not specified, defaults to false
func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr {
	return func(m optionalAttr) {
		m["use_node_name_sharing"] = value
	}
}

// Creates a non-initialized hash table.
//
// This op creates a hash table, specifying the type of its keys and values.
// Before using the table you will have to initialize it.  After initialization the
// table will be immutable.
//
// Arguments:
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns Handle to a table.
func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "HashTableV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Replaces the contents of the table with the specified keys and values.
//
// The tensor `keys` must be of the same type as the keys of the table.
// The tensor `values` must be of the type of the table values.
//
// Arguments:
//	table_handle: Handle to the table.
//	keys: Any shape.  Keys to look up.
//	values: Values to associate with keys.
//
// Returns the created operation.
func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LookupTableImportV2",
		Input: []tf.Input{
			table_handle, keys, values,
		},
	}
	return scope.AddOperation(opspec)
}
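
// exampleLookupTableImportV2 is an illustrative sketch, not generated code. It
// replaces the contents of a mutable table created by MutableHashTableOfTensorsV2
// above; keys (rank-1) and values (one vector per key) are assumed built elsewhere.
func exampleLookupTableImportV2(scope *Scope, keys, values tf.Output) *tf.Operation {
	table := MutableHashTableOfTensorsV2(scope, tf.String, tf.Int64)
	return LookupTableImportV2(scope, table, keys, values)
}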

// MapPeekAttr is an optional argument to MapPeek.
type MapPeekAttr func(optionalAttr)

// MapPeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekCapacity(value int64) MapPeekAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekMemoryLimit(value int64) MapPeekAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// MapPeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapPeekContainer(value string) MapPeekAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// MapPeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapPeekSharedName(value string) MapPeekAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op peeks at the values at the specified key.  If the
//
// underlying container does not contain this key
// this op will block until it does.
func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "MapPeek",
		Input: []tf.Input{
			key, indices,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("MapPeek", err)
		return
	}
	return values
}

// Returns (x - y)(x - y) element-wise.
//
// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SquaredDifference",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Forwards the input to the output.
//
// This operator represents the loop termination condition used by the
// "pivot" switches of a loop.
//
// Arguments:
//	input: A boolean scalar, representing the branch predicate of the Switch op.
//
// Returns The same tensor as `input`.
func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "LoopCond",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizedMulAttr is an optional argument to QuantizedMul.
type QuantizedMulAttr func(optionalAttr)

// QuantizedMulToutput sets the optional Toutput attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr {
	return func(m optionalAttr) {
		m["Toutput"] = value
	}
}

// Returns x * y element-wise, working on quantized buffers.
//
// Arguments:
//
//
//	min_x: The float value that the lowest quantized `x` value represents.
//	max_x: The float value that the highest quantized `x` value represents.
//	min_y: The float value that the lowest quantized `y` value represents.
//	max_y: The float value that the highest quantized `y` value represents.
//
// Returns the float value that the lowest quantized output value represents
// and the float value that the highest quantized output value represents.
//
// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedMul",
		Input: []tf.Input{
			x, y, min_x, max_x, min_y, max_y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
type QuantizedMatMulAttr func(optionalAttr)

// QuantizedMatMulToutput sets the optional Toutput attribute to value.
// If not specified, defaults to DT_QINT32
func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr {
	return func(m optionalAttr) {
		m["Toutput"] = value
	}
}

// QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
//
// value: If true, `a` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
//
// value: If true, `b` is transposed before multiplication.
// If not specified, defaults to false
func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
//
// value: The type of output produced by activation function
// following this operation.
// If not specified, defaults to DT_QUINT8
func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr {
	return func(m optionalAttr) {
		m["Tactivation"] = value
	}
}

// Perform a quantized matrix multiplication of `a` by the matrix `b`.
//
// The inputs must be two-dimensional matrices and the inner dimension of
// `a` (after being transposed if `transpose_a` is non-zero) must match the
// outer dimension of `b` (after being transposed if `transpose_b` is
// non-zero).
//
// Arguments:
//	a: Must be a two-dimensional tensor.
//	b: Must be a two-dimensional tensor.
//	min_a: The float value that the lowest quantized `a` value represents.
//	max_a: The float value that the highest quantized `a` value represents.
//	min_b: The float value that the lowest quantized `b` value represents.
//	max_b: The float value that the highest quantized `b` value represents.
//
// Returns the float value that the lowest quantized output value represents
// and the float value that the highest quantized output value represents.
func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizedMatMul",
		Input: []tf.Input{
			a, b, min_a, max_a, min_b, max_b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
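
// exampleQuantizedMatMul is an illustrative sketch, not generated code. It multiplies
// quantized matrices a and b (with their float dequantization ranges) while asking
// for b to be transposed first; all inputs are assumed to be built elsewhere.
func exampleQuantizedMatMul(scope *Scope, a, b, minA, maxA, minB, maxB tf.Output) (tf.Output, tf.Output, tf.Output) {
	return QuantizedMatMul(scope, a, b, minA, maxA, minB, maxB,
		QuantizedMatMulTransposeB(true))
}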

// A placeholder op that passes through `input` when its output is not fed.
//
// Arguments:
//	input: The default value to produce when `output` is not fed.
//	shape: The (possibly partial) shape of the tensor.
//
// Returns A placeholder tensor that defaults to `input` if it is not fed.
func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderWithDefault",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
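
// examplePlaceholderWithDefault is an illustrative sketch, not generated code. It
// uses tf.MakeShape from the parent tensorflow package to declare a partially known
// shape (-1 marks an unknown batch dimension); the default value is built elsewhere.
func examplePlaceholderWithDefault(scope *Scope, def tf.Output) tf.Output {
	return PlaceholderWithDefault(scope, def, tf.MakeShape(-1, 28, 28, 1))
}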

// Returns the complex conjugate of a complex number.
//
// Given a tensor `input` of complex numbers, this operation returns a tensor of
// complex numbers that are the complex conjugate of each element in `input`. The
// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
// real part and *b* is the imaginary part.
//
// The complex conjugate returned by this operation is of the form \\(a - bj\\).
//
// For example:
//
// ```
// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
// ```
func Conj(scope *Scope, input tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Conj",
		Input: []tf.Input{
			input,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
type ResourceSparseApplyMomentumAttr func(optionalAttr)

// ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, the tensor passed to compute grad will be
// var - lr * momentum * accum, so in the end, the var you get is actually
// var - lr * momentum * accum.
// If not specified, defaults to false
func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
//
// That is, for rows we have grad for, we update var and accum as follows:
//
// accum = accum * momentum + grad
// var -= lr * accum
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Learning rate. Must be a scalar.
//	grad: The gradient.
//	indices: A vector of indices into the first dimension of var and accum.
//	momentum: Momentum. Must be a scalar.
//
// Returns the created operation.
func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceSparseApplyMomentum",
		Input: []tf.Input{
			var_, accum, lr, grad, indices, momentum,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// Creates a sequence of numbers.
//
// This operation creates a sequence of numbers that begins at `start` and
// extends by increments of `delta` up to but not including `limit`.
//
// For example:
//
// ```
// # 'start' is 3
// # 'limit' is 18
// # 'delta' is 3
// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
// ```
//
// Arguments:
//	start: 0-D (scalar). First entry in the sequence.
//	limit: 0-D (scalar). Upper limit of sequence, exclusive.
//	delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
//
// Returns 1-D.
func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Range",
		Input: []tf.Input{
			start, limit, delta,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
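
// exampleRange is an illustrative sketch, not generated code. It reproduces the
// doc example above with this package's Const wrapper; once the graph runs, the
// result is [3 6 9 12 15].
func exampleRange(scope *Scope) tf.Output {
	start := Const(scope.SubScope("start"), int32(3))
	limit := Const(scope.SubScope("limit"), int32(18))
	delta := Const(scope.SubScope("delta"), int32(3))
	return Range(scope, start, limit, delta)
}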

// Computes gradients for SparseSegmentSqrtN.
//
// Returns tensor "output" with same shape as grad, except for dimension 0 whose
// value is output_dim0.
//
// Arguments:
//	grad: gradient propagated to the SparseSegmentSqrtN op.
//	indices: indices passed to the corresponding SparseSegmentSqrtN op.
//	segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
//	output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSqrtNGrad",
		Input: []tf.Input{
			grad, indices, segment_ids, output_dim0,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the mean along sparse segments of a tensor.
//
// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
// segments.
//
// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
// dimension, selecting a subset of dimension 0, specified by `indices`.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentMean",
		Input: []tf.Input{
			data, indices, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Pop the element at the top of the stack.
//
// Arguments:
//	handle: The handle to a stack.
//	elem_type: The type of the elem that is popped.
//
// Returns The tensor that is popped from the top of the stack.
func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"elem_type": elem_type}
	opspec := tf.OpSpec{
		Type: "StackPopV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along sparse segments of a tensor.
//
// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
// segments.
//
// For example:
//
// ```python
// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
// tf.sparse_segment_sum_with_num_segments(
//     c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
// # => [[0 0 0 0]
// #     [0 0 0 0]
// #     [0 0 0 0]]
//
// tf.sparse_segment_sum_with_num_segments(c,
//                                         tf.constant([0, 1]),
//                                         tf.constant([0, 2]),
//                                         num_segments=4)
// # => [[ 1  2  3  4]
// #     [ 0  0  0  0]
// #     [-1 -2 -3 -4]
// #     [ 0  0  0  0]]
// ```
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `num_segments`.
func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSumWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SparseToDenseAttr is an optional argument to SparseToDense.
type SparseToDenseAttr func(optionalAttr)

// SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
//
// value: If true, indices are checked to make sure they are sorted in
// lexicographic order and that there are no repeats.
// If not specified, defaults to true
func SparseToDenseValidateIndices(value bool) SparseToDenseAttr {
	return func(m optionalAttr) {
		m["validate_indices"] = value
	}
}

// Converts a sparse representation into a dense tensor.
//
// Builds an array `dense` with shape `output_shape` such that
//
// ```
// # If sparse_indices is scalar
// dense[i] = (i == sparse_indices ? sparse_values : default_value)
//
// # If sparse_indices is a vector, then for each i
// dense[sparse_indices[i]] = sparse_values[i]
//
// # If sparse_indices is an n by d matrix, then for each i in [0, n)
// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
// ```
//
// All other values in `dense` are set to `default_value`.  If `sparse_values` is a
// scalar, all sparse indices are set to this single value.
//
// Indices should be sorted in lexicographic order, and indices must not
// contain any repeats. If `validate_indices` is true, these properties
// are checked during execution.
//
// Arguments:
//	sparse_indices: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
// index where `sparse_values[i]` will be placed.
//	output_shape: 1-D.  Shape of the dense output tensor.
//	sparse_values: 1-D.  Values corresponding to each row of `sparse_indices`,
// or a scalar value to be used for all sparse indices.
//	default_value: Scalar value to set for indices not specified in
// `sparse_indices`.
//
// Returns Dense output tensor of shape `output_shape`.
func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseToDense",
		Input: []tf.Input{
			sparse_indices, output_shape, sparse_values, default_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
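
// exampleSparseToDense is an illustrative sketch, not generated code. It scatters
// two values into a 1-D dense tensor of length 5, filling the rest with zero; the
// indices are sorted and distinct, so validation can stay enabled (the default).
func exampleSparseToDense(scope *Scope) tf.Output {
	indices := Const(scope.SubScope("indices"), []int64{1, 3})
	shape := Const(scope.SubScope("shape"), []int64{5})
	values := Const(scope.SubScope("values"), []int32{7, 9})
	zero := Const(scope.SubScope("default"), int32(0))
	// Yields [0 7 0 9 0] once the graph runs.
	return SparseToDense(scope, indices, shape, values, zero)
}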

// Counts the number of occurrences of each value in an integer array.
//
// Outputs a vector with length `size` and the same dtype as `weights`. If
// `weights` are empty, then index `i` stores the number of times the value `i` is
// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
// the value in `weights` at each index where the corresponding value in `arr` is
// `i`.
//
// Values in `arr` outside of the range [0, size) are ignored.
//
// Arguments:
//	arr: int32 `Tensor`.
//	size: non-negative int32 scalar `Tensor`.
//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
// equal to 1.
//
// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for
// each value in the range [0, size).
func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Bincount",
		Input: []tf.Input{
			arr, size, weights,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
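
// exampleBincount is an illustrative sketch, not generated code. With per-element
// weights, bin i accumulates the weights of the entries of arr equal to i.
func exampleBincount(scope *Scope) tf.Output {
	arr := Const(scope.SubScope("arr"), []int32{1, 1, 0, 2})
	size := Const(scope.SubScope("size"), int32(4))
	weights := Const(scope.SubScope("weights"), []float32{0.5, 0.5, 1, 2})
	// Yields [1 1 2 0] once the graph runs.
	return Bincount(scope, arr, size, weights)
}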

// Computes the sum along sparse segments of a tensor.
//
// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
// segments.
//
// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
// dimension, selecting a subset of dimension 0, specified by `indices`.
//
// For example:
//
// ```python
// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
// # Select two rows, one segment.
// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
// # => [[0 0 0 0]]
//
// # Select two rows, two segments.
// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
// # => [[ 1  2  3  4]
// #     [-1 -2 -3 -4]]
//
// # Select all rows, two segments.
// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
// # => [[0 0 0 0]
// #     [5 6 7 8]]
//
// # Which is equivalent to:
// tf.segment_sum(c, tf.constant([0, 0, 1]))
// ```
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSum",
		Input: []tf.Input{
			data, indices, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
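
// exampleSparseSegmentSum is an illustrative sketch, not generated code. It mirrors
// the first doc example above: two rows selected into a single segment.
func exampleSparseSegmentSum(scope *Scope) tf.Output {
	c := Const(scope.SubScope("c"), [][]int32{{1, 2, 3, 4}, {-1, -2, -3, -4}, {5, 6, 7, 8}})
	indices := Const(scope.SubScope("indices"), []int32{0, 1})
	segmentIDs := Const(scope.SubScope("segment_ids"), []int32{0, 0})
	// Yields [[0 0 0 0]] once the graph runs: rows 0 and 1 cancel out.
	return SparseSegmentSum(scope, c, indices, segmentIDs)
}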

// Computes hyperbolic sine of x element-wise.
func Sinh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Sinh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along segments of a tensor.
//
// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
// segments.
//
// Computes a tensor such that
// `output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
// that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
// need not be sorted and need not cover all values in the full
// range of valid values.
//
// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
// If the given segment ID `i` is negative, the value is dropped and will not be
// added to the sum of the segment.
//
// `num_segments` should equal the number of distinct segment IDs.
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
// </div>
//
// Arguments:
//
//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
//
//
// Returns Has same shape as data, except for the first `segment_ids.rank`
// dimensions, which are replaced with a single dimension which has size
// `num_segments`.
func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "UnsortedSegmentSum",
		Input: []tf.Input{
			data, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns which elements of x are finite.
//
// @compatibility(numpy)
// Equivalent to np.isfinite
// @end_compatibility
func IsFinite(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "IsFinite",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// MatMulAttr is an optional argument to MatMul.
type MatMulAttr func(optionalAttr)

// MatMulTransposeA sets the optional transpose_a attribute to value.
//
// value: If true, "a" is transposed before multiplication.
// If not specified, defaults to false
func MatMulTransposeA(value bool) MatMulAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// MatMulTransposeB sets the optional transpose_b attribute to value.
//
// value: If true, "b" is transposed before multiplication.
// If not specified, defaults to false
func MatMulTransposeB(value bool) MatMulAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// Multiply the matrix "a" by the matrix "b".
//
// The inputs must be two-dimensional matrices and the inner dimension of
// "a" (after being transposed if transpose_a is true) must match the
   1453 // outer dimension of "b" (after being transposed if transpose_b is
   1454 // true).
   1455 //
   1456 // *Note*: The default kernel implementation for MatMul on GPUs uses
   1457 // cuBLAS.
   1458 func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output) {
   1459 	if scope.Err() != nil {
   1460 		return
   1461 	}
   1462 	attrs := map[string]interface{}{}
   1463 	for _, a := range optional {
   1464 		a(attrs)
   1465 	}
   1466 	opspec := tf.OpSpec{
   1467 		Type: "MatMul",
   1468 		Input: []tf.Input{
   1469 			a, b,
   1470 		},
   1471 		Attrs: attrs,
   1472 	}
   1473 	op := scope.AddOperation(opspec)
   1474 	return op.Output(0)
   1475 }
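
// exampleMatMul is an illustrative sketch added for documentation; it is not
// generator output, and it assumes the Const wrapper available elsewhere in
// this package. Transposing "b" makes the inner dimensions match:
// (2x3) x (3x2) -> 2x2.
func exampleMatMul(s *Scope) tf.Output {
	a := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
	b := Const(s, [][]float32{{1, 0, 1}, {0, 1, 0}})
	return MatMul(s, a, b, MatMulTransposeB(true))
}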
   1476 
   1477 // Selects elements from `x` or `y`, depending on `condition`.
   1478 //
   1479 // The `x` and `y` tensors must all have the same shape, and the
   1480 // output will also have that shape.
   1481 //
   1482 // The `condition` tensor must be a scalar if `x` and `y` are scalars.
   1483 // If `x` and `y` are vectors or higher rank, then `condition` must be either a
   1484 // scalar, a vector with size matching the first dimension of `x`, or must have
   1485 // the same shape as `x`.
   1486 //
   1487 // The `condition` tensor acts as a mask that chooses, based on the value at each
   1488 // element, whether the corresponding element / row in the output should be
   1489 // taken from `x` (if true) or `y` (if false).
   1490 //
   1491 // If `condition` is a vector and `x` and `y` are higher rank matrices, then
   1492 // it chooses which row (outer dimension) to copy from `x` and `y`.
   1493 // If `condition` has the same shape as `x` and `y`, then it chooses which
   1494 // element to copy from `x` and `y`.
   1495 //
   1496 // For example:
   1497 //
   1498 // ```python
   1499 // # 'condition' tensor is [[True,  False]
   1500 // #                        [False, True]]
   1501 // # 't' is [[1, 2],
   1502 // #         [3, 4]]
   1503 // # 'e' is [[5, 6],
   1504 // #         [7, 8]]
   1505 // select(condition, t, e)  # => [[1, 6], [7, 4]]
   1506 //
   1507 //
   1508 // # 'condition' tensor is [True, False]
   1509 // # 't' is [[1, 2],
   1510 // #         [3, 4]]
   1511 // # 'e' is [[5, 6],
   1512 // #         [7, 8]]
   1513 // select(condition, t, e) ==> [[1, 2],
   1514 //                              [7, 8]]
   1515 //
   1516 // ```
   1517 //
   1518 // Arguments:
   1519 //
   1520 //	x: A `Tensor` which may have the same shape as `condition`.
   1521 // If `condition` is rank 1, `x` may have higher rank,
   1522 // but its first dimension must match the size of `condition`.
   1523 //	y: A `Tensor` with the same type and shape as `x`.
   1524 //
   1525 // Returns A `Tensor` with the same type and shape as `x` and `y`.
   1526 func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output) {
   1527 	if scope.Err() != nil {
   1528 		return
   1529 	}
   1530 	opspec := tf.OpSpec{
   1531 		Type: "Select",
   1532 		Input: []tf.Input{
   1533 			condition, x, y,
   1534 		},
   1535 	}
   1536 	op := scope.AddOperation(opspec)
   1537 	return op.Output(0)
   1538 }
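
// exampleSelect is an illustrative sketch added for documentation; it is not
// generator output, and it assumes the Const wrapper available elsewhere in
// this package. It mirrors the second Python example above: a rank-1
// condition picks whole rows, giving [[1, 2], [7, 8]].
func exampleSelect(s *Scope) tf.Output {
	condition := Const(s, []bool{true, false})
	t := Const(s, [][]int32{{1, 2}, {3, 4}})
	e := Const(s, [][]int32{{5, 6}, {7, 8}})
	return Select(s, condition, t, e)
}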
   1539 
   1540 // Returns the truth value of x OR y element-wise.
   1541 //
   1542 // *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
   1543 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   1544 func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   1545 	if scope.Err() != nil {
   1546 		return
   1547 	}
   1548 	opspec := tf.OpSpec{
   1549 		Type: "LogicalOr",
   1550 		Input: []tf.Input{
   1551 			x, y,
   1552 		},
   1553 	}
   1554 	op := scope.AddOperation(opspec)
   1555 	return op.Output(0)
   1556 }
   1557 
   1558 // Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
   1559 //
   1560 // The regularized incomplete beta integral is defined as:
   1561 //
   1562 //
   1563 // \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
   1564 //
   1565 // where
   1566 //
   1567 //
   1568 // \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
   1569 //
   1570 //
   1571 // is the incomplete beta function and \\(B(a, b)\\) is the *complete*
   1572 // beta function.
   1573 func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output) {
   1574 	if scope.Err() != nil {
   1575 		return
   1576 	}
   1577 	opspec := tf.OpSpec{
   1578 		Type: "Betainc",
   1579 		Input: []tf.Input{
   1580 			a, b, x,
   1581 		},
   1582 	}
   1583 	op := scope.AddOperation(opspec)
   1584 	return op.Output(0)
   1585 }
   1586 
   1587 // Computes the sum along sparse segments of a tensor divided by the sqrt of N.
   1588 //
   1589 // N is the size of the segment being reduced.
   1590 //
   1591 // Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
   1592 // missing, the `output` tensor at that position will be zeroed.
   1593 //
   1594 // Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
   1595 // segments.
   1596 //
   1597 // Arguments:
   1598 //
   1599 //	indices: A 1-D tensor. Has same rank as `segment_ids`.
   1600 //	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
   1601 //	num_segments: Should equal the number of distinct segment IDs.
   1602 //
   1603 // Returns Has same shape as data, except for dimension 0 which
   1604 // has size `k`, the number of segments.
   1605 func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
   1606 	if scope.Err() != nil {
   1607 		return
   1608 	}
   1609 	opspec := tf.OpSpec{
   1610 		Type: "SparseSegmentSqrtNWithNumSegments",
   1611 		Input: []tf.Input{
   1612 			data, indices, segment_ids, num_segments,
   1613 		},
   1614 	}
   1615 	op := scope.AddOperation(opspec)
   1616 	return op.Output(0)
   1617 }
   1618 
   1619 // Compute the upper regularized incomplete Gamma function `Q(a, x)`.
   1620 //
   1621 // The upper regularized incomplete Gamma function is defined as:
   1622 //
   1623 // \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
   1624 //
   1625 // where
   1626 //
   1627 // \\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
   1628 //
   1629 // is the upper incomplete Gamma function.
   1630 //
   1631 // Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
   1632 // Gamma function.
   1633 func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
   1634 	if scope.Err() != nil {
   1635 		return
   1636 	}
   1637 	opspec := tf.OpSpec{
   1638 		Type: "Igammac",
   1639 		Input: []tf.Input{
   1640 			a, x,
   1641 		},
   1642 	}
   1643 	op := scope.AddOperation(opspec)
   1644 	return op.Output(0)
   1645 }
   1646 
   1647 // FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
   1648 type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)
   1649 
   1650 // FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
   1651 //
   1652 // value: The bitwidth of the quantization; between 2 and 8, inclusive.
   1653 // If not specified, defaults to 8
   1654 func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
   1655 	return func(m optionalAttr) {
   1656 		m["num_bits"] = value
   1657 	}
   1658 }
   1659 
   1660 // FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
   1661 //
   1662 // value: Whether to quantize into 2^num_bits - 1 distinct values.
   1663 // If not specified, defaults to false
   1664 func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
   1665 	return func(m optionalAttr) {
   1666 		m["narrow_range"] = value
   1667 	}
   1668 }
   1669 
   1670 // Compute gradients for a FakeQuantWithMinMaxVars operation.
   1671 //
   1672 // Arguments:
   1673 //	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
   1674 //	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
   1675 //	min, max: Quantization interval, scalar floats.
   1676 //
   1677 //
   1678 //
   1679 // Returns Backpropagated gradients w.r.t. inputs:
   1680 // `gradients * (inputs >= min && inputs <= max)`.  Backpropagated gradients w.r.t. min parameter:
   1681 // `sum(gradients * (inputs < min))`.  Backpropagated gradients w.r.t. max parameter:
   1682 // `sum(gradients * (inputs > max))`.
   1683 func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
   1684 	if scope.Err() != nil {
   1685 		return
   1686 	}
   1687 	attrs := map[string]interface{}{}
   1688 	for _, a := range optional {
   1689 		a(attrs)
   1690 	}
   1691 	opspec := tf.OpSpec{
   1692 		Type: "FakeQuantWithMinMaxVarsGradient",
   1693 		Input: []tf.Input{
   1694 			gradients, inputs, min, max,
   1695 		},
   1696 		Attrs: attrs,
   1697 	}
   1698 	op := scope.AddOperation(opspec)
   1699 	return op.Output(0), op.Output(1), op.Output(2)
   1700 }
   1701 
   1702 // LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
   1703 type LogUniformCandidateSamplerAttr func(optionalAttr)
   1704 
   1705 // LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
   1706 //
   1707 // value: If either seed or seed2 are set to be non-zero, the random number
   1708 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   1709 // random seed.
   1710 // If not specified, defaults to 0
   1711 func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr {
   1712 	return func(m optionalAttr) {
   1713 		m["seed"] = value
   1714 	}
   1715 }
   1716 
   1717 // LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
   1718 //
   1719 // value: A second seed to avoid seed collision.
   1720 // If not specified, defaults to 0
   1721 func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr {
   1722 	return func(m optionalAttr) {
   1723 		m["seed2"] = value
   1724 	}
   1725 }
   1726 
   1727 // Generates labels for candidate sampling with a log-uniform distribution.
   1728 //
   1729 // See explanations of candidate sampling and the data formats at
   1730 // go/candidate-sampling.
   1731 //
   1732 // For each batch, this op picks a single set of sampled candidate labels.
   1733 //
   1734 // The advantages of sampling candidates per-batch are simplicity and the
   1735 // possibility of efficient dense matrix multiplication. The disadvantage is that
   1736 // the sampled candidates must be chosen independently of the context and of the
   1737 // true labels.
   1738 //
   1739 // Arguments:
   1740 //	true_classes: A batch_size * num_true matrix, in which each row contains the
   1741 // IDs of the num_true target_classes in the corresponding original label.
   1742 //	num_true: Number of true labels per context.
   1743 //	num_sampled: Number of candidates to randomly sample.
   1744 //	unique: If unique is true, we sample with rejection, so that all sampled
   1745 // candidates in a batch are unique. This requires some approximation to
   1746 // estimate the post-rejection sampling probabilities.
   1747 //	range_max: The sampler will sample integers from the interval [0, range_max).
   1748 //
   1749 // Returns A vector of length num_sampled, in which each element is
   1750 // the ID of a sampled candidate.  A batch_size * num_true matrix, representing
   1751 // the number of times each candidate is expected to occur in a batch
   1752 // of sampled candidates. If unique=true, then this is a probability.  A vector of length num_sampled, for each sampled
   1753 // candidate representing the number of times the candidate is expected
   1754 // to occur in a batch of sampled candidates.  If unique=true, then this is a
   1755 // probability.
   1756 func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
   1757 	if scope.Err() != nil {
   1758 		return
   1759 	}
   1760 	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
   1761 	for _, a := range optional {
   1762 		a(attrs)
   1763 	}
   1764 	opspec := tf.OpSpec{
   1765 		Type: "LogUniformCandidateSampler",
   1766 		Input: []tf.Input{
   1767 			true_classes,
   1768 		},
   1769 		Attrs: attrs,
   1770 	}
   1771 	op := scope.AddOperation(opspec)
   1772 	return op.Output(0), op.Output(1), op.Output(2)
   1773 }
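
// exampleLogUniformCandidateSampler is an illustrative sketch added for
// documentation; it is not generator output, and it assumes the Const wrapper
// available elsewhere in this package. For a batch of two rows with one true
// label each, it samples 5 unique candidate IDs from [0, 1000), with a fixed
// seed for reproducibility.
func exampleLogUniformCandidateSampler(s *Scope) (sampled, trueExpected, sampledExpected tf.Output) {
	trueClasses := Const(s, [][]int64{{42}, {7}}) // batch_size x num_true
	return LogUniformCandidateSampler(s, trueClasses, 1, 5, true, 1000,
		LogUniformCandidateSamplerSeed(17))
}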
   1774 
   1775 // ApproximateEqualAttr is an optional argument to ApproximateEqual.
   1776 type ApproximateEqualAttr func(optionalAttr)
   1777 
   1778 // ApproximateEqualTolerance sets the optional tolerance attribute to value.
   1779 // If not specified, defaults to 1e-05
   1780 func ApproximateEqualTolerance(value float32) ApproximateEqualAttr {
   1781 	return func(m optionalAttr) {
   1782 		m["tolerance"] = value
   1783 	}
   1784 }
   1785 
   1786 // Returns the truth value of abs(x-y) < tolerance element-wise.
   1787 func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output) {
   1788 	if scope.Err() != nil {
   1789 		return
   1790 	}
   1791 	attrs := map[string]interface{}{}
   1792 	for _, a := range optional {
   1793 		a(attrs)
   1794 	}
   1795 	opspec := tf.OpSpec{
   1796 		Type: "ApproximateEqual",
   1797 		Input: []tf.Input{
   1798 			x, y,
   1799 		},
   1800 		Attrs: attrs,
   1801 	}
   1802 	op := scope.AddOperation(opspec)
   1803 	return op.Output(0)
   1804 }
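
// exampleApproximateEqual is an illustrative sketch added for documentation;
// it is not generator output, and it assumes the Const wrapper available
// elsewhere in this package. With a tolerance of 1e-3 the result is
// [true, false].
func exampleApproximateEqual(s *Scope) tf.Output {
	x := Const(s, []float32{1.0, 2.0})
	y := Const(s, []float32{1.0001, 2.5})
	return ApproximateEqual(s, x, y, ApproximateEqualTolerance(1e-3))
}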
   1805 
   1806 // Returns x / y element-wise.
   1807 //
   1808 // *NOTE*: `Div` supports broadcasting. More about broadcasting
   1809 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   1810 func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   1811 	if scope.Err() != nil {
   1812 		return
   1813 	}
   1814 	opspec := tf.OpSpec{
   1815 		Type: "Div",
   1816 		Input: []tf.Input{
   1817 			x, y,
   1818 		},
   1819 	}
   1820 	op := scope.AddOperation(opspec)
   1821 	return op.Output(0)
   1822 }
   1823 
   1824 // Returns x * y element-wise.
   1825 //
   1826 // *NOTE*: `Multiply` supports broadcasting. More about broadcasting
   1827 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   1828 func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   1829 	if scope.Err() != nil {
   1830 		return
   1831 	}
   1832 	opspec := tf.OpSpec{
   1833 		Type: "Mul",
   1834 		Input: []tf.Input{
   1835 			x, y,
   1836 		},
   1837 	}
   1838 	op := scope.AddOperation(opspec)
   1839 	return op.Output(0)
   1840 }
   1841 
   1842 // SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
   1843 type SparseReduceSumSparseAttr func(optionalAttr)
   1844 
   1845 // SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
   1846 //
   1847 // value: If true, retain reduced dimensions with length 1.
   1848 // If not specified, defaults to false
   1849 func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr {
   1850 	return func(m optionalAttr) {
   1851 		m["keep_dims"] = value
   1852 	}
   1853 }
   1854 
   1855 // Computes the sum of elements across dimensions of a SparseTensor.
   1856 //
   1857 // This Op takes a SparseTensor and is the sparse counterpart to
   1858 // `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
   1859 // SparseTensor.
   1860 //
   1861 // Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
   1862 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   1863 // `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
   1864 // with length 1.
   1865 //
   1866 // If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
   1867 // with a single element is returned.  Additionally, the axes can be negative,
   1868 // in which case they are interpreted according to the indexing rules in Python.
   1869 //
   1870 // Arguments:
   1871 //	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   1872 // SparseTensor, possibly not in canonical ordering.
   1873 //	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
   1874 //	input_shape: 1-D.  Shape of the input SparseTensor.
   1875 //	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
   1876 func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
   1877 	if scope.Err() != nil {
   1878 		return
   1879 	}
   1880 	attrs := map[string]interface{}{}
   1881 	for _, a := range optional {
   1882 		a(attrs)
   1883 	}
   1884 	opspec := tf.OpSpec{
   1885 		Type: "SparseReduceSumSparse",
   1886 		Input: []tf.Input{
   1887 			input_indices, input_values, input_shape, reduction_axes,
   1888 		},
   1889 		Attrs: attrs,
   1890 	}
   1891 	op := scope.AddOperation(opspec)
   1892 	return op.Output(0), op.Output(1), op.Output(2)
   1893 }
   1894 
   1895 // BiasAddAttr is an optional argument to BiasAdd.
   1896 type BiasAddAttr func(optionalAttr)
   1897 
   1898 // BiasAddDataFormat sets the optional data_format attribute to value.
   1899 //
   1900 // value: Specify the data format of the input and output data. With the
   1901 // default format "NHWC", the bias tensor will be added to the last dimension
   1902 // of the value tensor.
   1903 // Alternatively, the format could be "NCHW", the data storage order of:
   1904 //     [batch, in_channels, in_height, in_width].
   1905 // The bias tensor will be added to "in_channels", the third-to-last
   1906 //     dimension.
   1907 // If not specified, defaults to "NHWC"
   1908 func BiasAddDataFormat(value string) BiasAddAttr {
   1909 	return func(m optionalAttr) {
   1910 		m["data_format"] = value
   1911 	}
   1912 }
   1913 
   1914 // Adds `bias` to `value`.
   1915 //
   1916 // This is a special case of `tf.add` where `bias` is restricted to be 1-D.
   1917 // Broadcasting is supported, so `value` may have any number of dimensions.
   1918 //
   1919 // Arguments:
   1920 //	value: Any number of dimensions.
   1921 //	bias: 1-D with size the last dimension of `value`.
   1922 //
   1923 // Returns Broadcasted sum of `value` and `bias`.
   1924 func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) {
   1925 	if scope.Err() != nil {
   1926 		return
   1927 	}
   1928 	attrs := map[string]interface{}{}
   1929 	for _, a := range optional {
   1930 		a(attrs)
   1931 	}
   1932 	opspec := tf.OpSpec{
   1933 		Type: "BiasAdd",
   1934 		Input: []tf.Input{
   1935 			value, bias,
   1936 		},
   1937 		Attrs: attrs,
   1938 	}
   1939 	op := scope.AddOperation(opspec)
   1940 	return op.Output(0)
   1941 }
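
// exampleBiasAdd is an illustrative sketch added for documentation; it is not
// generator output. It assumes the Const wrapper available elsewhere in this
// package and a `value` tensor whose last (NHWC channel) dimension has size 3.
func exampleBiasAdd(s *Scope, value tf.Output) tf.Output {
	bias := Const(s, []float32{0.1, 0.2, 0.3}) // 1-D, sized to the channel dimension
	// With the default "NHWC" data format, bias is broadcast over the last
	// dimension of value.
	return BiasAdd(s, value, bias)
}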
   1942 
   1943 // BiasAddGradAttr is an optional argument to BiasAddGrad.
   1944 type BiasAddGradAttr func(optionalAttr)
   1945 
   1946 // BiasAddGradDataFormat sets the optional data_format attribute to value.
   1947 //
   1948 // value: Specify the data format of the input and output data. With the
   1949 // default format "NHWC", the bias tensor will be added to the last dimension
   1950 // of the value tensor.
   1951 // Alternatively, the format could be "NCHW", the data storage order of:
   1952 //     [batch, in_channels, in_height, in_width].
   1953 // The bias tensor will be added to "in_channels", the third-to-last
   1954 //     dimension.
   1955 // If not specified, defaults to "NHWC"
   1956 func BiasAddGradDataFormat(value string) BiasAddGradAttr {
   1957 	return func(m optionalAttr) {
   1958 		m["data_format"] = value
   1959 	}
   1960 }
   1961 
   1962 // The backward operation for "BiasAdd" on the "bias" tensor.
   1963 //
   1964 // It accumulates all the values from out_backprop into the feature dimension.
   1965 // For NHWC data format, the feature dimension is the last. For NCHW data format,
   1966 // the feature dimension is the third-to-last.
   1967 //
   1968 // Arguments:
   1969 //	out_backprop: Any number of dimensions.
   1970 //
   1971 // Returns 1-D with size the feature dimension of `out_backprop`.
   1972 func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output) {
   1973 	if scope.Err() != nil {
   1974 		return
   1975 	}
   1976 	attrs := map[string]interface{}{}
   1977 	for _, a := range optional {
   1978 		a(attrs)
   1979 	}
   1980 	opspec := tf.OpSpec{
   1981 		Type: "BiasAddGrad",
   1982 		Input: []tf.Input{
   1983 			out_backprop,
   1984 		},
   1985 		Attrs: attrs,
   1986 	}
   1987 	op := scope.AddOperation(opspec)
   1988 	return op.Output(0)
   1989 }
   1990 
   1991 // Returns x + y element-wise.
   1992 //
   1993 // *NOTE*: `AddV2` supports broadcasting. `AddN` does not. More about broadcasting
   1994 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   1995 func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   1996 	if scope.Err() != nil {
   1997 		return
   1998 	}
   1999 	opspec := tf.OpSpec{
   2000 		Type: "AddV2",
   2001 		Input: []tf.Input{
   2002 			x, y,
   2003 		},
   2004 	}
   2005 	op := scope.AddOperation(opspec)
   2006 	return op.Output(0)
   2007 }
   2008 
   2009 // Returns x + y element-wise.
   2010 //
   2011 // *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
   2012 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   2013 func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   2014 	if scope.Err() != nil {
   2015 		return
   2016 	}
   2017 	opspec := tf.OpSpec{
   2018 		Type: "Add",
   2019 		Input: []tf.Input{
   2020 			x, y,
   2021 		},
   2022 	}
   2023 	op := scope.AddOperation(opspec)
   2024 	return op.Output(0)
   2025 }
   2026 
   2027 // NthElementAttr is an optional argument to NthElement.
   2028 type NthElementAttr func(optionalAttr)
   2029 
   2030 // NthElementReverse sets the optional reverse attribute to value.
   2031 //
   2032 // value: When set to true, finds the nth-largest value in the vector instead
   2033 // of the nth-smallest.
   2034 // If not specified, defaults to false
   2035 func NthElementReverse(value bool) NthElementAttr {
   2036 	return func(m optionalAttr) {
   2037 		m["reverse"] = value
   2038 	}
   2039 }
   2040 
   2041 // Finds values of the `n`-th order statistic for the last dimension.
   2042 //
   2043 // If the input is a vector (rank-1), finds the entry which is the nth-smallest
   2044 // value in the vector and outputs its value as a scalar tensor.
   2045 //
   2046 // For matrices (resp. higher rank input), computes the entry which is the
   2047 // nth-smallest value in each row (resp. vector along the last dimension). Thus,
   2048 //
   2049 //     values.shape = input.shape[:-1]
   2050 //
   2051 // Arguments:
   2052 //	input: 1-D or higher with last dimension at least `n+1`.
   2053 //	n: 0-D. Position of sorted vector to select along the last dimension (along
   2054 // each row for matrices). Valid range of n is `[0, input.shape[:-1])`
   2055 //
   2056 // Returns The `n`-th order statistic along each last dimensional slice.
   2057 func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output) {
   2058 	if scope.Err() != nil {
   2059 		return
   2060 	}
   2061 	attrs := map[string]interface{}{}
   2062 	for _, a := range optional {
   2063 		a(attrs)
   2064 	}
   2065 	opspec := tf.OpSpec{
   2066 		Type: "NthElement",
   2067 		Input: []tf.Input{
   2068 			input, n,
   2069 		},
   2070 		Attrs: attrs,
   2071 	}
   2072 	op := scope.AddOperation(opspec)
   2073 	return op.Output(0)
   2074 }
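
// exampleNthElement is an illustrative sketch added for documentation; it is
// not generator output, and it assumes the Const wrapper available elsewhere
// in this package. With input [5, 1, 4, 2] and n = 1 (0-based), the result is
// the second-smallest value, 2; with NthElementReverse(true) it would be the
// second-largest, 4.
func exampleNthElement(s *Scope) tf.Output {
	input := Const(s, []float32{5, 1, 4, 2})
	n := Const(s, int32(1))
	return NthElement(s, input, n, NthElementReverse(false))
}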
   2075 
   2076 // Computes the Max along segments of a tensor.
   2077 //
   2078 // Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
   2079 // segments.
   2080 //
   2081 // This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
   2082 // Instead of computing the sum over segments, it computes the maximum
   2083 // such that:
   2084 //
   2085 // \\(output_i = \max_j data_j\\) where max is over `j` such
   2086 // that `segment_ids[j] == i`.
   2087 //
   2088 // If the maximum is empty for a given segment ID `i`, it outputs the smallest
   2089 // possible value for the specific numeric type, `output[i] = numeric_limits<T>::min()`.
   2090 //
   2091 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   2092 // <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
   2093 // </div>
   2094 //
   2095 // Arguments:
   2096 //
   2097 //	segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
   2098 // first dimension.
   2099 //
   2100 //
   2101 // Returns Has same shape as data, except for dimension 0 which
   2102 // has size `num_segments`.
   2103 func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
   2104 	if scope.Err() != nil {
   2105 		return
   2106 	}
   2107 	opspec := tf.OpSpec{
   2108 		Type: "UnsortedSegmentMax",
   2109 		Input: []tf.Input{
   2110 			data, segment_ids, num_segments,
   2111 		},
   2112 	}
   2113 	op := scope.AddOperation(opspec)
   2114 	return op.Output(0)
   2115 }
   2116 
   2117 // Computes exponential of x element-wise.  \\(y = e^x\\).
   2118 func Exp(scope *Scope, x tf.Output) (y tf.Output) {
   2119 	if scope.Err() != nil {
   2120 		return
   2121 	}
   2122 	opspec := tf.OpSpec{
   2123 		Type: "Exp",
   2124 		Input: []tf.Input{
   2125 			x,
   2126 		},
   2127 	}
   2128 	op := scope.AddOperation(opspec)
   2129 	return op.Output(0)
   2130 }
   2131 
   2132 // Returns an element-wise indication of the sign of a number.
   2133 //
   2134 // `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
   2135 //
   2136 // For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
   2137 func Sign(scope *Scope, x tf.Output) (y tf.Output) {
   2138 	if scope.Err() != nil {
   2139 		return
   2140 	}
   2141 	opspec := tf.OpSpec{
   2142 		Type: "Sign",
   2143 		Input: []tf.Input{
   2144 			x,
   2145 		},
   2146 	}
   2147 	op := scope.AddOperation(opspec)
   2148 	return op.Output(0)
   2149 }
   2150 
   2151 // QuantizedAddAttr is an optional argument to QuantizedAdd.
   2152 type QuantizedAddAttr func(optionalAttr)
   2153 
   2154 // QuantizedAddToutput sets the optional Toutput attribute to value.
   2155 // If not specified, defaults to DT_QINT32
   2156 func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {
   2157 	return func(m optionalAttr) {
   2158 		m["Toutput"] = value
   2159 	}
   2160 }
   2161 
   2162 // Returns x + y element-wise, working on quantized buffers.
   2163 //
   2164 // Arguments:
   2165 //
   2166 //
   2167 //	min_x: The float value that the lowest quantized `x` value represents.
   2168 //	max_x: The float value that the highest quantized `x` value represents.
   2169 //	min_y: The float value that the lowest quantized `y` value represents.
   2170 //	max_y: The float value that the highest quantized `y` value represents.
   2171 //
   2172 // Returns The float value that the lowest quantized output value represents.  The float value that the highest quantized output value represents.
   2173 //
   2174 // *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
   2175 // broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   2176 func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
   2177 	if scope.Err() != nil {
   2178 		return
   2179 	}
   2180 	attrs := map[string]interface{}{}
   2181 	for _, a := range optional {
   2182 		a(attrs)
   2183 	}
   2184 	opspec := tf.OpSpec{
   2185 		Type: "QuantizedAdd",
   2186 		Input: []tf.Input{
   2187 			x, y, min_x, max_x, min_y, max_y,
   2188 		},
   2189 		Attrs: attrs,
   2190 	}
   2191 	op := scope.AddOperation(opspec)
   2192 	return op.Output(0), op.Output(1), op.Output(2)
   2193 }
   2194 
   2195 // ArgMinAttr is an optional argument to ArgMin.
   2196 type ArgMinAttr func(optionalAttr)
   2197 
   2198 // ArgMinOutputType sets the optional output_type attribute to value.
   2199 // If not specified, defaults to DT_INT64
   2200 func ArgMinOutputType(value tf.DataType) ArgMinAttr {
   2201 	return func(m optionalAttr) {
   2202 		m["output_type"] = value
   2203 	}
   2204 }
   2205 
   2206 // Returns the index with the smallest value across dimensions of a tensor.
   2207 //
   2208 // Note that in case of ties the identity of the return value is not guaranteed.
   2209 //
   2210 // Arguments:
   2211 //
   2212 //	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
   2213 // Describes which dimension of the input Tensor to reduce across. For vectors,
   2214 // use dimension = 0.
   2215 func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output) {
   2216 	if scope.Err() != nil {
   2217 		return
   2218 	}
   2219 	attrs := map[string]interface{}{}
   2220 	for _, a := range optional {
   2221 		a(attrs)
   2222 	}
   2223 	opspec := tf.OpSpec{
   2224 		Type: "ArgMin",
   2225 		Input: []tf.Input{
   2226 			input, dimension,
   2227 		},
   2228 		Attrs: attrs,
   2229 	}
   2230 	op := scope.AddOperation(opspec)
   2231 	return op.Output(0)
   2232 }
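
// exampleArgMin is an illustrative sketch added for documentation; it is not
// generator output, and it assumes the Const wrapper available elsewhere in
// this package. For the vector [3, 1, 2] it returns index 1, as int32 rather
// than the default int64.
func exampleArgMin(s *Scope) tf.Output {
	input := Const(s, []float32{3, 1, 2})
	dimension := Const(s, int32(0)) // for vectors, reduce across dimension 0
	return ArgMin(s, input, dimension, ArgMinOutputType(tf.Int32))
}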
   2233 
   2234 // Convert the quantized 'input' tensor into a lower-precision 'output', using the
   2235 //
   2236 // output range specified with 'requested_output_min' and 'requested_output_max'.
   2237 //
   2238 // [input_min, input_max] are scalar floats that specify the range for the float
   2239 // interpretation of the 'input' data. For example, if input_min is -1.0f and
   2240 // input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
   2241 // value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
   2242 //
   2243 // Arguments:
   2244 //
   2245 //	input_min: The float value that the minimum quantized input value represents.
   2246 //	input_max: The float value that the maximum quantized input value represents.
   2247 //	requested_output_min: The float value that the minimum quantized output value represents.
   2248 //	requested_output_max: The float value that the maximum quantized output value represents.
   2249 //	out_type: The type of the output. Should be a lower bit depth than Tinput.
   2250 //
   2251 // Returns The requested_output_min value is copied into this output.  The requested_output_max value is copied into this output.
   2252 func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
   2253 	if scope.Err() != nil {
   2254 		return
   2255 	}
   2256 	attrs := map[string]interface{}{"out_type": out_type}
   2257 	opspec := tf.OpSpec{
   2258 		Type: "Requantize",
   2259 		Input: []tf.Input{
   2260 			input, input_min, input_max, requested_output_min, requested_output_max,
   2261 		},
   2262 		Attrs: attrs,
   2263 	}
   2264 	op := scope.AddOperation(opspec)
   2265 	return op.Output(0), op.Output(1), op.Output(2)
   2266 }
   2267 
   2268 // Computes the determinant of one or more square matrices.
   2269 //
   2270 // The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
   2271 // form square matrices. The output is a tensor containing the determinants
   2272 // for all input submatrices `[..., :, :]`.
   2273 //
   2274 // Arguments:
   2275 //	input: Shape is `[..., M, M]`.
   2276 //
   2277 // Returns Shape is `[...]`.
   2278 func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {
   2279 	if scope.Err() != nil {
   2280 		return
   2281 	}
   2282 	opspec := tf.OpSpec{
   2283 		Type: "MatrixDeterminant",
   2284 		Input: []tf.Input{
   2285 			input,
   2286 		},
   2287 	}
   2288 	op := scope.AddOperation(opspec)
   2289 	return op.Output(0)
   2290 }
   2291 
   2292 // Computes sin of x element-wise.
   2293 func Sin(scope *Scope, x tf.Output) (y tf.Output) {
   2294 	if scope.Err() != nil {
   2295 		return
   2296 	}
   2297 	opspec := tf.OpSpec{
   2298 		Type: "Sin",
   2299 		Input: []tf.Input{
   2300 			x,
   2301 		},
   2302 	}
   2303 	op := scope.AddOperation(opspec)
   2304 	return op.Output(0)
   2305 }
   2306 
   2307 // Computes the complementary error function of `x` element-wise.
   2308 func Erfc(scope *Scope, x tf.Output) (y tf.Output) {
   2309 	if scope.Err() != nil {
   2310 		return
   2311 	}
   2312 	opspec := tf.OpSpec{
   2313 		Type: "Erfc",
   2314 		Input: []tf.Input{
   2315 			x,
   2316 		},
   2317 	}
   2318 	op := scope.AddOperation(opspec)
   2319 	return op.Output(0)
   2320 }
   2321 
   2322 // Computes Psi, the derivative of Lgamma (the log of the absolute value of
   2323 //
   2324 // `Gamma(x)`), element-wise.
   2325 func Digamma(scope *Scope, x tf.Output) (y tf.Output) {
   2326 	if scope.Err() != nil {
   2327 		return
   2328 	}
   2329 	opspec := tf.OpSpec{
   2330 		Type: "Digamma",
   2331 		Input: []tf.Input{
   2332 			x,
   2333 		},
   2334 	}
   2335 	op := scope.AddOperation(opspec)
   2336 	return op.Output(0)
   2337 }
   2338 
   2339 // Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
   2340 type Conv2DBackpropFilterAttr func(optionalAttr)
   2341 
   2342 // Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
   2343 // If not specified, defaults to true
   2344 func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr {
   2345 	return func(m optionalAttr) {
   2346 		m["use_cudnn_on_gpu"] = value
   2347 	}
   2348 }
   2349 
   2350 // Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
   2351 //
   2352 // value: Specify the data format of the input and output data. With the
   2353 // default format "NHWC", the data is stored in the order of:
   2354 //     [batch, in_height, in_width, in_channels].
   2355 // Alternatively, the format could be "NCHW", the data storage order of:
   2356 //     [batch, in_channels, in_height, in_width].
   2357 // If not specified, defaults to "NHWC"
   2358 func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr {
   2359 	return func(m optionalAttr) {
   2360 		m["data_format"] = value
   2361 	}
   2362 }
   2363 
   2364 // Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
   2365 //
   2366 // value: 1-D tensor of length 4.  The dilation factor for each dimension of
   2367 // `input`. If set to k > 1, there will be k-1 skipped cells between each filter
   2368 // element on that dimension. The dimension order is determined by the value of
   2369 // `data_format`, see above for details. Dilations in the batch and depth
   2370 // dimensions must be 1.
   2371 // If not specified, defaults to [1, 1, 1, 1]
   2372 func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr {
   2373 	return func(m optionalAttr) {
   2374 		m["dilations"] = value
   2375 	}
   2376 }
   2377 
   2378 // Computes the gradients of convolution with respect to the filter.
   2379 //
   2380 // Arguments:
   2381 //	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
   2382 //	filter_sizes: An integer vector representing the tensor shape of `filter`,
   2383 // where `filter` is a 4-D
   2384 // `[filter_height, filter_width, in_channels, out_channels]` tensor.
   2385 //	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
   2386 // Gradients w.r.t. the output of the convolution.
   2387 //	strides: The stride of the sliding window for each dimension of the input
   2388 // of the convolution. Must be in the same order as the dimension specified with
   2389 // format.
   2390 //	padding: The type of padding algorithm to use.
   2391 //
   2392 // Returns 4-D with shape
   2393 // `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
   2394 // the `filter` input of the convolution.
   2395 func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output) {
   2396 	if scope.Err() != nil {
   2397 		return
   2398 	}
   2399 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   2400 	for _, a := range optional {
   2401 		a(attrs)
   2402 	}
   2403 	opspec := tf.OpSpec{
   2404 		Type: "Conv2DBackpropFilter",
   2405 		Input: []tf.Input{
   2406 			input, filter_sizes, out_backprop,
   2407 		},
   2408 		Attrs: attrs,
   2409 	}
   2410 	op := scope.AddOperation(opspec)
   2411 	return op.Output(0)
   2412 }
   2413 
   2414 // Returns the number of work units this Reader has finished processing.
   2415 //
   2416 // Arguments:
   2417 //	reader_handle: Handle to a Reader.
   2418 func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output) {
   2419 	if scope.Err() != nil {
   2420 		return
   2421 	}
   2422 	opspec := tf.OpSpec{
   2423 		Type: "ReaderNumWorkUnitsCompletedV2",
   2424 		Input: []tf.Input{
   2425 			reader_handle,
   2426 		},
   2427 	}
   2428 	op := scope.AddOperation(opspec)
   2429 	return op.Output(0)
   2430 }
   2431 
   2432 // Returns x / y element-wise for real types.
   2433 //
   2434 // If `x` and `y` are reals, this will return the floating-point division.
   2435 //
   2436 // *NOTE*: `Div` supports broadcasting. More about broadcasting
   2437 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   2438 func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   2439 	if scope.Err() != nil {
   2440 		return
   2441 	}
   2442 	opspec := tf.OpSpec{
   2443 		Type: "RealDiv",
   2444 		Input: []tf.Input{
   2445 			x, y,
   2446 		},
   2447 	}
   2448 	op := scope.AddOperation(opspec)
   2449 	return op.Output(0)
   2450 }
   2451 
   2452 // Computes the log of the absolute value of `Gamma(x)` element-wise.
   2453 func Lgamma(scope *Scope, x tf.Output) (y tf.Output) {
   2454 	if scope.Err() != nil {
   2455 		return
   2456 	}
   2457 	opspec := tf.OpSpec{
   2458 		Type: "Lgamma",
   2459 		Input: []tf.Input{
   2460 			x,
   2461 		},
   2462 	}
   2463 	op := scope.AddOperation(opspec)
   2464 	return op.Output(0)
   2465 }
   2466 
   2467 // Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
   2468 //
   2469 // For an explanation see "Differentiation of the Cholesky algorithm" by
   2470 // Iain Murray http://arxiv.org/abs/1602.07527.
   2471 //
   2472 // Arguments:
   2473 //	l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
   2474 // Algorithm depends only on lower triangular part of the innermost matrices of
   2475 // this tensor.
   2476 //	grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
   2477 // Algorithm depends only on lower triangular part of the innermost matrices of
   2478 // this tensor.
   2479 //
   2480 // Returns Symmetrized version of df/dA. Shape is `[..., M, M]`.
   2481 func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output) {
   2482 	if scope.Err() != nil {
   2483 		return
   2484 	}
   2485 	opspec := tf.OpSpec{
   2486 		Type: "CholeskyGrad",
   2487 		Input: []tf.Input{
   2488 			l, grad,
   2489 		},
   2490 	}
   2491 	op := scope.AddOperation(opspec)
   2492 	return op.Output(0)
   2493 }
   2494 
   2495 // Computes inverse hyperbolic cosine of x element-wise.
   2496 func Acosh(scope *Scope, x tf.Output) (y tf.Output) {
   2497 	if scope.Err() != nil {
   2498 		return
   2499 	}
   2500 	opspec := tf.OpSpec{
   2501 		Type: "Acosh",
   2502 		Input: []tf.Input{
   2503 			x,
   2504 		},
   2505 	}
   2506 	op := scope.AddOperation(opspec)
   2507 	return op.Output(0)
   2508 }
   2509 
   2510 // SerializeManySparseAttr is an optional argument to SerializeManySparse.
   2511 type SerializeManySparseAttr func(optionalAttr)
   2512 
   2513 // SerializeManySparseOutType sets the optional out_type attribute to value.
   2514 //
   2515 // value: The `dtype` to use for serialization; the supported types are `string`
   2516 // (default) and `variant`.
   2517 // If not specified, defaults to DT_STRING
   2518 func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr {
   2519 	return func(m optionalAttr) {
   2520 		m["out_type"] = value
   2521 	}
   2522 }
   2523 
   2524 // Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
   2525 //
   2526 // The `SparseTensor` must have rank `R` greater than 1, and the first dimension
   2527 // is treated as the minibatch dimension.  Elements of the `SparseTensor`
   2528 // must be sorted in increasing order of this first dimension.  The serialized
   2529 // `SparseTensor` objects going into each row of `serialized_sparse` will have
   2530 // rank `R-1`.
   2531 //
   2532 // The minibatch size `N` is extracted from `sparse_shape[0]`.
   2533 //
   2534 // Arguments:
   2535 //	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
   2536 //	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
   2537 //	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
   2538 func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output) {
   2539 	if scope.Err() != nil {
   2540 		return
   2541 	}
   2542 	attrs := map[string]interface{}{}
   2543 	for _, a := range optional {
   2544 		a(attrs)
   2545 	}
   2546 	opspec := tf.OpSpec{
   2547 		Type: "SerializeManySparse",
   2548 		Input: []tf.Input{
   2549 			sparse_indices, sparse_values, sparse_shape,
   2550 		},
   2551 		Attrs: attrs,
   2552 	}
   2553 	op := scope.AddOperation(opspec)
   2554 	return op.Output(0)
   2555 }
   2556 
   2557 // TensorArrayV2Attr is an optional argument to TensorArrayV2.
   2558 type TensorArrayV2Attr func(optionalAttr)
   2559 
   2560 // TensorArrayV2ElementShape sets the optional element_shape attribute to value.
   2561 // If not specified, defaults to a shape of unknown rank
   2562 func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr {
   2563 	return func(m optionalAttr) {
   2564 		m["element_shape"] = value
   2565 	}
   2566 }
   2567 
   2568 // TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value.
   2569 // If not specified, defaults to false
   2570 func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr {
   2571 	return func(m optionalAttr) {
   2572 		m["dynamic_size"] = value
   2573 	}
   2574 }
   2575 
   2576 // TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value.
   2577 // If not specified, defaults to true
   2578 func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr {
   2579 	return func(m optionalAttr) {
   2580 		m["clear_after_read"] = value
   2581 	}
   2582 }
   2583 
   2584 // TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value.
   2585 // If not specified, defaults to ""
   2586 func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr {
   2587 	return func(m optionalAttr) {
   2588 		m["tensor_array_name"] = value
   2589 	}
   2590 }
   2591 
   2592 // Deprecated. Use TensorArrayV3
   2593 //
   2594 // DEPRECATED at GraphDef version 26: Use TensorArrayV3
   2595 func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output) {
   2596 	if scope.Err() != nil {
   2597 		return
   2598 	}
   2599 	attrs := map[string]interface{}{"dtype": dtype}
   2600 	for _, a := range optional {
   2601 		a(attrs)
   2602 	}
   2603 	opspec := tf.OpSpec{
   2604 		Type: "TensorArrayV2",
   2605 		Input: []tf.Input{
   2606 			size,
   2607 		},
   2608 		Attrs: attrs,
   2609 	}
   2610 	op := scope.AddOperation(opspec)
   2611 	return op.Output(0)
   2612 }
   2613 
   2614 // Computes the mean along sparse segments of a tensor.
   2615 //
   2616 // Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
   2617 // missing, the `output` tensor at that position will be zeroed.
   2618 //
   2619 // Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
   2620 // segments.
   2621 //
   2622 // Arguments:
   2623 //
   2624 //	indices: A 1-D tensor. Has same rank as `segment_ids`.
   2625 //	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
   2626 //	num_segments: Should equal the number of distinct segment IDs.
   2627 //
   2628 // Returns Has same shape as data, except for dimension 0 which has size
   2629 // `num_segments`.
   2630 func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
   2631 	if scope.Err() != nil {
   2632 		return
   2633 	}
   2634 	opspec := tf.OpSpec{
   2635 		Type: "SparseSegmentMeanWithNumSegments",
   2636 		Input: []tf.Input{
   2637 			data, indices, segment_ids, num_segments,
   2638 		},
   2639 	}
   2640 	op := scope.AddOperation(opspec)
   2641 	return op.Output(0)
   2642 }
   2643 
   2644 // Computes hyperbolic cosine of x element-wise.
   2645 func Cosh(scope *Scope, x tf.Output) (y tf.Output) {
   2646 	if scope.Err() != nil {
   2647 		return
   2648 	}
   2649 	opspec := tf.OpSpec{
   2650 		Type: "Cosh",
   2651 		Input: []tf.Input{
   2652 			x,
   2653 		},
   2654 	}
   2655 	op := scope.AddOperation(opspec)
   2656 	return op.Output(0)
   2657 }
   2658 
   2659 // Creates a dataset that emits each dim-0 slice of `components` once.
   2660 func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
   2661 	if scope.Err() != nil {
   2662 		return
   2663 	}
   2664 	attrs := map[string]interface{}{"output_shapes": output_shapes}
   2665 	opspec := tf.OpSpec{
   2666 		Type: "TensorSliceDataset",
   2667 		Input: []tf.Input{
   2668 			tf.OutputList(components),
   2669 		},
   2670 		Attrs: attrs,
   2671 	}
   2672 	op := scope.AddOperation(opspec)
   2673 	return op.Output(0)
   2674 }
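
// exampleTensorSliceDataset is an illustrative sketch added for documentation;
// it is not generator output, and it assumes the Const wrapper available
// elsewhere in this package. Slicing the vector [1, 2, 3] along dim 0 yields a
// dataset of three scalar elements, so the component's element shape is a
// scalar.
func exampleTensorSliceDataset(s *Scope) tf.Output {
	components := []tf.Output{Const(s, []int64{1, 2, 3})}
	return TensorSliceDataset(s, components, []tf.Shape{tf.ScalarShape()})
}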
   2675 
   2676 // Computes natural logarithm of (1 + x) element-wise.
   2677 //
   2678 // I.e., \\(y = \log_e (1 + x)\\).
   2679 func Log1p(scope *Scope, x tf.Output) (y tf.Output) {
   2680 	if scope.Err() != nil {
   2681 		return
   2682 	}
   2683 	opspec := tf.OpSpec{
   2684 		Type: "Log1p",
   2685 		Input: []tf.Input{
   2686 			x,
   2687 		},
   2688 	}
   2689 	op := scope.AddOperation(opspec)
   2690 	return op.Output(0)
   2691 }
   2692 
   2693 // Computes rectified linear 6 gradients for a Relu6 operation.
   2694 //
   2695 // Arguments:
   2696 //	gradients: The backpropagated gradients to the corresponding Relu6 operation.
   2697 //	features: The features passed as input to the corresponding Relu6 operation, or
   2698 // its output; using either one produces the same result.
   2699 //
   2700 // Returns The gradients:
   2701 // `gradients * (features > 0) * (features < 6)`.
   2702 func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
   2703 	if scope.Err() != nil {
   2704 		return
   2705 	}
   2706 	opspec := tf.OpSpec{
   2707 		Type: "Relu6Grad",
   2708 		Input: []tf.Input{
   2709 			gradients, features,
   2710 		},
   2711 	}
   2712 	op := scope.AddOperation(opspec)
   2713 	return op.Output(0)
   2714 }
   2715 
   2716 // ResizeBicubicAttr is an optional argument to ResizeBicubic.
   2717 type ResizeBicubicAttr func(optionalAttr)
   2718 
   2719 // ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
   2720 //
   2721 // value: If true, rescale input by (new_height - 1) / (height - 1), which
   2722 // exactly aligns the 4 corners of images and resized images. If false, rescale
   2723 // by new_height / height. The width dimension is treated similarly.
   2724 // If not specified, defaults to false
   2725 func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr {
   2726 	return func(m optionalAttr) {
   2727 		m["align_corners"] = value
   2728 	}
   2729 }
   2730 
   2731 // Resize `images` to `size` using bicubic interpolation.
   2732 //
   2733 // Input images can be of different types but output images are always float.
   2734 //
   2735 // Arguments:
   2736 //	images: 4-D with shape `[batch, height, width, channels]`.
   2737 //	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
   2738 // new size for the images.
   2739 //
   2740 // Returns 4-D with shape
   2741 // `[batch, new_height, new_width, channels]`.
   2742 func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output) {
   2743 	if scope.Err() != nil {
   2744 		return
   2745 	}
   2746 	attrs := map[string]interface{}{}
   2747 	for _, a := range optional {
   2748 		a(attrs)
   2749 	}
   2750 	opspec := tf.OpSpec{
   2751 		Type: "ResizeBicubic",
   2752 		Input: []tf.Input{
   2753 			images, size,
   2754 		},
   2755 		Attrs: attrs,
   2756 	}
   2757 	op := scope.AddOperation(opspec)
   2758 	return op.Output(0)
   2759 }
   2760 
   2761 // Computes natural logarithm of x element-wise.
   2762 //
   2763 // I.e., \\(y = \log_e x\\).
   2764 func Log(scope *Scope, x tf.Output) (y tf.Output) {
   2765 	if scope.Err() != nil {
   2766 		return
   2767 	}
   2768 	opspec := tf.OpSpec{
   2769 		Type: "Log",
   2770 		Input: []tf.Input{
   2771 			x,
   2772 		},
   2773 	}
   2774 	op := scope.AddOperation(opspec)
   2775 	return op.Output(0)
   2776 }
   2777 
   2778 // Rounds the values of a tensor to the nearest integer, element-wise.
   2779 //
   2780 // Rounds half to even.  Also known as banker's rounding. If you want to round
   2781 // according to the current system rounding mode, use std::rint.
   2782 func Round(scope *Scope, x tf.Output) (y tf.Output) {
   2783 	if scope.Err() != nil {
   2784 		return
   2785 	}
   2786 	opspec := tf.OpSpec{
   2787 		Type: "Round",
   2788 		Input: []tf.Input{
   2789 			x,
   2790 		},
   2791 	}
   2792 	op := scope.AddOperation(opspec)
   2793 	return op.Output(0)
   2794 }
   2795 
   2796 // RecordInputAttr is an optional argument to RecordInput.
   2797 type RecordInputAttr func(optionalAttr)
   2798 
   2799 // RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
   2800 //
   2801 // value: Random seed used to produce randomized records.
   2802 // If not specified, defaults to 301
   2803 func RecordInputFileRandomSeed(value int64) RecordInputAttr {
   2804 	return func(m optionalAttr) {
   2805 		m["file_random_seed"] = value
   2806 	}
   2807 }
   2808 
   2809 // RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
   2810 //
   2811 // value: Shifts the list of files after the list is randomly
   2812 // shuffled.
   2813 // If not specified, defaults to 0
   2814 func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr {
   2815 	return func(m optionalAttr) {
   2816 		m["file_shuffle_shift_ratio"] = value
   2817 	}
   2818 }
   2819 
   2820 // RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
   2821 //
   2822 // value: The randomization shuffling buffer.
   2823 // If not specified, defaults to 10000
   2824 func RecordInputFileBufferSize(value int64) RecordInputAttr {
   2825 	return func(m optionalAttr) {
   2826 		m["file_buffer_size"] = value
   2827 	}
   2828 }
   2829 
   2830 // RecordInputFileParallelism sets the optional file_parallelism attribute to value.
   2831 //
   2832 // value: How many sstables are opened and concurrently iterated over.
   2833 // If not specified, defaults to 16
   2834 func RecordInputFileParallelism(value int64) RecordInputAttr {
   2835 	return func(m optionalAttr) {
   2836 		m["file_parallelism"] = value
   2837 	}
   2838 }
   2839 
   2840 // RecordInputBatchSize sets the optional batch_size attribute to value.
   2841 //
   2842 // value: The batch size.
   2843 // If not specified, defaults to 32
   2844 func RecordInputBatchSize(value int64) RecordInputAttr {
   2845 	return func(m optionalAttr) {
   2846 		m["batch_size"] = value
   2847 	}
   2848 }
   2849 
   2850 // RecordInputCompressionType sets the optional compression_type attribute to value.
   2851 //
   2852 // value: The type of compression for the file. Currently ZLIB and
   2853 // GZIP are supported. Defaults to none.
   2854 // If not specified, defaults to ""
   2855 func RecordInputCompressionType(value string) RecordInputAttr {
   2856 	return func(m optionalAttr) {
   2857 		m["compression_type"] = value
   2858 	}
   2859 }
   2860 
   2861 // Emits randomized records.
   2862 //
   2863 // Arguments:
   2864 //	file_pattern: Glob pattern for the data files.
   2865 //
   2866 // Returns A tensor of shape [batch_size].
   2867 func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output) {
   2868 	if scope.Err() != nil {
   2869 		return
   2870 	}
   2871 	attrs := map[string]interface{}{"file_pattern": file_pattern}
   2872 	for _, a := range optional {
   2873 		a(attrs)
   2874 	}
   2875 	opspec := tf.OpSpec{
   2876 		Type: "RecordInput",
   2877 
   2878 		Attrs: attrs,
   2879 	}
   2880 	op := scope.AddOperation(opspec)
   2881 	return op.Output(0)
   2882 }
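
// exampleRecordInput is an illustrative sketch added for documentation; it is
// not generator output. The file pattern is hypothetical; any glob understood
// by the runtime works. It emits shuffled records in batches of 64, reading
// four files in parallel.
func exampleRecordInput(s *Scope) tf.Output {
	return RecordInput(s, "/tmp/data-*.tfrecord",
		RecordInputBatchSize(64),
		RecordInputFileParallelism(4))
}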
   2883 
   2884 // Computes reciprocal of square root of x element-wise.
   2885 //
   2886 // I.e., \\(y = 1 / \sqrt{x}\\).
   2887 func Rsqrt(scope *Scope, x tf.Output) (y tf.Output) {
   2888 	if scope.Err() != nil {
   2889 		return
   2890 	}
   2891 	opspec := tf.OpSpec{
   2892 		Type: "Rsqrt",
   2893 		Input: []tf.Input{
   2894 			x,
   2895 		},
   2896 	}
   2897 	op := scope.AddOperation(opspec)
   2898 	return op.Output(0)
   2899 }
   2900 
   2901 // Inserts a dimension of 1 into a tensor's shape.
   2902 //
   2903 // Given a tensor `input`, this operation inserts a dimension of 1 at the
   2904 // dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
   2905 // zero; if you specify a negative number for `axis` it is counted backward from
   2906 // the end.
   2907 //
   2908 // This operation is useful if you want to add a batch dimension to a single
   2909 // element. For example, if you have a single image of shape `[height, width,
   2910 // channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
   2911 // which will make the shape `[1, height, width, channels]`.
   2912 //
   2913 // Other examples:
   2914 //
   2915 // ```
   2916 // # 't' is a tensor of shape [2]
   2917 // shape(expand_dims(t, 0)) ==> [1, 2]
   2918 // shape(expand_dims(t, 1)) ==> [2, 1]
   2919 // shape(expand_dims(t, -1)) ==> [2, 1]
   2920 //
   2921 // # 't2' is a tensor of shape [2, 3, 5]
   2922 // shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
   2923 // shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
   2924 // shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
   2925 // ```
   2926 //
   2927 // This operation requires that:
   2928 //
// `-1 - input.dims() <= axis <= input.dims()`
   2930 //
   2931 // This operation is related to `squeeze()`, which removes dimensions of
   2932 // size 1.
   2933 //
   2934 // Arguments:
   2935 //
   2936 //	axis: 0-D (scalar). Specifies the dimension index at which to
   2937 // expand the shape of `input`. Must be in the range
   2938 // `[-rank(input) - 1, rank(input)]`.
   2939 //
   2940 // Returns Contains the same data as `input`, but its shape has an additional
   2941 // dimension of size 1 added.
   2942 func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output) {
   2943 	if scope.Err() != nil {
   2944 		return
   2945 	}
   2946 	opspec := tf.OpSpec{
   2947 		Type: "ExpandDims",
   2948 		Input: []tf.Input{
   2949 			input, axis,
   2950 		},
   2951 	}
   2952 	op := scope.AddOperation(opspec)
   2953 	return op.Output(0)
   2954 }
   2955 
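// exampleExpandDims is an editor-added usage sketch, not generated code: it
// adds a leading batch dimension to a single image, as described above. The
// Placeholder and PlaceholderShape wrappers and tf.MakeShape are assumed from
// this package and the tf package respectively.
func exampleExpandDims() tf.Output {
	s := NewScope()
	// A single image of shape [height, width, channels].
	image := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(28, 28, 3)))
	// axis 0 turns it into a batch of one image: [1, 28, 28, 3].
	return ExpandDims(s, image, Const(s, int32(0)))
}
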
   2956 // MatrixInverseAttr is an optional argument to MatrixInverse.
   2957 type MatrixInverseAttr func(optionalAttr)
   2958 
   2959 // MatrixInverseAdjoint sets the optional adjoint attribute to value.
   2960 // If not specified, defaults to false
   2961 func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
   2962 	return func(m optionalAttr) {
   2963 		m["adjoint"] = value
   2964 	}
   2965 }
   2966 
// Computes the inverse of one or more square invertible matrices or their
// adjoints (conjugate transposes).
   2970 //
   2971 // The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
   2972 // form square matrices. The output is a tensor of the same shape as the input
   2973 // containing the inverse for all input submatrices `[..., :, :]`.
   2974 //
   2975 // The op uses LU decomposition with partial pivoting to compute the inverses.
   2976 //
   2977 // If a matrix is not invertible there is no guarantee what the op does. It
   2978 // may detect the condition and raise an exception or it may simply return a
   2979 // garbage result.
   2980 //
   2981 // Arguments:
   2982 //	input: Shape is `[..., M, M]`.
   2983 //
   2984 // Returns Shape is `[..., M, M]`.
   2985 //
   2986 // @compatibility(numpy)
   2987 // Equivalent to np.linalg.inv
   2988 // @end_compatibility
   2989 func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
   2990 	if scope.Err() != nil {
   2991 		return
   2992 	}
   2993 	attrs := map[string]interface{}{}
   2994 	for _, a := range optional {
   2995 		a(attrs)
   2996 	}
   2997 	opspec := tf.OpSpec{
   2998 		Type: "MatrixInverse",
   2999 		Input: []tf.Input{
   3000 			input,
   3001 		},
   3002 		Attrs: attrs,
   3003 	}
   3004 	op := scope.AddOperation(opspec)
   3005 	return op.Output(0)
   3006 }
   3007 
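// exampleMatrixInverse is an editor-added usage sketch, not generated code:
// it inverts a batch of one 2x2 matrix, passing the adjoint option defined
// above. Const is assumed from this package's const.go.
func exampleMatrixInverse() tf.Output {
	s := NewScope()
	// Shape [1, 2, 2]: a batch containing one diagonal matrix diag(2, 4).
	m := Const(s, [][][]float32{{{2, 0}, {0, 4}}})
	// The adjoint of a real diagonal matrix is itself, so the result is
	// still diag(0.5, 0.25).
	return MatrixInverse(s, m, MatrixInverseAdjoint(true))
}
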
   3008 // Computes square of x element-wise.
   3009 //
   3010 // I.e., \\(y = x * x = x^2\\).
   3011 func Square(scope *Scope, x tf.Output) (y tf.Output) {
   3012 	if scope.Err() != nil {
   3013 		return
   3014 	}
   3015 	opspec := tf.OpSpec{
   3016 		Type: "Square",
   3017 		Input: []tf.Input{
   3018 			x,
   3019 		},
   3020 	}
   3021 	op := scope.AddOperation(opspec)
   3022 	return op.Output(0)
   3023 }
   3024 
// Computes exponential linear: `exp(features) - 1` if `features < 0`, `features` otherwise.
   3026 //
   3027 // See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
   3028 // ](http://arxiv.org/abs/1511.07289)
   3029 func Elu(scope *Scope, features tf.Output) (activations tf.Output) {
   3030 	if scope.Err() != nil {
   3031 		return
   3032 	}
   3033 	opspec := tf.OpSpec{
   3034 		Type: "Elu",
   3035 		Input: []tf.Input{
   3036 			features,
   3037 		},
   3038 	}
   3039 	op := scope.AddOperation(opspec)
   3040 	return op.Output(0)
   3041 }
   3042 
   3043 // Computes the reciprocal of x element-wise.
   3044 //
   3045 // I.e., \\(y = 1 / x\\).
   3046 func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
   3047 	if scope.Err() != nil {
   3048 		return
   3049 	}
   3050 	opspec := tf.OpSpec{
   3051 		Type: "Reciprocal",
   3052 		Input: []tf.Input{
   3053 			x,
   3054 		},
   3055 	}
   3056 	op := scope.AddOperation(opspec)
   3057 	return op.Output(0)
   3058 }
   3059 
   3060 // OrderedMapClearAttr is an optional argument to OrderedMapClear.
   3061 type OrderedMapClearAttr func(optionalAttr)
   3062 
   3063 // OrderedMapClearCapacity sets the optional capacity attribute to value.
   3064 // If not specified, defaults to 0
   3065 //
   3066 // REQUIRES: value >= 0
   3067 func OrderedMapClearCapacity(value int64) OrderedMapClearAttr {
   3068 	return func(m optionalAttr) {
   3069 		m["capacity"] = value
   3070 	}
   3071 }
   3072 
   3073 // OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value.
   3074 // If not specified, defaults to 0
   3075 //
   3076 // REQUIRES: value >= 0
   3077 func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr {
   3078 	return func(m optionalAttr) {
   3079 		m["memory_limit"] = value
   3080 	}
   3081 }
   3082 
   3083 // OrderedMapClearContainer sets the optional container attribute to value.
   3084 // If not specified, defaults to ""
   3085 func OrderedMapClearContainer(value string) OrderedMapClearAttr {
   3086 	return func(m optionalAttr) {
   3087 		m["container"] = value
   3088 	}
   3089 }
   3090 
   3091 // OrderedMapClearSharedName sets the optional shared_name attribute to value.
   3092 // If not specified, defaults to ""
   3093 func OrderedMapClearSharedName(value string) OrderedMapClearAttr {
   3094 	return func(m optionalAttr) {
   3095 		m["shared_name"] = value
   3096 	}
   3097 }
   3098 
// Removes all elements in the underlying container.
   3100 //
   3101 // Returns the created operation.
   3102 func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation) {
   3103 	if scope.Err() != nil {
   3104 		return
   3105 	}
   3106 	attrs := map[string]interface{}{"dtypes": dtypes}
   3107 	for _, a := range optional {
   3108 		a(attrs)
   3109 	}
   3110 	opspec := tf.OpSpec{
   3111 		Type: "OrderedMapClear",
   3112 
   3113 		Attrs: attrs,
   3114 	}
   3115 	return scope.AddOperation(opspec)
   3116 }
   3117 
   3118 // Computes the reciprocal of x element-wise.
   3119 //
   3120 // I.e., \\(y = 1 / x\\).
   3121 func Inv(scope *Scope, x tf.Output) (y tf.Output) {
   3122 	if scope.Err() != nil {
   3123 		return
   3124 	}
   3125 	opspec := tf.OpSpec{
   3126 		Type: "Inv",
   3127 		Input: []tf.Input{
   3128 			x,
   3129 		},
   3130 	}
   3131 	op := scope.AddOperation(opspec)
   3132 	return op.Output(0)
   3133 }
   3134 
   3135 // ComplexAbsAttr is an optional argument to ComplexAbs.
   3136 type ComplexAbsAttr func(optionalAttr)
   3137 
   3138 // ComplexAbsTout sets the optional Tout attribute to value.
   3139 // If not specified, defaults to DT_FLOAT
   3140 func ComplexAbsTout(value tf.DataType) ComplexAbsAttr {
   3141 	return func(m optionalAttr) {
   3142 		m["Tout"] = value
   3143 	}
   3144 }
   3145 
   3146 // Computes the complex absolute value of a tensor.
   3147 //
   3148 // Given a tensor `x` of complex numbers, this operation returns a tensor of type
   3149 // `float` or `double` that is the absolute value of each element in `x`. All
   3150 // elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
   3151 // value is computed as \\( \sqrt{a^2 + b^2}\\).
   3152 func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output) {
   3153 	if scope.Err() != nil {
   3154 		return
   3155 	}
   3156 	attrs := map[string]interface{}{}
   3157 	for _, a := range optional {
   3158 		a(attrs)
   3159 	}
   3160 	opspec := tf.OpSpec{
   3161 		Type: "ComplexAbs",
   3162 		Input: []tf.Input{
   3163 			x,
   3164 		},
   3165 		Attrs: attrs,
   3166 	}
   3167 	op := scope.AddOperation(opspec)
   3168 	return op.Output(0)
   3169 }
   3170 
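// exampleComplexAbs is an editor-added usage sketch, not generated code: it
// assumes the generated Complex wrapper (elsewhere in this file) to build a
// complex64 tensor from float32 parts, then takes its absolute value. With
// the default Tout of DT_FLOAT the result is float32.
func exampleComplexAbs() tf.Output {
	s := NewScope()
	z := Complex(s, Const(s, []float32{3}), Const(s, []float32{4}))
	return ComplexAbs(s, z) // |3 + 4i| = sqrt(3^2 + 4^2) = 5
}
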
   3171 // Returns the truth value of x AND y element-wise.
   3172 //
   3173 // *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
   3174 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   3175 func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   3176 	if scope.Err() != nil {
   3177 		return
   3178 	}
   3179 	opspec := tf.OpSpec{
   3180 		Type: "LogicalAnd",
   3181 		Input: []tf.Input{
   3182 			x, y,
   3183 		},
   3184 	}
   3185 	op := scope.AddOperation(opspec)
   3186 	return op.Output(0)
   3187 }
   3188 
// Casts `x` of type `SrcT` to `y` of type `DstT`.
   3190 func Cast(scope *Scope, x tf.Output, DstT tf.DataType) (y tf.Output) {
   3191 	if scope.Err() != nil {
   3192 		return
   3193 	}
   3194 	attrs := map[string]interface{}{"DstT": DstT}
   3195 	opspec := tf.OpSpec{
   3196 		Type: "Cast",
   3197 		Input: []tf.Input{
   3198 			x,
   3199 		},
   3200 		Attrs: attrs,
   3201 	}
   3202 	op := scope.AddOperation(opspec)
   3203 	return op.Output(0)
   3204 }
   3205 
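// exampleCast is an editor-added usage sketch, not generated code: unlike the
// optional attributes elsewhere in this file, DstT is a required attribute,
// so it is passed directly as an argument.
func exampleCast() tf.Output {
	s := NewScope()
	x := Const(s, []float32{1.7, -2.3})
	// Float-to-integer casts truncate toward zero: [1, -2].
	return Cast(s, x, tf.Int32)
}
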
   3206 // MaxAttr is an optional argument to Max.
   3207 type MaxAttr func(optionalAttr)
   3208 
   3209 // MaxKeepDims sets the optional keep_dims attribute to value.
   3210 //
   3211 // value: If true, retain reduced dimensions with length 1.
   3212 // If not specified, defaults to false
   3213 func MaxKeepDims(value bool) MaxAttr {
   3214 	return func(m optionalAttr) {
   3215 		m["keep_dims"] = value
   3216 	}
   3217 }
   3218 
   3219 // Computes the maximum of elements across dimensions of a tensor.
   3220 //
   3221 // Reduces `input` along the dimensions given in `axis`. Unless
   3222 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   3223 // `axis`. If `keep_dims` is true, the reduced dimensions are
   3224 // retained with length 1.
   3225 //
   3226 // Arguments:
   3227 //	input: The tensor to reduce.
   3228 //	axis: The dimensions to reduce. Must be in the range
   3229 // `[-rank(input), rank(input))`.
   3230 //
   3231 // Returns The reduced tensor.
   3232 func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output) {
   3233 	if scope.Err() != nil {
   3234 		return
   3235 	}
   3236 	attrs := map[string]interface{}{}
   3237 	for _, a := range optional {
   3238 		a(attrs)
   3239 	}
   3240 	opspec := tf.OpSpec{
   3241 		Type: "Max",
   3242 		Input: []tf.Input{
   3243 			input, axis,
   3244 		},
   3245 		Attrs: attrs,
   3246 	}
   3247 	op := scope.AddOperation(opspec)
   3248 	return op.Output(0)
   3249 }
   3250 
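// exampleMax is an editor-added usage sketch, not generated code: it reduces
// a 2x3 tensor over axis 1, keeping the reduced dimension with length 1 via
// the MaxKeepDims option defined above.
func exampleMax() tf.Output {
	s := NewScope()
	x := Const(s, [][]float32{{1, 5, 3}, {4, 2, 6}})
	axis := Const(s, []int32{1})
	// Result has shape [2, 1] and values [[5], [6]].
	return Max(s, x, axis, MaxKeepDims(true))
}
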
// Quantized batch normalization.
   3252 //
   3253 // This op is deprecated and will be removed in the future. Prefer
   3254 // `tf.nn.batch_normalization`.
   3255 //
   3256 // Arguments:
   3257 //	t: A 4D input Tensor.
   3258 //	t_min: The value represented by the lowest quantized input.
   3259 //	t_max: The value represented by the highest quantized input.
   3260 //	m: A 1D mean Tensor with size matching the last dimension of t.
   3261 // This is the first output from tf.nn.moments,
   3262 // or a saved moving average thereof.
   3263 //	m_min: The value represented by the lowest quantized mean.
   3264 //	m_max: The value represented by the highest quantized mean.
   3265 //	v: A 1D variance Tensor with size matching the last dimension of t.
   3266 // This is the second output from tf.nn.moments,
   3267 // or a saved moving average thereof.
   3268 //	v_min: The value represented by the lowest quantized variance.
   3269 //	v_max: The value represented by the highest quantized variance.
   3270 //	beta: A 1D beta Tensor with size matching the last dimension of t.
   3271 // An offset to be added to the normalized tensor.
   3272 //	beta_min: The value represented by the lowest quantized offset.
   3273 //	beta_max: The value represented by the highest quantized offset.
   3274 //	gamma: A 1D gamma Tensor with size matching the last dimension of t.
   3275 // If "scale_after_normalization" is true, this tensor will be multiplied
   3276 // with the normalized tensor.
   3277 //	gamma_min: The value represented by the lowest quantized gamma.
   3278 //	gamma_max: The value represented by the highest quantized gamma.
   3279 //
   3280 //	variance_epsilon: A small float number to avoid dividing by 0.
//	scale_after_normalization: A bool indicating whether the resulting tensor
// needs to be multiplied by gamma.
   3283 func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output) {
   3284 	if scope.Err() != nil {
   3285 		return
   3286 	}
   3287 	attrs := map[string]interface{}{"out_type": out_type, "variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
   3288 	opspec := tf.OpSpec{
   3289 		Type: "QuantizedBatchNormWithGlobalNormalization",
   3290 		Input: []tf.Input{
   3291 			t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
   3292 		},
   3293 		Attrs: attrs,
   3294 	}
   3295 	op := scope.AddOperation(opspec)
   3296 	return op.Output(0), op.Output(1), op.Output(2)
   3297 }
   3298 
   3299 // HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
   3300 type HistogramFixedWidthAttr func(optionalAttr)
   3301 
   3302 // HistogramFixedWidthDtype sets the optional dtype attribute to value.
   3303 // If not specified, defaults to DT_INT32
   3304 func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr {
   3305 	return func(m optionalAttr) {
   3306 		m["dtype"] = value
   3307 	}
   3308 }
   3309 
// Returns a histogram of values.
   3311 //
   3312 // Given the tensor `values`, this operation returns a rank 1 histogram counting
   3313 // the number of entries in `values` that fall into every bin.  The bins are
   3314 // equal width and determined by the arguments `value_range` and `nbins`.
   3315 //
   3316 // ```python
   3317 // # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
   3318 // nbins = 5
   3319 // value_range = [0.0, 5.0]
   3320 // new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
   3321 //
   3322 // with tf.get_default_session() as sess:
   3323 //   hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
//   tf.global_variables_initializer().run()
   3325 //   sess.run(hist) => [2, 1, 1, 0, 2]
   3326 // ```
   3327 //
   3328 // Arguments:
   3329 //	values: Numeric `Tensor`.
   3330 //	value_range: Shape [2] `Tensor` of same `dtype` as `values`.
   3331 // values <= value_range[0] will be mapped to hist[0],
   3332 // values >= value_range[1] will be mapped to hist[-1].
   3333 //	nbins: Scalar `int32 Tensor`.  Number of histogram bins.
   3334 //
   3335 // Returns A 1-D `Tensor` holding histogram of values.
   3336 func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {
   3337 	if scope.Err() != nil {
   3338 		return
   3339 	}
   3340 	attrs := map[string]interface{}{}
   3341 	for _, a := range optional {
   3342 		a(attrs)
   3343 	}
   3344 	opspec := tf.OpSpec{
   3345 		Type: "HistogramFixedWidth",
   3346 		Input: []tf.Input{
   3347 			values, value_range, nbins,
   3348 		},
   3349 		Attrs: attrs,
   3350 	}
   3351 	op := scope.AddOperation(opspec)
   3352 	return op.Output(0)
   3353 }
   3354 
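// exampleHistogramFixedWidth is an editor-added usage sketch, not generated
// code: it mirrors the Python example above with nbins = 5 and
// value_range = [0.0, 5.0], yielding counts [2, 1, 1, 0, 2] (int32 by
// default, per the dtype attribute above).
func exampleHistogramFixedWidth() tf.Output {
	s := NewScope()
	values := Const(s, []float32{-1.0, 0.0, 1.5, 2.0, 5.0, 15})
	valueRange := Const(s, []float32{0.0, 5.0})
	nbins := Const(s, int32(5))
	return HistogramFixedWidth(s, values, valueRange, nbins)
}
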
   3355 // Adds Tensor 'bias' to Tensor 'input' for Quantized types.
   3356 //
   3357 // Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
   3358 //
   3359 // Arguments:
   3360 //
   3361 //	bias: A 1D bias Tensor with size matching the last dimension of 'input'.
   3362 //	min_input: The float value that the lowest quantized input value represents.
   3363 //	max_input: The float value that the highest quantized input value represents.
   3364 //	min_bias: The float value that the lowest quantized bias value represents.
   3365 //	max_bias: The float value that the highest quantized bias value represents.
//
// Returns the float value that the lowest quantized output value represents and
// the float value that the highest quantized output value represents.
   3369 func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output) {
   3370 	if scope.Err() != nil {
   3371 		return
   3372 	}
   3373 	attrs := map[string]interface{}{"out_type": out_type}
   3374 	opspec := tf.OpSpec{
   3375 		Type: "QuantizedBiasAdd",
   3376 		Input: []tf.Input{
   3377 			input, bias, min_input, max_input, min_bias, max_bias,
   3378 		},
   3379 		Attrs: attrs,
   3380 	}
   3381 	op := scope.AddOperation(opspec)
   3382 	return op.Output(0), op.Output(1), op.Output(2)
   3383 }
   3384 
   3385 // Produces the average pool of the input tensor for quantized types.
   3386 //
   3387 // Arguments:
   3388 //	input: 4-D with shape `[batch, height, width, channels]`.
   3389 //	min_input: The float value that the lowest quantized input value represents.
   3390 //	max_input: The float value that the highest quantized input value represents.
   3391 //	ksize: The size of the window for each dimension of the input tensor.
   3392 // The length must be 4 to match the number of dimensions of the input.
   3393 //	strides: The stride of the sliding window for each dimension of the input
   3394 // tensor.  The length must be 4 to match the number of dimensions of the input.
   3395 //	padding: The type of padding algorithm to use.
   3396 //
// Returns the float value that the lowest quantized output value represents and
// the float value that the highest quantized output value represents.
   3398 func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
   3399 	if scope.Err() != nil {
   3400 		return
   3401 	}
   3402 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   3403 	opspec := tf.OpSpec{
   3404 		Type: "QuantizedAvgPool",
   3405 		Input: []tf.Input{
   3406 			input, min_input, max_input,
   3407 		},
   3408 		Attrs: attrs,
   3409 	}
   3410 	op := scope.AddOperation(opspec)
   3411 	return op.Output(0), op.Output(1), op.Output(2)
   3412 }
   3413 
// Updates the table to associate keys with values.
   3415 //
   3416 // The tensor `keys` must be of the same type as the keys of the table.
   3417 // The tensor `values` must be of the type of the table values.
   3418 //
   3419 // Arguments:
   3420 //	table_handle: Handle to the table.
   3421 //	keys: Any shape.  Keys to look up.
   3422 //	values: Values to associate with keys.
   3423 //
   3424 // Returns the created operation.
   3425 func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
   3426 	if scope.Err() != nil {
   3427 		return
   3428 	}
   3429 	opspec := tf.OpSpec{
   3430 		Type: "LookupTableInsertV2",
   3431 		Input: []tf.Input{
   3432 			table_handle, keys, values,
   3433 		},
   3434 	}
   3435 	return scope.AddOperation(opspec)
   3436 }
   3437 
   3438 // FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
   3439 type FractionalAvgPoolAttr func(optionalAttr)
   3440 
   3441 // FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
   3442 //
// value: When set to True, generates the pooling sequence in a
// pseudorandom fashion; otherwise, in a random fashion. See the paper
// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
// for the difference between pseudorandom and random.
   3447 // If not specified, defaults to false
   3448 func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
   3449 	return func(m optionalAttr) {
   3450 		m["pseudo_random"] = value
   3451 	}
   3452 }
   3453 
   3454 // FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
   3455 //
// value: When set to True, the values at the boundary of adjacent pooling
// cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used
// twice; the result would be [41/3, 26/3] for fractional avg pooling.
   3465 // If not specified, defaults to false
   3466 func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr {
   3467 	return func(m optionalAttr) {
   3468 		m["overlapping"] = value
   3469 	}
   3470 }
   3471 
   3472 // FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
   3473 //
// value: When set to True, a fixed pooling region will be used when
// iterating over a FractionalAvgPool node in the computation graph. Mainly
// used in unit tests to make FractionalAvgPool deterministic.
   3477 // If not specified, defaults to false
   3478 func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr {
   3479 	return func(m optionalAttr) {
   3480 		m["deterministic"] = value
   3481 	}
   3482 }
   3483 
   3484 // FractionalAvgPoolSeed sets the optional seed attribute to value.
   3485 //
   3486 // value: If either seed or seed2 are set to be non-zero, the random number
   3487 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   3488 // random seed.
   3489 // If not specified, defaults to 0
   3490 func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr {
   3491 	return func(m optionalAttr) {
   3492 		m["seed"] = value
   3493 	}
   3494 }
   3495 
   3496 // FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
   3497 //
// value: A second seed to avoid seed collision.
   3499 // If not specified, defaults to 0
   3500 func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
   3501 	return func(m optionalAttr) {
   3502 		m["seed2"] = value
   3503 	}
   3504 }
   3505 
   3506 // Performs fractional average pooling on the input.
   3507 //
// Fractional average pooling is similar to fractional max pooling in the
// pooling region generation step. The only difference is that, after the
// pooling regions are generated, a mean operation is performed instead of a
// max operation in each pooling region.
   3512 //
   3513 // Arguments:
   3514 //	value: 4-D with shape `[batch, height, width, channels]`.
//	pooling_ratio: Pooling ratio for each dimension of `value`. Pooling is
// currently only supported on the row and col dimensions, and each ratio
// should be >= 1.0. For example, a valid pooling ratio looks like
// [1.0, 1.44, 1.73, 1.0]: the first and last elements must be 1.0 because
// pooling on the batch and channels dimensions is not allowed; 1.44 and 1.73
// are the pooling ratios on the height and width dimensions respectively.
   3521 //
// Returns the output tensor after fractional avg pooling, the row pooling
// sequence (needed to calculate the gradient), and the column pooling sequence
// (needed to calculate the gradient).
   3523 func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
   3524 	if scope.Err() != nil {
   3525 		return
   3526 	}
   3527 	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
   3528 	for _, a := range optional {
   3529 		a(attrs)
   3530 	}
   3531 	opspec := tf.OpSpec{
   3532 		Type: "FractionalAvgPool",
   3533 		Input: []tf.Input{
   3534 			value,
   3535 		},
   3536 		Attrs: attrs,
   3537 	}
   3538 	op := scope.AddOperation(opspec)
   3539 	return op.Output(0), op.Output(1), op.Output(2)
   3540 }
   3541 
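// exampleFractionalAvgPool is an editor-added usage sketch, not generated
// code: pooling_ratio keeps the batch and channels ratios at 1.0, as the
// argument documentation above requires. Placeholder, PlaceholderShape and
// tf.MakeShape are assumed helpers.
func exampleFractionalAvgPool() (tf.Output, tf.Output, tf.Output) {
	s := NewScope()
	value := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(1, 32, 32, 3)))
	return FractionalAvgPool(s, value, []float32{1.0, 1.44, 1.73, 1.0},
		FractionalAvgPoolDeterministic(true), // fixed regions, e.g. for tests
		FractionalAvgPoolSeed(7),
	)
}
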
   3542 // RandomCropAttr is an optional argument to RandomCrop.
   3543 type RandomCropAttr func(optionalAttr)
   3544 
   3545 // RandomCropSeed sets the optional seed attribute to value.
   3546 //
   3547 // value: If either seed or seed2 are set to be non-zero, the random number
   3548 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   3549 // random seed.
   3550 // If not specified, defaults to 0
   3551 func RandomCropSeed(value int64) RandomCropAttr {
   3552 	return func(m optionalAttr) {
   3553 		m["seed"] = value
   3554 	}
   3555 }
   3556 
   3557 // RandomCropSeed2 sets the optional seed2 attribute to value.
   3558 //
// value: A second seed to avoid seed collision.
   3560 // If not specified, defaults to 0
   3561 func RandomCropSeed2(value int64) RandomCropAttr {
   3562 	return func(m optionalAttr) {
   3563 		m["seed2"] = value
   3564 	}
   3565 }
   3566 
   3567 // Randomly crop `image`.
   3568 //
   3569 // DEPRECATED at GraphDef version 8: Random crop is now pure Python
   3570 //
// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
// width.  The values must be non-negative.
   3573 //
   3574 // This Op picks a random location in `image` and crops a `height` by `width`
   3575 // rectangle from that location.  The random location is picked so the cropped
   3576 // area will fit inside the original image.
   3577 //
   3578 // Arguments:
   3579 //	image: 3-D of shape `[height, width, channels]`.
//	size: 1-D of length 2 containing: `crop_height`, `crop_width`.
//
// Returns 3-D of shape `[crop_height, crop_width, channels]`.
   3583 func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output) {
   3584 	if scope.Err() != nil {
   3585 		return
   3586 	}
   3587 	attrs := map[string]interface{}{}
   3588 	for _, a := range optional {
   3589 		a(attrs)
   3590 	}
   3591 	opspec := tf.OpSpec{
   3592 		Type: "RandomCrop",
   3593 		Input: []tf.Input{
   3594 			image, size,
   3595 		},
   3596 		Attrs: attrs,
   3597 	}
   3598 	op := scope.AddOperation(opspec)
   3599 	return op.Output(0)
   3600 }
   3601 
   3602 // TopKV2Attr is an optional argument to TopKV2.
   3603 type TopKV2Attr func(optionalAttr)
   3604 
   3605 // TopKV2Sorted sets the optional sorted attribute to value.
   3606 //
   3607 // value: If true the resulting `k` elements will be sorted by the values in
   3608 // descending order.
   3609 // If not specified, defaults to true
   3610 func TopKV2Sorted(value bool) TopKV2Attr {
   3611 	return func(m optionalAttr) {
   3612 		m["sorted"] = value
   3613 	}
   3614 }
   3615 
   3616 // Finds values and indices of the `k` largest elements for the last dimension.
   3617 //
   3618 // If the input is a vector (rank-1), finds the `k` largest entries in the vector
   3619 // and outputs their values and indices as vectors.  Thus `values[j]` is the
   3620 // `j`-th largest entry in `input`, and its index is `indices[j]`.
   3621 //
   3622 // For matrices (resp. higher rank input), computes the top `k` entries in each
   3623 // row (resp. vector along the last dimension).  Thus,
   3624 //
   3625 //     values.shape = indices.shape = input.shape[:-1] + [k]
   3626 //
   3627 // If two elements are equal, the lower-index element appears first.
   3628 //
   3629 // Arguments:
   3630 //	input: 1-D or higher with last dimension at least `k`.
   3631 //	k: 0-D.  Number of top elements to look for along the last dimension (along each
   3632 // row for matrices).
   3633 //
// Returns the `k` largest elements along each last-dimensional slice, and the
// indices of `values` within the last dimension of `input`.
   3635 func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output) {
   3636 	if scope.Err() != nil {
   3637 		return
   3638 	}
   3639 	attrs := map[string]interface{}{}
   3640 	for _, a := range optional {
   3641 		a(attrs)
   3642 	}
   3643 	opspec := tf.OpSpec{
   3644 		Type: "TopKV2",
   3645 		Input: []tf.Input{
   3646 			input, k,
   3647 		},
   3648 		Attrs: attrs,
   3649 	}
   3650 	op := scope.AddOperation(opspec)
   3651 	return op.Output(0), op.Output(1)
   3652 }
   3653 
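// exampleTopKV2 is an editor-added usage sketch, not generated code: it takes
// the top 2 entries of each row of a 2x4 matrix; with the default sorted=true
// the values come back in descending order.
func exampleTopKV2() (tf.Output, tf.Output) {
	s := NewScope()
	x := Const(s, [][]float32{{1, 9, 3, 7}, {8, 2, 6, 4}})
	k := Const(s, int32(2))
	// values: [[9, 7], [8, 6]]; indices: [[1, 3], [0, 2]].
	return TopKV2(s, x, k)
}
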
   3654 // Returns x // y element-wise.
   3655 //
   3656 // *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
   3657 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   3658 func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   3659 	if scope.Err() != nil {
   3660 		return
   3661 	}
   3662 	opspec := tf.OpSpec{
   3663 		Type: "FloorDiv",
   3664 		Input: []tf.Input{
   3665 			x, y,
   3666 		},
   3667 	}
   3668 	op := scope.AddOperation(opspec)
   3669 	return op.Output(0)
   3670 }
   3671 
// Returns a batched diagonal tensor with given batched diagonal values.
   3673 //
   3674 // Given a `diagonal`, this operation returns a tensor with the `diagonal` and
   3675 // everything else padded with zeros. The diagonal is computed as follows:
   3676 //
// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`; then the output is a
// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
   3679 //
   3680 // `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
   3681 //
   3682 // For example:
   3683 //
   3684 // ```
   3685 // # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
   3686 //
   3687 // and diagonal.shape = (2, 4)
   3688 //
   3689 // tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
   3690 //                                      [0, 2, 0, 0]
   3691 //                                      [0, 0, 3, 0]
   3692 //                                      [0, 0, 0, 4]],
   3693 //                                     [[5, 0, 0, 0]
   3694 //                                      [0, 6, 0, 0]
   3695 //                                      [0, 0, 7, 0]
   3696 //                                      [0, 0, 0, 8]]]
   3697 //
   3698 // which has shape (2, 4, 4)
   3699 // ```
   3700 //
   3701 // Arguments:
   3702 //	diagonal: Rank `k`, where `k >= 1`.
   3703 //
   3704 // Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
   3705 func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output) {
   3706 	if scope.Err() != nil {
   3707 		return
   3708 	}
   3709 	opspec := tf.OpSpec{
   3710 		Type: "MatrixDiag",
   3711 		Input: []tf.Input{
   3712 			diagonal,
   3713 		},
   3714 	}
   3715 	op := scope.AddOperation(opspec)
   3716 	return op.Output(0)
   3717 }
   3718 
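// exampleMatrixDiag is an editor-added usage sketch, not generated code: it
// reproduces the documentation example above, turning a (2, 4) tensor of
// diagonals into a (2, 4, 4) batch of diagonal matrices.
func exampleMatrixDiag() tf.Output {
	s := NewScope()
	diagonal := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
	return MatrixDiag(s, diagonal)
}
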
   3719 // Says whether the targets are in the top `K` predictions.
   3720 //
   3721 // This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
   3722 // prediction for the target class is among the top `k` predictions among
   3723 // all predictions for example `i`. Note that the behavior of `InTopK` differs
   3724 // from the `TopK` op in its handling of ties; if multiple classes have the
   3725 // same prediction value and straddle the top-`k` boundary, all of those
   3726 // classes are considered to be in the top `k`.
   3727 //
   3728 // More formally, let
   3729 //
   3730 //   \\(predictions_i\\) be the predictions for all classes for example `i`,
   3731 //   \\(targets_i\\) be the target class for example `i`,
   3732 //   \\(out_i\\) be the output for example `i`,
   3733 //
   3734 // $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
   3735 //
   3736 // Arguments:
   3737 //	predictions: A `batch_size` x `classes` tensor.
   3738 //	targets: A `batch_size` vector of class ids.
   3739 //	k: Number of top elements to look at for computing precision.
   3740 //
// Returns computed precision at `k` as a `bool Tensor`.
   3742 func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output) {
   3743 	if scope.Err() != nil {
   3744 		return
   3745 	}
   3746 	attrs := map[string]interface{}{"k": k}
   3747 	opspec := tf.OpSpec{
   3748 		Type: "InTopK",
   3749 		Input: []tf.Input{
   3750 			predictions, targets,
   3751 		},
   3752 		Attrs: attrs,
   3753 	}
   3754 	op := scope.AddOperation(opspec)
   3755 	return op.Output(0)
   3756 }
   3757 
// Given a quantized tensor described by (input, input_min, input_max), outputs
// a range that covers the actual values present in that tensor.  This op is
// typically used to produce the requested_output_min and requested_output_max
// for Requantize.
   3763 //
   3764 // Arguments:
   3765 //
   3766 //	input_min: The float value that the minimum quantized input value represents.
   3767 //	input_max: The float value that the maximum quantized input value represents.
   3768 //
// Returns the computed min output and the computed max output.
   3770 func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output) {
   3771 	if scope.Err() != nil {
   3772 		return
   3773 	}
   3774 	opspec := tf.OpSpec{
   3775 		Type: "RequantizationRange",
   3776 		Input: []tf.Input{
   3777 			input, input_min, input_max,
   3778 		},
   3779 	}
   3780 	op := scope.AddOperation(opspec)
   3781 	return op.Output(0), op.Output(1)
   3782 }
   3783 
   3784 // Returns the truth value of (x <= y) element-wise.
   3785 //
   3786 // *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
   3787 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   3788 func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   3789 	if scope.Err() != nil {
   3790 		return
   3791 	}
   3792 	opspec := tf.OpSpec{
   3793 		Type: "LessEqual",
   3794 		Input: []tf.Input{
   3795 			x, y,
   3796 		},
   3797 	}
   3798 	op := scope.AddOperation(opspec)
   3799 	return op.Output(0)
   3800 }
   3801 
   3802 // Computes softmax activations.
   3803 //
   3804 // For each batch `i` and class `j` we have
   3805 //
   3806 //     softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
   3807 //
   3808 // Arguments:
   3809 //	logits: 2-D with shape `[batch_size, num_classes]`.
   3810 //
   3811 // Returns Same shape as `logits`.
   3812 func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output) {
   3813 	if scope.Err() != nil {
   3814 		return
   3815 	}
   3816 	opspec := tf.OpSpec{
   3817 		Type: "Softmax",
   3818 		Input: []tf.Input{
   3819 			logits,
   3820 		},
   3821 	}
   3822 	op := scope.AddOperation(opspec)
   3823 	return op.Output(0)
   3824 }
   3825 
   3826 // DecodeBmpAttr is an optional argument to DecodeBmp.
   3827 type DecodeBmpAttr func(optionalAttr)
   3828 
   3829 // DecodeBmpChannels sets the optional channels attribute to value.
   3830 // If not specified, defaults to 0
   3831 func DecodeBmpChannels(value int64) DecodeBmpAttr {
   3832 	return func(m optionalAttr) {
   3833 		m["channels"] = value
   3834 	}
   3835 }
   3836 
// Decodes the first frame of a BMP-encoded image to a uint8 tensor.
   3838 //
   3839 // The attr `channels` indicates the desired number of color channels for the
   3840 // decoded image.
   3841 //
   3842 // Accepted values are:
   3843 //
   3844 // *   0: Use the number of channels in the BMP-encoded image.
// *   3: Output an RGB image.
// *   4: Output an RGBA image.
   3847 //
   3848 // Arguments:
   3849 //	contents: 0-D.  The BMP-encoded image.
   3850 //
// Returns 3-D with shape `[height, width, channels]`, in RGB order.
   3852 func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output) {
   3853 	if scope.Err() != nil {
   3854 		return
   3855 	}
   3856 	attrs := map[string]interface{}{}
   3857 	for _, a := range optional {
   3858 		a(attrs)
   3859 	}
   3860 	opspec := tf.OpSpec{
   3861 		Type: "DecodeBmp",
   3862 		Input: []tf.Input{
   3863 			contents,
   3864 		},
   3865 		Attrs: attrs,
   3866 	}
   3867 	op := scope.AddOperation(opspec)
   3868 	return op.Output(0)
   3869 }
   3870 
   3871 // Computes softsign gradients for a softsign operation.
   3872 //
   3873 // Arguments:
   3874 //	gradients: The backpropagated gradients to the corresponding softsign operation.
   3875 //	features: The features passed as input to the corresponding softsign operation.
   3876 //
   3877 // Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
   3878 func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
   3879 	if scope.Err() != nil {
   3880 		return
   3881 	}
   3882 	opspec := tf.OpSpec{
   3883 		Type: "SoftsignGrad",
   3884 		Input: []tf.Input{
   3885 			gradients, features,
   3886 		},
   3887 	}
   3888 	op := scope.AddOperation(opspec)
   3889 	return op.Output(0)
   3890 }
   3891 
   3892 // BatchMatMulAttr is an optional argument to BatchMatMul.
   3893 type BatchMatMulAttr func(optionalAttr)
   3894 
   3895 // BatchMatMulAdjX sets the optional adj_x attribute to value.
   3896 //
   3897 // value: If `True`, adjoint the slices of `x`. Defaults to `False`.
   3898 // If not specified, defaults to false
   3899 func BatchMatMulAdjX(value bool) BatchMatMulAttr {
   3900 	return func(m optionalAttr) {
   3901 		m["adj_x"] = value
   3902 	}
   3903 }
   3904 
   3905 // BatchMatMulAdjY sets the optional adj_y attribute to value.
   3906 //
   3907 // value: If `True`, adjoint the slices of `y`. Defaults to `False`.
   3908 // If not specified, defaults to false
   3909 func BatchMatMulAdjY(value bool) BatchMatMulAttr {
   3910 	return func(m optionalAttr) {
   3911 		m["adj_y"] = value
   3912 	}
   3913 }
   3914 
   3915 // Multiplies slices of two tensors in batches.
   3916 //
   3917 // Multiplies all slices of `Tensor` `x` and `y` (each slice can be
   3918 // viewed as an element of a batch), and arranges the individual results
   3919 // in a single output tensor of the same batch size. Each of the
   3920 // individual slices can optionally be adjointed (to adjoint a matrix
   3921 // means to transpose and conjugate it) before multiplication by setting
// the `adj_x` or `adj_y` flag to `True`; both default to `False`.
   3923 //
   3924 // The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
   3925 // and `[..., r_y, c_y]`.
   3926 //
   3927 // The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
   3928 //
   3929 //     r_o = c_x if adj_x else r_x
   3930 //     c_o = r_y if adj_y else c_y
   3931 //
   3932 // It is computed as:
   3933 //
   3934 //     output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
   3935 //
   3936 // Arguments:
   3937 //	x: 2-D or higher with shape `[..., r_x, c_x]`.
   3938 //	y: 2-D or higher with shape `[..., r_y, c_y]`.
   3939 //
   3940 // Returns 3-D or higher with shape `[..., r_o, c_o]`
   3941 func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output) {
   3942 	if scope.Err() != nil {
   3943 		return
   3944 	}
   3945 	attrs := map[string]interface{}{}
   3946 	for _, a := range optional {
   3947 		a(attrs)
   3948 	}
   3949 	opspec := tf.OpSpec{
   3950 		Type: "BatchMatMul",
   3951 		Input: []tf.Input{
   3952 			x, y,
   3953 		},
   3954 		Attrs: attrs,
   3955 	}
   3956 	op := scope.AddOperation(opspec)
   3957 	return op.Output(0)
   3958 }
   3959 
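// exampleBatchMatMul is an editor-added usage sketch, not generated code: it
// multiplies a batch of one [2, 3] slice by a [4, 3] slice whose last two
// dimensions are adjointed via the BatchMatMulAdjY option above, giving a
// [1, 2, 4] result.
func exampleBatchMatMul() tf.Output {
	s := NewScope()
	x := Const(s, [][][]float32{{{1, 2, 3}, {4, 5, 6}}}) // shape [1, 2, 3]
	y := Const(s, [][][]float32{{ // shape [1, 4, 3]
		{1, 0, 0}, {0, 1, 0}, {0, 0, 1}, {1, 1, 1},
	}})
	return BatchMatMul(s, x, y, BatchMatMulAdjY(true))
}
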
   3960 // Pads a tensor.
   3961 //
   3962 // This operation pads `input` according to the `paddings` and `constant_values`
   3963 // you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
   3964 // the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
   3965 // how many padding values to add before the contents of `input` in that dimension,
   3966 // and `paddings[D, 1]` indicates how many padding values to add after the contents
   3967 // of `input` in that dimension. `constant_values` is a scalar tensor of the same
   3968 // type as `input` that indicates the value to use for padding `input`.
   3969 //
   3970 // The padded size of each dimension D of the output is:
   3971 //
   3972 // `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
   3973 //
   3974 // For example:
   3975 //
   3976 // ```
   3977 // # 't' is [[1, 1], [2, 2]]
   3978 // # 'paddings' is [[1, 1], [2, 2]]
   3979 // # 'constant_values' is 0
   3980 // # rank of 't' is 2
   3981 // pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
   3982 //                       [0, 0, 1, 1, 0, 0]
   3983 //                       [0, 0, 2, 2, 0, 0]
   3984 //                       [0, 0, 0, 0, 0, 0]]
   3985 // ```
   3986 func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output) {
   3987 	if scope.Err() != nil {
   3988 		return
   3989 	}
   3990 	opspec := tf.OpSpec{
   3991 		Type: "PadV2",
   3992 		Input: []tf.Input{
   3993 			input, paddings, constant_values,
   3994 		},
   3995 	}
   3996 	op := scope.AddOperation(opspec)
   3997 	return op.Output(0)
   3998 }
   3999 
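// examplePadV2 is an editor-added usage sketch, not generated code: it
// reproduces the documentation example above, padding a 2x2 tensor to 4x6
// with the scalar constant value 0.
func examplePadV2() tf.Output {
	s := NewScope()
	t := Const(s, [][]int32{{1, 1}, {2, 2}})
	paddings := Const(s, [][]int32{{1, 1}, {2, 2}}) // [before, after] per dim
	return PadV2(s, t, paddings, Const(s, int32(0)))
}
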
   4000 // Returns which elements of x are NaN.
   4001 //
   4002 // @compatibility(numpy)
   4003 // Equivalent to np.isnan
   4004 // @end_compatibility
   4005 func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
   4006 	if scope.Err() != nil {
   4007 		return
   4008 	}
   4009 	opspec := tf.OpSpec{
   4010 		Type: "IsNan",
   4011 		Input: []tf.Input{
   4012 			x,
   4013 		},
   4014 	}
   4015 	op := scope.AddOperation(opspec)
   4016 	return op.Output(0)
   4017 }
   4018 
   4019 // FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
   4020 type FractionalAvgPoolGradAttr func(optionalAttr)
   4021 
   4022 // FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
   4023 //
// value: When set to True, the values at the boundary of adjacent pooling
// cells are used by both cells. For example:
//
// `index  0  1  2  3  4`
//
// `value  20 5  16 3  7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used
// twice; the result would be [41/3, 26/3] for fractional avg pooling.
   4033 // If not specified, defaults to false
   4034 func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr {
   4035 	return func(m optionalAttr) {
   4036 		m["overlapping"] = value
   4037 	}
   4038 }
   4039 
   4040 // Computes gradient of the FractionalAvgPool function.
   4041 //
// Unlike FractionalMaxPoolGrad, FractionalAvgPoolGrad does not need to find an
// arg_max; it just evenly back-propagates each element of out_backprop to the
// indices that form the same pooling cell. Therefore it only needs the shape of
// the original input tensor, instead of the whole tensor.
   4047 //
   4048 // Arguments:
   4049 //	orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
   4050 //	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
   4051 // w.r.t. the output of `fractional_avg_pool`.
//	row_pooling_sequence: row pooling sequence, which forms pooling regions
// together with col_pooling_sequence.
//	col_pooling_sequence: column pooling sequence, which forms pooling regions
// together with row_pooling_sequence.
   4056 //
   4057 // Returns 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
   4058 func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output) {
   4059 	if scope.Err() != nil {
   4060 		return
   4061 	}
   4062 	attrs := map[string]interface{}{}
   4063 	for _, a := range optional {
   4064 		a(attrs)
   4065 	}
   4066 	opspec := tf.OpSpec{
   4067 		Type: "FractionalAvgPoolGrad",
   4068 		Input: []tf.Input{
   4069 			orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence,
   4070 		},
   4071 		Attrs: attrs,
   4072 	}
   4073 	op := scope.AddOperation(opspec)
   4074 	return op.Output(0)
   4075 }
   4076 
   4077 // Computes gradients for the exponential linear (Elu) operation.
   4078 //
   4079 // Arguments:
   4080 //	gradients: The backpropagated gradients to the corresponding Elu operation.
   4081 //	outputs: The outputs of the corresponding Elu operation.
   4082 //
   4083 // Returns The gradients: `gradients * (outputs + 1)` if outputs < 0,
   4084 // `gradients` otherwise.
   4085 func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
   4086 	if scope.Err() != nil {
   4087 		return
   4088 	}
   4089 	opspec := tf.OpSpec{
   4090 		Type: "EluGrad",
   4091 		Input: []tf.Input{
   4092 			gradients, outputs,
   4093 		},
   4094 	}
   4095 	op := scope.AddOperation(opspec)
   4096 	return op.Output(0)
   4097 }
   4098 
// Converts each string in the input Tensor to its hash, modulo a number of buckets.
   4100 //
   4101 // The hash function is deterministic on the content of the string within the
   4102 // process.
   4103 //
   4104 // Note that the hash function may change from time to time.
// This functionality will be deprecated, and it is recommended to use
   4106 // `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
   4107 //
   4108 // Arguments:
   4109 //
   4110 //	num_buckets: The number of buckets.
   4111 //
   4112 // Returns A Tensor of the same shape as the input `string_tensor`.
   4113 func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output) {
   4114 	if scope.Err() != nil {
   4115 		return
   4116 	}
   4117 	attrs := map[string]interface{}{"num_buckets": num_buckets}
   4118 	opspec := tf.OpSpec{
   4119 		Type: "StringToHashBucket",
   4120 		Input: []tf.Input{
   4121 			string_tensor,
   4122 		},
   4123 		Attrs: attrs,
   4124 	}
   4125 	op := scope.AddOperation(opspec)
   4126 	return op.Output(0)
   4127 }
   4128 
   4129 // Creates a dataset that contains `count` elements from the `input_dataset`.
   4130 //
   4131 // Arguments:
   4132 //
   4133 //	count: A scalar representing the number of elements from the `input_dataset`
   4134 // that should be taken. A value of `-1` indicates that all of `input_dataset`
   4135 // is taken.
   4136 //
   4137 //
   4138 func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   4139 	if scope.Err() != nil {
   4140 		return
   4141 	}
   4142 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   4143 	opspec := tf.OpSpec{
   4144 		Type: "TakeDataset",
   4145 		Input: []tf.Input{
   4146 			input_dataset, count,
   4147 		},
   4148 		Attrs: attrs,
   4149 	}
   4150 	op := scope.AddOperation(opspec)
   4151 	return op.Output(0)
   4152 }
   4153 
   4154 // Computes rectified linear 6: `min(max(features, 0), 6)`.
   4155 func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
   4156 	if scope.Err() != nil {
   4157 		return
   4158 	}
   4159 	opspec := tf.OpSpec{
   4160 		Type: "Relu6",
   4161 		Input: []tf.Input{
   4162 			features,
   4163 		},
   4164 	}
   4165 	op := scope.AddOperation(opspec)
   4166 	return op.Output(0)
   4167 }
   4168 
   4169 // Computes rectified linear gradients for a Relu operation.
   4170 //
   4171 // Arguments:
   4172 //	gradients: The backpropagated gradients to the corresponding Relu operation.
   4173 //	features: The features passed as input to the corresponding Relu operation, OR
   4174 // the outputs of that operation (both work equivalently).
   4175 //
   4176 // Returns `gradients * (features > 0)`.
   4177 func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
   4178 	if scope.Err() != nil {
   4179 		return
   4180 	}
   4181 	opspec := tf.OpSpec{
   4182 		Type: "ReluGrad",
   4183 		Input: []tf.Input{
   4184 			gradients, features,
   4185 		},
   4186 	}
   4187 	op := scope.AddOperation(opspec)
   4188 	return op.Output(0)
   4189 }
   4190 
   4191 // Computes the gradient of morphological 2-D dilation with respect to the input.
   4192 //
   4193 // Arguments:
   4194 //	input: 4-D with shape `[batch, in_height, in_width, depth]`.
   4195 //	filter: 3-D with shape `[filter_height, filter_width, depth]`.
   4196 //	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
   4197 //	strides: 1-D of length 4. The stride of the sliding window for each dimension of
   4198 // the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
   4199 //	rates: 1-D of length 4. The input stride for atrous morphological dilation.
   4200 // Must be: `[1, rate_height, rate_width, 1]`.
   4201 //	padding: The type of padding algorithm to use.
   4202 //
   4203 // Returns 4-D with shape `[batch, in_height, in_width, depth]`.
   4204 func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output) {
   4205 	if scope.Err() != nil {
   4206 		return
   4207 	}
   4208 	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
   4209 	opspec := tf.OpSpec{
   4210 		Type: "Dilation2DBackpropInput",
   4211 		Input: []tf.Input{
   4212 			input, filter, out_backprop,
   4213 		},
   4214 		Attrs: attrs,
   4215 	}
   4216 	op := scope.AddOperation(opspec)
   4217 	return op.Output(0)
   4218 }
   4219 
   4220 // CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
   4221 type CTCBeamSearchDecoderAttr func(optionalAttr)
   4222 
   4223 // CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
   4224 //
   4225 // value: If true, merge repeated classes in output.
   4226 // If not specified, defaults to true
   4227 func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr {
   4228 	return func(m optionalAttr) {
   4229 		m["merge_repeated"] = value
   4230 	}
   4231 }
   4232 
   4233 // Performs beam search decoding on the logits given in input.
   4234 //
   4235 // A note about the attribute merge_repeated: For the beam search decoder,
   4236 // this means that if consecutive entries in a beam are the same, only
   4237 // the first of these is emitted.  That is, when the top path is "A B B B B",
   4238 // "A B" is returned if merge_repeated = True but "A B B B B" is
   4239 // returned if merge_repeated = False.
   4240 //
   4241 // Arguments:
   4242 //	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
   4243 //	sequence_length: A vector containing sequence lengths, size `(batch)`.
   4244 //	beam_width: A scalar >= 0 (beam search beam width).
   4245 //	top_paths: A scalar >= 0, <= beam_width (controls output size).
   4246 //
// Returns:
//	decoded_indices: A list (length: top_paths) of indices matrices.  Matrix j,
// size `(total_decoded_outputs[j] x 2)`, has the indices of a
// `SparseTensor<int64, 2>`.  The rows store: [batch, time].
//	decoded_values: A list (length: top_paths) of values vectors.  Vector j,
// size `(length total_decoded_outputs[j])`, has the values of a
// `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
//	decoded_shape: A list (length: top_paths) of shape vectors.  Vector j,
// size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
// Its values are: `[batch_size, max_decoded_length[j]]`.
//	log_probability: A matrix, shaped: `(batch_size x top_paths)`.  The
// sequence log-probabilities.
   4255 func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output) {
   4256 	if scope.Err() != nil {
   4257 		return
   4258 	}
   4259 	attrs := map[string]interface{}{"beam_width": beam_width, "top_paths": top_paths}
   4260 	for _, a := range optional {
   4261 		a(attrs)
   4262 	}
   4263 	opspec := tf.OpSpec{
   4264 		Type: "CTCBeamSearchDecoder",
   4265 		Input: []tf.Input{
   4266 			inputs, sequence_length,
   4267 		},
   4268 		Attrs: attrs,
   4269 	}
   4270 	op := scope.AddOperation(opspec)
   4271 	if scope.Err() != nil {
   4272 		return
   4273 	}
   4274 	var idx int
   4275 	var err error
   4276 	if decoded_indices, idx, err = makeOutputList(op, idx, "decoded_indices"); err != nil {
   4277 		scope.UpdateErr("CTCBeamSearchDecoder", err)
   4278 		return
   4279 	}
   4280 	if decoded_values, idx, err = makeOutputList(op, idx, "decoded_values"); err != nil {
   4281 		scope.UpdateErr("CTCBeamSearchDecoder", err)
   4282 		return
   4283 	}
   4284 	if decoded_shape, idx, err = makeOutputList(op, idx, "decoded_shape"); err != nil {
   4285 		scope.UpdateErr("CTCBeamSearchDecoder", err)
   4286 		return
   4287 	}
   4288 	log_probability = op.Output(idx)
   4289 	return decoded_indices, decoded_values, decoded_shape, log_probability
   4290 }
   4291 
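// exampleCTCBeamSearchDecoder is an editor-added usage sketch, not generated
// code: it shows how the list-valued outputs come back as Go slices, one
// entry per requested path. Placeholder, PlaceholderShape and tf.MakeShape
// are assumed helpers from this package and the tf package; all dimensions
// are illustrative only.
func exampleCTCBeamSearchDecoder() {
	s := NewScope()
	// Logits with shape (max_time x batch_size x num_classes) = (50, 2, 10).
	inputs := Placeholder(s, tf.Float, PlaceholderShape(tf.MakeShape(50, 2, 10)))
	seqLen := Const(s, []int32{50, 50}) // one sequence length per batch element
	indices, values, shapes, logProb := CTCBeamSearchDecoder(s, inputs, seqLen, 10, 2,
		CTCBeamSearchDecoderMergeRepeated(false))
	// len(indices) == len(values) == len(shapes) == top_paths (2 here).
	_, _, _, _ = indices, values, shapes, logProb
}
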
   4292 // AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
   4293 type AudioSpectrogramAttr func(optionalAttr)
   4294 
   4295 // AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
   4296 //
   4297 // value: Whether to return the squared magnitude or just the
   4298 // magnitude. Using squared magnitude can avoid extra calculations.
   4299 // If not specified, defaults to false
   4300 func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
   4301 	return func(m optionalAttr) {
   4302 		m["magnitude_squared"] = value
   4303 	}
   4304 }
   4305 
   4306 // Produces a visualization of audio data over time.
   4307 //
   4308 // Spectrograms are a standard way of representing audio information as a series of
   4309 // slices of frequency information, one slice for each window of time. By joining
   4310 // these together into a sequence, they form a distinctive fingerprint of the sound
   4311 // over time.
   4312 //
   4313 // This op expects to receive audio data as an input, stored as floats in the range
   4314 // -1 to 1, together with a window width in samples, and a stride specifying how
   4315 // far to move the window between slices. From this it generates a three
   4316 // dimensional output. The lowest dimension has an amplitude value for each
   4317 // frequency during that time slice. The next dimension is time, with successive
   4318 // frequency slices. The final dimension is for the channels in the input, so a
// stereo audio input would have two channels here, for example.
   4320 //
   4321 // This means the layout when converted and saved as an image is rotated 90 degrees
   4322 // clockwise from a typical spectrogram. Time is descending down the Y axis, and
   4323 // the frequency decreases from left to right.
   4324 //
// Each value in the result represents the magnitude (the square root of the sum
// of the squares of the real and imaginary parts) of an FFT on the current
// window of samples. In this way, the lowest dimension represents the power of
// each frequency in the current window, and adjacent windows are concatenated
// in the next dimension.
   4329 //
   4330 // To get a more intuitive and visual look at what this operation does, you can run
   4331 // tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
   4332 // resulting spectrogram as a PNG image.
   4333 //
   4334 // Arguments:
   4335 //	input: Float representation of audio data.
   4336 //	window_size: How wide the input window is in samples. For the highest efficiency
   4337 // this should be a power of two, but other values are accepted.
    4338 //	stride: How far apart the centers of adjacent sample windows should be.
   4339 //
   4340 // Returns 3D representation of the audio frequencies as an image.
   4341 func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
   4342 	if scope.Err() != nil {
   4343 		return
   4344 	}
   4345 	attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
   4346 	for _, a := range optional {
   4347 		a(attrs)
   4348 	}
   4349 	opspec := tf.OpSpec{
   4350 		Type: "AudioSpectrogram",
   4351 		Input: []tf.Input{
   4352 			input,
   4353 		},
   4354 		Attrs: attrs,
   4355 	}
   4356 	op := scope.AddOperation(opspec)
   4357 	return op.Output(0)
   4358 }
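
// A minimal client-side sketch (illustrative, not part of the generated file;
// assumes the usual imports tf "github.com/tensorflow/tensorflow/tensorflow/go"
// and op "github.com/tensorflow/tensorflow/tensorflow/go/op", with assumed
// window parameters):
//
//	s := op.NewScope()
//	audio := op.Placeholder(s, tf.Float) // e.g. [samples, channels], values in [-1, 1]
//	spec := op.AudioSpectrogram(s, audio, 1024 /* window_size */, 512 /* stride */,
//		op.AudioSpectrogramMagnitudeSquared(true))
//	_ = spec // feed `audio` and fetch `spec` via a tf.Session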
   4359 
   4360 // Compute the polygamma function \\(\psi^{(n)}(x)\\).
   4361 //
   4362 // The polygamma function is defined as:
   4363 //
   4364 //
   4365 // \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
   4366 //
   4367 // where \\(\psi(x)\\) is the digamma function.
   4368 func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
   4369 	if scope.Err() != nil {
   4370 		return
   4371 	}
   4372 	opspec := tf.OpSpec{
   4373 		Type: "Polygamma",
   4374 		Input: []tf.Input{
   4375 			a, x,
   4376 		},
   4377 	}
   4378 	op := scope.AddOperation(opspec)
   4379 	return op.Output(0)
   4380 }
   4381 
   4382 // Computes second-order gradients of the maxpooling function.
   4383 //
   4384 // Arguments:
   4385 //	input: The original input.
   4386 //	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
   4387 // input of `max_pool`.
   4388 //	argmax: The indices of the maximum values chosen for each output of `max_pool`.
   4389 //	ksize: The size of the window for each dimension of the input tensor.
   4390 //	strides: The stride of the sliding window for each dimension of the
   4391 // input tensor.
   4392 //	padding: The type of padding algorithm to use.
   4393 //
   4394 // Returns Gradients of gradients w.r.t. the input of `max_pool`.
   4395 func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output) {
   4396 	if scope.Err() != nil {
   4397 		return
   4398 	}
   4399 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   4400 	opspec := tf.OpSpec{
   4401 		Type: "MaxPoolGradGradWithArgmax",
   4402 		Input: []tf.Input{
   4403 			input, grad, argmax,
   4404 		},
   4405 		Attrs: attrs,
   4406 	}
   4407 	op := scope.AddOperation(opspec)
   4408 	return op.Output(0)
   4409 }
   4410 
   4411 // MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
   4412 type MaxPoolGradGradV2Attr func(optionalAttr)
   4413 
   4414 // MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
   4415 //
   4416 // value: Specify the data format of the input and output data. With the
   4417 // default format "NHWC", the data is stored in the order of:
   4418 //     [batch, in_height, in_width, in_channels].
   4419 // Alternatively, the format could be "NCHW", the data storage order of:
   4420 //     [batch, in_channels, in_height, in_width].
   4421 // If not specified, defaults to "NHWC"
   4422 func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr {
   4423 	return func(m optionalAttr) {
   4424 		m["data_format"] = value
   4425 	}
   4426 }
   4427 
   4428 // Computes second-order gradients of the maxpooling function.
   4429 //
   4430 // Arguments:
   4431 //	orig_input: The original input tensor.
   4432 //	orig_output: The original output tensor.
   4433 //	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
   4434 //	ksize: The size of the window for each dimension of the input tensor.
   4435 //	strides: The stride of the sliding window for each dimension of the
   4436 // input tensor.
   4437 //	padding: The type of padding algorithm to use.
   4438 //
   4439 // Returns Gradients of gradients w.r.t. the input to `max_pool`.
   4440 func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output) {
   4441 	if scope.Err() != nil {
   4442 		return
   4443 	}
   4444 	attrs := map[string]interface{}{"padding": padding}
   4445 	for _, a := range optional {
   4446 		a(attrs)
   4447 	}
   4448 	opspec := tf.OpSpec{
   4449 		Type: "MaxPoolGradGradV2",
   4450 		Input: []tf.Input{
   4451 			orig_input, orig_output, grad, ksize, strides,
   4452 		},
   4453 		Attrs: attrs,
   4454 	}
   4455 	op := scope.AddOperation(opspec)
   4456 	return op.Output(0)
   4457 }
   4458 
   4459 // Fast Fourier transform.
   4460 //
   4461 // Computes the 1-dimensional discrete Fourier transform over the inner-most
   4462 // dimension of `input`.
   4463 //
   4464 // Arguments:
   4465 //	input: A complex64 tensor.
   4466 //
   4467 // Returns A complex64 tensor of the same shape as `input`. The inner-most
   4468 //   dimension of `input` is replaced with its 1D Fourier transform.
   4469 //
   4470 // @compatibility(numpy)
   4471 // Equivalent to np.fft.fft
   4472 // @end_compatibility
   4473 func FFT(scope *Scope, input tf.Output) (output tf.Output) {
   4474 	if scope.Err() != nil {
   4475 		return
   4476 	}
   4477 	opspec := tf.OpSpec{
   4478 		Type: "FFT",
   4479 		Input: []tf.Input{
   4480 			input,
   4481 		},
   4482 	}
   4483 	op := scope.AddOperation(opspec)
   4484 	return op.Output(0)
   4485 }
   4486 
   4487 // MaxPoolAttr is an optional argument to MaxPool.
   4488 type MaxPoolAttr func(optionalAttr)
   4489 
   4490 // MaxPoolDataFormat sets the optional data_format attribute to value.
   4491 //
   4492 // value: Specify the data format of the input and output data. With the
   4493 // default format "NHWC", the data is stored in the order of:
   4494 //     [batch, in_height, in_width, in_channels].
   4495 // Alternatively, the format could be "NCHW", the data storage order of:
   4496 //     [batch, in_channels, in_height, in_width].
   4497 // If not specified, defaults to "NHWC"
   4498 func MaxPoolDataFormat(value string) MaxPoolAttr {
   4499 	return func(m optionalAttr) {
   4500 		m["data_format"] = value
   4501 	}
   4502 }
   4503 
   4504 // Performs max pooling on the input.
   4505 //
   4506 // Arguments:
   4507 //	input: 4-D input to pool over.
   4508 //	ksize: The size of the window for each dimension of the input tensor.
   4509 //	strides: The stride of the sliding window for each dimension of the
   4510 // input tensor.
   4511 //	padding: The type of padding algorithm to use.
   4512 //
   4513 // Returns The max pooled output tensor.
   4514 func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output) {
   4515 	if scope.Err() != nil {
   4516 		return
   4517 	}
   4518 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   4519 	for _, a := range optional {
   4520 		a(attrs)
   4521 	}
   4522 	opspec := tf.OpSpec{
   4523 		Type: "MaxPool",
   4524 		Input: []tf.Input{
   4525 			input,
   4526 		},
   4527 		Attrs: attrs,
   4528 	}
   4529 	op := scope.AddOperation(opspec)
   4530 	return op.Output(0)
   4531 }
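
// A minimal sketch (window and stride values are illustrative assumptions):
// 2x2 max pooling with stride 2 over an NHWC batch.
//
//	s := op.NewScope()
//	img := op.Placeholder(s, tf.Float) // [batch, height, width, channels]
//	pooled := op.MaxPool(s, img, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
//	_ = pooled // spatial dimensions are halved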
   4532 
   4533 // Bucketizes 'input' based on 'boundaries'.
   4534 //
   4535 // For example, if the inputs are
   4536 //     boundaries = [0, 10, 100]
   4537 //     input = [[-5, 10000]
   4538 //              [150,   10]
   4539 //              [5,    100]]
   4540 //
   4541 // then the output will be
   4542 //     output = [[0, 3]
   4543 //               [3, 2]
   4544 //               [1, 3]]
   4545 //
   4546 // Arguments:
    4547 //	input: A Tensor of any shape containing int or float values.
    4548 //	boundaries: A sorted list of floats giving the boundaries of the buckets.
   4549 //
    4550 // Returns a tensor of the same shape as 'input', with each value replaced by its bucket index.
   4551 //
   4552 // @compatibility(numpy)
   4553 // Equivalent to np.digitize.
   4554 // @end_compatibility
   4555 func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output) {
   4556 	if scope.Err() != nil {
   4557 		return
   4558 	}
   4559 	attrs := map[string]interface{}{"boundaries": boundaries}
   4560 	opspec := tf.OpSpec{
   4561 		Type: "Bucketize",
   4562 		Input: []tf.Input{
   4563 			input,
   4564 		},
   4565 		Attrs: attrs,
   4566 	}
   4567 	op := scope.AddOperation(opspec)
   4568 	return op.Output(0)
   4569 }
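
// A minimal sketch wiring up the example above (the constants simply mirror
// the documented inputs):
//
//	s := op.NewScope()
//	in := op.Const(s, [][]int32{{-5, 10000}, {150, 10}, {5, 100}})
//	out := op.Bucketize(s, in, []float32{0, 10, 100})
//	_ = out // running `out` in a tf.Session yields [[0 3] [3 2] [1 3]]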
   4570 
   4571 // Computes gradients of the maxpooling function.
   4572 //
   4573 // Arguments:
   4574 //	input: The original input.
   4575 //	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
   4576 // output of `max_pool`.
   4577 //	argmax: The indices of the maximum values chosen for each output of `max_pool`.
   4578 //	ksize: The size of the window for each dimension of the input tensor.
   4579 //	strides: The stride of the sliding window for each dimension of the
   4580 // input tensor.
   4581 //	padding: The type of padding algorithm to use.
   4582 //
   4583 // Returns Gradients w.r.t. the input of `max_pool`.
   4584 func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output) {
   4585 	if scope.Err() != nil {
   4586 		return
   4587 	}
   4588 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   4589 	opspec := tf.OpSpec{
   4590 		Type: "MaxPoolGradWithArgmax",
   4591 		Input: []tf.Input{
   4592 			input, grad, argmax,
   4593 		},
   4594 		Attrs: attrs,
   4595 	}
   4596 	op := scope.AddOperation(opspec)
   4597 	return op.Output(0)
   4598 }
   4599 
   4600 // CriticalSectionOpAttr is an optional argument to CriticalSectionOp.
   4601 type CriticalSectionOpAttr func(optionalAttr)
   4602 
   4603 // CriticalSectionOpContainer sets the optional container attribute to value.
   4604 //
   4605 // value: the container this critical section is placed in.
   4606 // If not specified, defaults to ""
   4607 func CriticalSectionOpContainer(value string) CriticalSectionOpAttr {
   4608 	return func(m optionalAttr) {
   4609 		m["container"] = value
   4610 	}
   4611 }
   4612 
   4613 // CriticalSectionOpSharedName sets the optional shared_name attribute to value.
   4614 //
   4615 // value: the name by which this critical section is referred to.
   4616 // If not specified, defaults to ""
   4617 func CriticalSectionOpSharedName(value string) CriticalSectionOpAttr {
   4618 	return func(m optionalAttr) {
   4619 		m["shared_name"] = value
   4620 	}
   4621 }
   4622 
   4623 // Creates a handle to a CriticalSection resource.
   4624 func CriticalSectionOp(scope *Scope, optional ...CriticalSectionOpAttr) (resource tf.Output) {
   4625 	if scope.Err() != nil {
   4626 		return
   4627 	}
   4628 	attrs := map[string]interface{}{}
   4629 	for _, a := range optional {
   4630 		a(attrs)
   4631 	}
   4632 	opspec := tf.OpSpec{
   4633 		Type: "CriticalSectionOp",
   4634 
   4635 		Attrs: attrs,
   4636 	}
   4637 	op := scope.AddOperation(opspec)
   4638 	return op.Output(0)
   4639 }
   4640 
   4641 // FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
   4642 type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)
   4643 
   4644 // FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
   4645 // If not specified, defaults to -6
   4646 func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
   4647 	return func(m optionalAttr) {
   4648 		m["min"] = value
   4649 	}
   4650 }
   4651 
   4652 // FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
   4653 // If not specified, defaults to 6
   4654 func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
   4655 	return func(m optionalAttr) {
   4656 		m["max"] = value
   4657 	}
   4658 }
   4659 
   4660 // FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
   4661 // If not specified, defaults to 8
   4662 func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
   4663 	return func(m optionalAttr) {
   4664 		m["num_bits"] = value
   4665 	}
   4666 }
   4667 
   4668 // FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
   4669 // If not specified, defaults to false
   4670 func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
   4671 	return func(m optionalAttr) {
   4672 		m["narrow_range"] = value
   4673 	}
   4674 }
   4675 
   4676 // Compute gradients for a FakeQuantWithMinMaxArgs operation.
   4677 //
   4678 // Arguments:
   4679 //	gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
   4680 //	inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
   4681 //
   4682 // Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
   4683 // `gradients * (inputs >= min && inputs <= max)`.
   4684 func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
   4685 	if scope.Err() != nil {
   4686 		return
   4687 	}
   4688 	attrs := map[string]interface{}{}
   4689 	for _, a := range optional {
   4690 		a(attrs)
   4691 	}
   4692 	opspec := tf.OpSpec{
   4693 		Type: "FakeQuantWithMinMaxArgsGradient",
   4694 		Input: []tf.Input{
   4695 			gradients, inputs,
   4696 		},
   4697 		Attrs: attrs,
   4698 	}
   4699 	op := scope.AddOperation(opspec)
   4700 	return op.Output(0)
   4701 }
   4702 
   4703 // AvgPool3DAttr is an optional argument to AvgPool3D.
   4704 type AvgPool3DAttr func(optionalAttr)
   4705 
   4706 // AvgPool3DDataFormat sets the optional data_format attribute to value.
   4707 //
   4708 // value: The data format of the input and output data. With the
   4709 // default format "NDHWC", the data is stored in the order of:
   4710 //     [batch, in_depth, in_height, in_width, in_channels].
   4711 // Alternatively, the format could be "NCDHW", the data storage order is:
   4712 //     [batch, in_channels, in_depth, in_height, in_width].
   4713 // If not specified, defaults to "NDHWC"
   4714 func AvgPool3DDataFormat(value string) AvgPool3DAttr {
   4715 	return func(m optionalAttr) {
   4716 		m["data_format"] = value
   4717 	}
   4718 }
   4719 
   4720 // Performs 3D average pooling on the input.
   4721 //
   4722 // Arguments:
   4723 //	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
   4724 //	ksize: 1-D tensor of length 5. The size of the window for each dimension of
   4725 // the input tensor. Must have `ksize[0] = ksize[4] = 1`.
   4726 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   4727 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   4728 //	padding: The type of padding algorithm to use.
   4729 //
   4730 // Returns The average pooled output tensor.
   4731 func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
   4732 	if scope.Err() != nil {
   4733 		return
   4734 	}
   4735 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   4736 	for _, a := range optional {
   4737 		a(attrs)
   4738 	}
   4739 	opspec := tf.OpSpec{
   4740 		Type: "AvgPool3D",
   4741 		Input: []tf.Input{
   4742 			input,
   4743 		},
   4744 		Attrs: attrs,
   4745 	}
   4746 	op := scope.AddOperation(opspec)
   4747 	return op.Output(0)
   4748 }
   4749 
   4750 // Returns element-wise remainder of division. This emulates C semantics in that
   4751 //
   4752 // the result here is consistent with a truncating divide. E.g.
   4753 // `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
   4754 //
   4755 // *NOTE*: `Mod` supports broadcasting. More about broadcasting
   4756 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   4757 func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   4758 	if scope.Err() != nil {
   4759 		return
   4760 	}
   4761 	opspec := tf.OpSpec{
   4762 		Type: "Mod",
   4763 		Input: []tf.Input{
   4764 			x, y,
   4765 		},
   4766 	}
   4767 	op := scope.AddOperation(opspec)
   4768 	return op.Output(0)
   4769 }
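
// A minimal sketch illustrating the truncating-divide identity above with
// concrete values: truncatediv(-7, 3) == -2, so Mod(-7, 3) == -7 - (-2)*3 == -1.
//
//	s := op.NewScope()
//	x := op.Const(s, []int32{-7, 7})
//	y := op.Const(s, []int32{3, 3})
//	z := op.Mod(s, x, y) // evaluates to [-1, 1]
//	_ = z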
   4770 
   4771 // Computes square root of x element-wise.
   4772 //
   4773 // I.e., \\(y = \sqrt{x} = x^{1/2}\\).
   4774 func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
   4775 	if scope.Err() != nil {
   4776 		return
   4777 	}
   4778 	opspec := tf.OpSpec{
   4779 		Type: "Sqrt",
   4780 		Input: []tf.Input{
   4781 			x,
   4782 		},
   4783 	}
   4784 	op := scope.AddOperation(opspec)
   4785 	return op.Output(0)
   4786 }
   4787 
   4788 // Computes the gradients of 3-D convolution with respect to the filter.
   4789 //
   4790 // DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
   4791 //
   4792 // Arguments:
   4793 //	input: Shape `[batch, depth, rows, cols, in_channels]`.
   4794 //	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
   4795 // `in_channels` must match between `input` and `filter`.
   4796 //	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
   4797 // out_channels]`.
   4798 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   4799 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   4800 //	padding: The type of padding algorithm to use.
   4801 func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string) (output tf.Output) {
   4802 	if scope.Err() != nil {
   4803 		return
   4804 	}
   4805 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   4806 	opspec := tf.OpSpec{
   4807 		Type: "Conv3DBackpropFilter",
   4808 		Input: []tf.Input{
   4809 			input, filter, out_backprop,
   4810 		},
   4811 		Attrs: attrs,
   4812 	}
   4813 	op := scope.AddOperation(opspec)
   4814 	return op.Output(0)
   4815 }
   4816 
   4817 // Computes the gradient for the rsqrt of `x` wrt its input.
   4818 //
   4819 // Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
   4820 // is the corresponding input gradient.
   4821 func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
   4822 	if scope.Err() != nil {
   4823 		return
   4824 	}
   4825 	opspec := tf.OpSpec{
   4826 		Type: "RsqrtGrad",
   4827 		Input: []tf.Input{
   4828 			y, dy,
   4829 		},
   4830 	}
   4831 	op := scope.AddOperation(opspec)
   4832 	return op.Output(0)
   4833 }
   4834 
   4835 // ReverseSequenceAttr is an optional argument to ReverseSequence.
   4836 type ReverseSequenceAttr func(optionalAttr)
   4837 
   4838 // ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
   4839 //
   4840 // value: The dimension along which reversal is performed.
   4841 // If not specified, defaults to 0
   4842 func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr {
   4843 	return func(m optionalAttr) {
   4844 		m["batch_dim"] = value
   4845 	}
   4846 }
   4847 
   4848 // Reverses variable length slices.
   4849 //
   4850 // This op first slices `input` along the dimension `batch_dim`, and for each
   4851 // slice `i`, reverses the first `seq_lengths[i]` elements along
   4852 // the dimension `seq_dim`.
   4853 //
   4854 // The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
   4855 // and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
   4856 //
   4857 // The output slice `i` along dimension `batch_dim` is then given by input
   4858 // slice `i`, with the first `seq_lengths[i]` slices along dimension
   4859 // `seq_dim` reversed.
   4860 //
   4861 // For example:
   4862 //
   4863 // ```
   4864 // # Given this:
   4865 // batch_dim = 0
   4866 // seq_dim = 1
   4867 // input.dims = (4, 8, ...)
   4868 // seq_lengths = [7, 2, 3, 5]
   4869 //
   4870 // # then slices of input are reversed on seq_dim, but only up to seq_lengths:
   4871 // output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
   4872 // output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
   4873 // output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
   4874 // output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
   4875 //
   4876 // # while entries past seq_lens are copied through:
   4877 // output[0, 7:, :, ...] = input[0, 7:, :, ...]
   4878 // output[1, 2:, :, ...] = input[1, 2:, :, ...]
   4879 // output[2, 3:, :, ...] = input[2, 3:, :, ...]
   4880 // output[3, 2:, :, ...] = input[3, 2:, :, ...]
   4881 // ```
   4882 //
   4883 // In contrast, if:
   4884 //
   4885 // ```
   4886 // # Given this:
   4887 // batch_dim = 2
   4888 // seq_dim = 0
   4889 // input.dims = (8, ?, 4, ...)
   4890 // seq_lengths = [7, 2, 3, 5]
   4891 //
   4892 // # then slices of input are reversed on seq_dim, but only up to seq_lengths:
   4893 // output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
   4894 // output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
   4895 // output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
   4896 // output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
   4897 //
   4898 // # while entries past seq_lens are copied through:
   4899 // output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
   4900 // output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
   4901 // output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
   4902 // output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
   4903 // ```
   4904 //
   4905 // Arguments:
   4906 //	input: The input to reverse.
   4907 //	seq_lengths: 1-D with length `input.dims(batch_dim)` and
   4908 // `max(seq_lengths) <= input.dims(seq_dim)`
   4909 //	seq_dim: The dimension which is partially reversed.
   4910 //
   4911 // Returns The partially reversed input. It has the same shape as `input`.
   4912 func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output) {
   4913 	if scope.Err() != nil {
   4914 		return
   4915 	}
   4916 	attrs := map[string]interface{}{"seq_dim": seq_dim}
   4917 	for _, a := range optional {
   4918 		a(attrs)
   4919 	}
   4920 	opspec := tf.OpSpec{
   4921 		Type: "ReverseSequence",
   4922 		Input: []tf.Input{
   4923 			input, seq_lengths,
   4924 		},
   4925 		Attrs: attrs,
   4926 	}
   4927 	op := scope.AddOperation(opspec)
   4928 	return op.Output(0)
   4929 }
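
// A minimal sketch matching the first example above (shapes are assumed;
// batch_dim keeps its default of 0, seq_dim is 1):
//
//	s := op.NewScope()
//	input := op.Placeholder(s, tf.Float) // e.g. shape [4, 8, ...]
//	seqLens := op.Const(s, []int64{7, 2, 3, 5})
//	rev := op.ReverseSequence(s, input, seqLens, 1 /* seq_dim */)
//	_ = rev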
   4930 
   4931 // DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
   4932 type DepthwiseConv2dNativeAttr func(optionalAttr)
   4933 
   4934 // DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
   4935 //
   4936 // value: Specify the data format of the input and output data. With the
   4937 // default format "NHWC", the data is stored in the order of:
   4938 //     [batch, height, width, channels].
   4939 // Alternatively, the format could be "NCHW", the data storage order of:
   4940 //     [batch, channels, height, width].
   4941 // If not specified, defaults to "NHWC"
   4942 func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
   4943 	return func(m optionalAttr) {
   4944 		m["data_format"] = value
   4945 	}
   4946 }
   4947 
   4948 // DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
   4949 //
   4950 // value: 1-D tensor of length 4.  The dilation factor for each dimension of
   4951 // `input`. If set to k > 1, there will be k-1 skipped cells between each filter
   4952 // element on that dimension. The dimension order is determined by the value of
   4953 // `data_format`, see above for details. Dilations in the batch and depth
   4954 // dimensions must be 1.
    4955 // If not specified, defaults to [1, 1, 1, 1]
   4956 func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr {
   4957 	return func(m optionalAttr) {
   4958 		m["dilations"] = value
   4959 	}
   4960 }
   4961 
   4962 // Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
   4963 //
   4964 // Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
   4965 // and a filter / kernel tensor of shape
   4966 // `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
   4967 // `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
   4968 // a different filter to each input channel (expanding from 1 channel to
   4969 // `channel_multiplier` channels for each), then concatenates the results
   4970 // together. Thus, the output has `in_channels * channel_multiplier` channels.
   4971 //
   4972 // ```
   4973 // for k in 0..in_channels-1
   4974 //   for q in 0..channel_multiplier-1
   4975 //     output[b, i, j, k * channel_multiplier + q] =
   4976 //       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
   4977 //                         filter[di, dj, k, q]
   4978 // ```
   4979 //
   4980 // Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
    4981 // horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
   4982 //
   4983 // Arguments:
   4984 //
   4985 //
   4986 //	strides: 1-D of length 4.  The stride of the sliding window for each dimension
   4987 // of `input`.
   4988 //	padding: The type of padding algorithm to use.
   4989 func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output) {
   4990 	if scope.Err() != nil {
   4991 		return
   4992 	}
   4993 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   4994 	for _, a := range optional {
   4995 		a(attrs)
   4996 	}
   4997 	opspec := tf.OpSpec{
   4998 		Type: "DepthwiseConv2dNative",
   4999 		Input: []tf.Input{
   5000 			input, filter,
   5001 		},
   5002 		Attrs: attrs,
   5003 	}
   5004 	op := scope.AddOperation(opspec)
   5005 	return op.Output(0)
   5006 }
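
// A minimal sketch (shapes are illustrative assumptions): a stride-1 depthwise
// convolution with a 3x3 kernel and a channel multiplier of 2.
//
//	s := op.NewScope()
//	img := op.Placeholder(s, tf.Float)  // [batch, height, width, in_channels]
//	kern := op.Placeholder(s, tf.Float) // [3, 3, in_channels, 2]
//	out := op.DepthwiseConv2dNative(s, img, kern, []int64{1, 1, 1, 1}, "SAME")
//	_ = out // output has in_channels * 2 channels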
   5007 
   5008 // TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
   5009 type TensorArrayGatherV3Attr func(optionalAttr)
   5010 
   5011 // TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
   5012 //
   5013 // value: The expected shape of an element, if known. Used to
   5014 // validate the shapes of TensorArray elements. If this shape is not
   5015 // fully specified, gathering zero-size TensorArrays is an error.
    5016 // If not specified, defaults to a shape of unknown rank
   5017 func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr {
   5018 	return func(m optionalAttr) {
   5019 		m["element_shape"] = value
   5020 	}
   5021 }
   5022 
   5023 // Gather specific elements from the TensorArray into output `value`.
   5024 //
   5025 // All elements selected by `indices` must have the same shape.
   5026 //
   5027 // Arguments:
   5028 //	handle: The handle to a TensorArray.
   5029 //	indices: The locations in the TensorArray from which to read tensor elements.
   5030 //	flow_in: A float scalar that enforces proper chaining of operations.
    5031 //	dtype: The type of the elements that are returned.
   5032 //
   5033 // Returns All of the elements in the TensorArray, concatenated along a new
   5034 // axis (the new dimension 0).
   5035 func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output) {
   5036 	if scope.Err() != nil {
   5037 		return
   5038 	}
   5039 	attrs := map[string]interface{}{"dtype": dtype}
   5040 	for _, a := range optional {
   5041 		a(attrs)
   5042 	}
   5043 	opspec := tf.OpSpec{
   5044 		Type: "TensorArrayGatherV3",
   5045 		Input: []tf.Input{
   5046 			handle, indices, flow_in,
   5047 		},
   5048 		Attrs: attrs,
   5049 	}
   5050 	op := scope.AddOperation(opspec)
   5051 	return op.Output(0)
   5052 }
   5053 
    5054 // Converts each string in the input Tensor to its hash modulo the number of buckets.
   5055 //
   5056 // The hash function is deterministic on the content of the string within the
   5057 // process and will never change. However, it is not suitable for cryptography.
   5058 // This function may be used when CPU time is scarce and inputs are trusted or
   5059 // unimportant. There is a risk of adversaries constructing inputs that all hash
   5060 // to the same bucket. To prevent this problem, use a strong hash function with
   5061 // `tf.string_to_hash_bucket_strong`.
   5062 //
   5063 // Arguments:
   5064 //	input: The strings to assign a hash bucket.
   5065 //	num_buckets: The number of buckets.
   5066 //
   5067 // Returns A Tensor of the same shape as the input `string_tensor`.
   5068 func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output) {
   5069 	if scope.Err() != nil {
   5070 		return
   5071 	}
   5072 	attrs := map[string]interface{}{"num_buckets": num_buckets}
   5073 	opspec := tf.OpSpec{
   5074 		Type: "StringToHashBucketFast",
   5075 		Input: []tf.Input{
   5076 			input,
   5077 		},
   5078 		Attrs: attrs,
   5079 	}
   5080 	op := scope.AddOperation(opspec)
   5081 	return op.Output(0)
   5082 }
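
// A minimal sketch (the bucket count is an illustrative assumption):
//
//	s := op.NewScope()
//	words := op.Const(s, []string{"hello", "world"})
//	ids := op.StringToHashBucketFast(s, words, 1000) // int64 ids in [0, 1000)
//	_ = ids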
   5083 
   5084 // Returns the max of x and y (i.e. x > y ? x : y) element-wise.
   5085 //
   5086 // *NOTE*: `Maximum` supports broadcasting. More about broadcasting
   5087 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   5088 func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   5089 	if scope.Err() != nil {
   5090 		return
   5091 	}
   5092 	opspec := tf.OpSpec{
   5093 		Type: "Maximum",
   5094 		Input: []tf.Input{
   5095 			x, y,
   5096 		},
   5097 	}
   5098 	op := scope.AddOperation(opspec)
   5099 	return op.Output(0)
   5100 }
   5101 
   5102 // Outputs all keys and values in the table.
   5103 //
   5104 // Arguments:
   5105 //	table_handle: Handle to the table.
   5106 //
   5107 //
   5108 //
    5109 // Returns a vector of all keys present in the table, and a tensor of all values in the table, indexed in parallel with `keys`.
   5110 func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output) {
   5111 	if scope.Err() != nil {
   5112 		return
   5113 	}
   5114 	attrs := map[string]interface{}{"Tkeys": Tkeys, "Tvalues": Tvalues}
   5115 	opspec := tf.OpSpec{
   5116 		Type: "LookupTableExportV2",
   5117 		Input: []tf.Input{
   5118 			table_handle,
   5119 		},
   5120 		Attrs: attrs,
   5121 	}
   5122 	op := scope.AddOperation(opspec)
   5123 	return op.Output(0), op.Output(1)
   5124 }
   5125 
   5126 // Real-valued fast Fourier transform.
   5127 //
   5128 // Computes the 1-dimensional discrete Fourier transform of a real-valued signal
   5129 // over the inner-most dimension of `input`.
   5130 //
   5131 // Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
   5132 // `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
   5133 // followed by the `fft_length / 2` positive-frequency terms.
   5134 //
   5135 // Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
   5136 // corresponding dimension of `input`, the dimension is cropped. If it is larger,
   5137 // the dimension is padded with zeros.
   5138 //
   5139 // Arguments:
   5140 //	input: A float32 tensor.
   5141 //	fft_length: An int32 tensor of shape [1]. The FFT length.
   5142 //
   5143 // Returns A complex64 tensor of the same rank as `input`. The inner-most
   5144 //   dimension of `input` is replaced with the `fft_length / 2 + 1` unique
   5145 //   frequency components of its 1D Fourier transform.
   5146 //
   5147 // @compatibility(numpy)
   5148 // Equivalent to np.fft.rfft
   5149 // @end_compatibility
   5150 func RFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
   5151 	if scope.Err() != nil {
   5152 		return
   5153 	}
   5154 	opspec := tf.OpSpec{
   5155 		Type: "RFFT",
   5156 		Input: []tf.Input{
   5157 			input, fft_length,
   5158 		},
   5159 	}
   5160 	op := scope.AddOperation(opspec)
   5161 	return op.Output(0)
   5162 }
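
// A minimal sketch (the signal length is an assumed 512 samples, so the
// result has 512/2 + 1 = 257 frequency components):
//
//	s := op.NewScope()
//	signal := op.Placeholder(s, tf.Float) // e.g. shape [..., 512]
//	fftLen := op.Const(s, []int32{512})
//	spectrum := op.RFFT(s, signal, fftLen) // complex64, shape [..., 257]
//	_ = spectrum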
   5163 
   5164 // ComplexAttr is an optional argument to Complex.
   5165 type ComplexAttr func(optionalAttr)
   5166 
   5167 // ComplexTout sets the optional Tout attribute to value.
   5168 // If not specified, defaults to DT_COMPLEX64
   5169 func ComplexTout(value tf.DataType) ComplexAttr {
   5170 	return func(m optionalAttr) {
   5171 		m["Tout"] = value
   5172 	}
   5173 }
   5174 
   5175 // Converts two real numbers to a complex number.
   5176 //
   5177 // Given a tensor `real` representing the real part of a complex number, and a
   5178 // tensor `imag` representing the imaginary part of a complex number, this
   5179 // operation returns complex numbers elementwise of the form \\(a + bj\\), where
   5180 // *a* represents the `real` part and *b* represents the `imag` part.
   5181 //
   5182 // The input tensors `real` and `imag` must have the same shape.
   5183 //
   5184 // For example:
   5185 //
   5186 // ```
   5187 // # tensor 'real' is [2.25, 3.25]
   5188 // # tensor `imag` is [4.75, 5.75]
   5189 // tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
   5190 // ```
   5191 func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output) {
   5192 	if scope.Err() != nil {
   5193 		return
   5194 	}
   5195 	attrs := map[string]interface{}{}
   5196 	for _, a := range optional {
   5197 		a(attrs)
   5198 	}
   5199 	opspec := tf.OpSpec{
   5200 		Type: "Complex",
   5201 		Input: []tf.Input{
   5202 			real, imag,
   5203 		},
   5204 		Attrs: attrs,
   5205 	}
   5206 	op := scope.AddOperation(opspec)
   5207 	return op.Output(0)
   5208 }
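
// A minimal sketch mirroring the example above:
//
//	s := op.NewScope()
//	re := op.Const(s, []float32{2.25, 3.25})
//	im := op.Const(s, []float32{4.75, 5.75})
//	c := op.Complex(s, re, im) // complex64: [2.25+4.75i, 3.25+5.75i]
//	_ = c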
   5209 
   5210 // ImagAttr is an optional argument to Imag.
   5211 type ImagAttr func(optionalAttr)
   5212 
   5213 // ImagTout sets the optional Tout attribute to value.
   5214 // If not specified, defaults to DT_FLOAT
   5215 func ImagTout(value tf.DataType) ImagAttr {
   5216 	return func(m optionalAttr) {
   5217 		m["Tout"] = value
   5218 	}
   5219 }
   5220 
   5221 // Returns the imaginary part of a complex number.
   5222 //
   5223 // Given a tensor `input` of complex numbers, this operation returns a tensor of
   5224 // type `float` that is the imaginary part of each element in `input`. All
   5225 // elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
   5226 // is the real part and *b* is the imaginary part returned by this operation.
   5227 //
   5228 // For example:
   5229 //
   5230 // ```
   5231 // # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
   5232 // tf.imag(input) ==> [4.75, 5.75]
   5233 // ```
   5234 func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output) {
   5235 	if scope.Err() != nil {
   5236 		return
   5237 	}
   5238 	attrs := map[string]interface{}{}
   5239 	for _, a := range optional {
   5240 		a(attrs)
   5241 	}
   5242 	opspec := tf.OpSpec{
   5243 		Type: "Imag",
   5244 		Input: []tf.Input{
   5245 			input,
   5246 		},
   5247 		Attrs: attrs,
   5248 	}
   5249 	op := scope.AddOperation(opspec)
   5250 	return op.Output(0)
   5251 }
   5252 
   5253 // Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
   5254 //
   5255 // The Hurwitz zeta function is defined as:
   5256 //
   5257 //
   5258 // \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
   5259 func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) {
   5260 	if scope.Err() != nil {
   5261 		return
   5262 	}
   5263 	opspec := tf.OpSpec{
   5264 		Type: "Zeta",
   5265 		Input: []tf.Input{
   5266 			x, q,
   5267 		},
   5268 	}
   5269 	op := scope.AddOperation(opspec)
   5270 	return op.Output(0)
   5271 }
   5272 
   5273 // LRNGradAttr is an optional argument to LRNGrad.
   5274 type LRNGradAttr func(optionalAttr)
   5275 
   5276 // LRNGradDepthRadius sets the optional depth_radius attribute to value.
   5277 //
   5278 // value: A depth radius.
   5279 // If not specified, defaults to 5
   5280 func LRNGradDepthRadius(value int64) LRNGradAttr {
   5281 	return func(m optionalAttr) {
   5282 		m["depth_radius"] = value
   5283 	}
   5284 }
   5285 
   5286 // LRNGradBias sets the optional bias attribute to value.
   5287 //
   5288 // value: An offset (usually > 0 to avoid dividing by 0).
   5289 // If not specified, defaults to 1
   5290 func LRNGradBias(value float32) LRNGradAttr {
   5291 	return func(m optionalAttr) {
   5292 		m["bias"] = value
   5293 	}
   5294 }
   5295 
   5296 // LRNGradAlpha sets the optional alpha attribute to value.
   5297 //
   5298 // value: A scale factor, usually positive.
   5299 // If not specified, defaults to 1
   5300 func LRNGradAlpha(value float32) LRNGradAttr {
   5301 	return func(m optionalAttr) {
   5302 		m["alpha"] = value
   5303 	}
   5304 }
   5305 
   5306 // LRNGradBeta sets the optional beta attribute to value.
   5307 //
   5308 // value: An exponent.
   5309 // If not specified, defaults to 0.5
   5310 func LRNGradBeta(value float32) LRNGradAttr {
   5311 	return func(m optionalAttr) {
   5312 		m["beta"] = value
   5313 	}
   5314 }
   5315 
   5316 // Gradients for Local Response Normalization.
   5317 //
   5318 // Arguments:
   5319 //	input_grads: 4-D with shape `[batch, height, width, channels]`.
   5320 //	input_image: 4-D with shape `[batch, height, width, channels]`.
   5321 //	output_image: 4-D with shape `[batch, height, width, channels]`.
   5322 //
   5323 // Returns The gradients for LRN.
   5324 func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output) {
   5325 	if scope.Err() != nil {
   5326 		return
   5327 	}
   5328 	attrs := map[string]interface{}{}
   5329 	for _, a := range optional {
   5330 		a(attrs)
   5331 	}
   5332 	opspec := tf.OpSpec{
   5333 		Type: "LRNGrad",
   5334 		Input: []tf.Input{
   5335 			input_grads, input_image, output_image,
   5336 		},
   5337 		Attrs: attrs,
   5338 	}
   5339 	op := scope.AddOperation(opspec)
   5340 	return op.Output(0)
   5341 }
   5342 
   5343 // AnyAttr is an optional argument to Any.
   5344 type AnyAttr func(optionalAttr)
   5345 
   5346 // AnyKeepDims sets the optional keep_dims attribute to value.
   5347 //
   5348 // value: If true, retain reduced dimensions with length 1.
   5349 // If not specified, defaults to false
   5350 func AnyKeepDims(value bool) AnyAttr {
   5351 	return func(m optionalAttr) {
   5352 		m["keep_dims"] = value
   5353 	}
   5354 }
   5355 
   5356 // Computes the "logical or" of elements across dimensions of a tensor.
   5357 //
   5358 // Reduces `input` along the dimensions given in `axis`. Unless
   5359 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   5360 // `axis`. If `keep_dims` is true, the reduced dimensions are
   5361 // retained with length 1.
   5362 //
   5363 // Arguments:
   5364 //	input: The tensor to reduce.
   5365 //	axis: The dimensions to reduce. Must be in the range
   5366 // `[-rank(input), rank(input))`.
   5367 //
   5368 // Returns The reduced tensor.
   5369 func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output) {
   5370 	if scope.Err() != nil {
   5371 		return
   5372 	}
   5373 	attrs := map[string]interface{}{}
   5374 	for _, a := range optional {
   5375 		a(attrs)
   5376 	}
   5377 	opspec := tf.OpSpec{
   5378 		Type: "Any",
   5379 		Input: []tf.Input{
   5380 			input, axis,
   5381 		},
   5382 		Attrs: attrs,
   5383 	}
   5384 	op := scope.AddOperation(opspec)
   5385 	return op.Output(0)
   5386 }
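
// A minimal sketch (the matrix and axis are illustrative): reducing a boolean
// matrix along its rows.
//
//	s := op.NewScope()
//	m := op.Const(s, [][]bool{{true, false}, {false, false}})
//	rowAny := op.Any(s, m, op.Const(s, []int32{1}))
//	_ = rowAny // evaluates to [true, false]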
   5387 
   5388 // ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
   5389 type ResourceApplyFtrlAttr func(optionalAttr)
   5390 
   5391 // ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
   5392 //
   5393 // value: If `True`, updating of the var and accum tensors will be protected
   5394 // by a lock; otherwise the behavior is undefined, but may exhibit less
   5395 // contention.
   5396 // If not specified, defaults to false
   5397 func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
   5398 	return func(m optionalAttr) {
   5399 		m["use_locking"] = value
   5400 	}
   5401 }
   5402 
   5403 // Update '*var' according to the Ftrl-proximal scheme.
   5404 //
   5405 // accum_new = accum + grad * grad
   5406 // linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
   5407 // quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
   5408 // var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
   5409 // accum = accum_new
   5410 //
   5411 // Arguments:
   5412 //	var_: Should be from a Variable().
   5413 //	accum: Should be from a Variable().
   5414 //	linear: Should be from a Variable().
   5415 //	grad: The gradient.
   5416 //	lr: Scaling factor. Must be a scalar.
    5417 //	l1: L1 regularization. Must be a scalar.
    5418 //	l2: L2 regularization. Must be a scalar.
   5419 //	lr_power: Scaling factor. Must be a scalar.
   5420 //
   5421 // Returns the created operation.
   5422 func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
   5423 	if scope.Err() != nil {
   5424 		return
   5425 	}
   5426 	attrs := map[string]interface{}{}
   5427 	for _, a := range optional {
   5428 		a(attrs)
   5429 	}
   5430 	opspec := tf.OpSpec{
   5431 		Type: "ResourceApplyFtrl",
   5432 		Input: []tf.Input{
   5433 			var_, accum, linear, grad, lr, l1, l2, lr_power,
   5434 		},
   5435 		Attrs: attrs,
   5436 	}
   5437 	return scope.AddOperation(opspec)
   5438 }
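
// A plain-Go sketch of the scalar update rule above (illustrative only, not
// the actual kernel; `ftrlStep` is a hypothetical helper and `math` is the
// standard-library package):
//
//	func ftrlStep(v, accum, linear, grad, lr, l1, l2, lrPower float64) (newVar, newAccum, newLinear float64) {
//		newAccum = accum + grad*grad
//		newLinear = linear + grad - (math.Pow(newAccum, -lrPower)-math.Pow(accum, -lrPower))/lr*v
//		quadratic := 1/(math.Pow(newAccum, lrPower)*lr) + 2*l2
//		if math.Abs(newLinear) > l1 {
//			newVar = (math.Copysign(l1, newLinear) - newLinear) / quadratic
//		}
//		return newVar, newAccum, newLinear
//	}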
   5439 
   5440 // RandomUniformAttr is an optional argument to RandomUniform.
   5441 type RandomUniformAttr func(optionalAttr)
   5442 
   5443 // RandomUniformSeed sets the optional seed attribute to value.
   5444 //
   5445 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   5446 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   5447 // random seed.
   5448 // If not specified, defaults to 0
   5449 func RandomUniformSeed(value int64) RandomUniformAttr {
   5450 	return func(m optionalAttr) {
   5451 		m["seed"] = value
   5452 	}
   5453 }
   5454 
   5455 // RandomUniformSeed2 sets the optional seed2 attribute to value.
   5456 //
   5457 // value: A second seed to avoid seed collision.
   5458 // If not specified, defaults to 0
   5459 func RandomUniformSeed2(value int64) RandomUniformAttr {
   5460 	return func(m optionalAttr) {
   5461 		m["seed2"] = value
   5462 	}
   5463 }
   5464 
   5465 // Outputs random values from a uniform distribution.
   5466 //
   5467 // The generated values follow a uniform distribution in the range `[0, 1)`. The
   5468 // lower bound 0 is included in the range, while the upper bound 1 is excluded.
   5469 //
   5470 // Arguments:
   5471 //	shape: The shape of the output tensor.
   5472 //	dtype: The type of the output.
   5473 //
   5474 // Returns A tensor of the specified shape filled with uniform random values.
   5475 func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output) {
   5476 	if scope.Err() != nil {
   5477 		return
   5478 	}
   5479 	attrs := map[string]interface{}{"dtype": dtype}
   5480 	for _, a := range optional {
   5481 		a(attrs)
   5482 	}
   5483 	opspec := tf.OpSpec{
   5484 		Type: "RandomUniform",
   5485 		Input: []tf.Input{
   5486 			shape,
   5487 		},
   5488 		Attrs: attrs,
   5489 	}
   5490 	op := scope.AddOperation(opspec)
   5491 	return op.Output(0)
   5492 }
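
// A minimal sketch (shape and seed are illustrative assumptions):
//
//	s := op.NewScope()
//	shape := op.Const(s, []int32{2, 3})
//	u := op.RandomUniform(s, shape, tf.Float, op.RandomUniformSeed(42))
//	_ = u // float32 values in [0, 1)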
   5493 
   5494 // AssertAttr is an optional argument to Assert.
   5495 type AssertAttr func(optionalAttr)
   5496 
   5497 // AssertSummarize sets the optional summarize attribute to value.
   5498 //
   5499 // value: Print this many entries of each tensor.
   5500 // If not specified, defaults to 3
   5501 func AssertSummarize(value int64) AssertAttr {
   5502 	return func(m optionalAttr) {
   5503 		m["summarize"] = value
   5504 	}
   5505 }
   5506 
   5507 // Asserts that the given condition is true.
   5508 //
   5509 // If `condition` evaluates to false, print the list of tensors in `data`.
   5510 // `summarize` determines how many entries of the tensors to print.
   5511 //
   5512 // Arguments:
   5513 //	condition: The condition to evaluate.
   5514 //	data: The tensors to print out when condition is false.
   5515 //
   5516 // Returns the created operation.
   5517 func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
   5518 	if scope.Err() != nil {
   5519 		return
   5520 	}
   5521 	attrs := map[string]interface{}{}
   5522 	for _, a := range optional {
   5523 		a(attrs)
   5524 	}
   5525 	opspec := tf.OpSpec{
   5526 		Type: "Assert",
   5527 		Input: []tf.Input{
   5528 			condition, tf.OutputList(data),
   5529 		},
   5530 		Attrs: attrs,
   5531 	}
   5532 	return scope.AddOperation(opspec)
   5533 }
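
// A minimal sketch (the guarded values are illustrative; in practice the
// condition would be a computed predicate):
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{1, 2, 3})
//	ok := op.Const(s, true)
//	assert := op.Assert(s, ok, []tf.Output{x}, op.AssertSummarize(2))
//	_ = assert // pass as a target to Session.Run to enforce the check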
   5534 
   5535 // Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
   5536 //
   5537 // For each entry in `x`, calculates the number of `1` (on) bits in the binary
   5538 // representation of that entry.
   5539 //
   5540 // **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
   5541 // `int32` or `int64` and perform the bitcount on the result, than to feed in
   5542 // 8- or 16-bit inputs and then aggregate the resulting counts.
   5543 func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
   5544 	if scope.Err() != nil {
   5545 		return
   5546 	}
   5547 	opspec := tf.OpSpec{
   5548 		Type: "PopulationCount",
   5549 		Input: []tf.Input{
   5550 			x,
   5551 		},
   5552 	}
   5553 	op := scope.AddOperation(opspec)
   5554 	return op.Output(0)
   5555 }
   5556 
   5557 // Split a `SparseTensor` into `num_split` tensors along one dimension.
   5558 //
    5559 // If `shape[split_dim]` is not an integer multiple of `num_split`, slices
    5560 // `[0 : shape[split_dim] % num_split]` get one extra element along `split_dim`.
   5561 // For example, if `split_dim = 1` and `num_split = 2` and the input is
   5562 //
   5563 //     input_tensor = shape = [2, 7]
   5564 //     [    a   d e  ]
   5565 //     [b c          ]
   5566 //
   5567 // Graphically the output tensors are:
   5568 //
   5569 //     output_tensor[0] = shape = [2, 4]
   5570 //     [    a  ]
   5571 //     [b c    ]
   5572 //
   5573 //     output_tensor[1] = shape = [2, 3]
   5574 //     [ d e  ]
   5575 //     [      ]
   5576 //
   5577 // Arguments:
   5578 //	split_dim: 0-D.  The dimension along which to split.  Must be in the range
   5579 // `[0, rank(shape))`.
    5580 //	indices: 2-D tensor representing the indices of the sparse tensor.
    5581 //	values: 1-D tensor representing the values of the sparse tensor.
    5582 //	shape: 1-D tensor representing the shape of the sparse tensor.
   5585 //	num_split: The number of ways to split.
   5586 //
    5587 // Returns a list of 1-D tensors representing the indices of the output sparse
    5588 // tensors, a list of 1-D tensors representing the values of the output sparse
    5589 // tensors, and a list of 1-D tensors representing the shapes of the output sparse tensors.
   5590 func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output) {
   5591 	if scope.Err() != nil {
   5592 		return
   5593 	}
   5594 	attrs := map[string]interface{}{"num_split": num_split}
   5595 	opspec := tf.OpSpec{
   5596 		Type: "SparseSplit",
   5597 		Input: []tf.Input{
   5598 			split_dim, indices, values, shape,
   5599 		},
   5600 		Attrs: attrs,
   5601 	}
   5602 	op := scope.AddOperation(opspec)
   5603 	if scope.Err() != nil {
   5604 		return
   5605 	}
   5606 	var idx int
   5607 	var err error
   5608 	if output_indices, idx, err = makeOutputList(op, idx, "output_indices"); err != nil {
   5609 		scope.UpdateErr("SparseSplit", err)
   5610 		return
   5611 	}
   5612 	if output_values, idx, err = makeOutputList(op, idx, "output_values"); err != nil {
   5613 		scope.UpdateErr("SparseSplit", err)
   5614 		return
   5615 	}
   5616 	if output_shape, idx, err = makeOutputList(op, idx, "output_shape"); err != nil {
   5617 		scope.UpdateErr("SparseSplit", err)
   5618 		return
   5619 	}
   5620 	return output_indices, output_values, output_shape
   5621 }
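
// A minimal sketch matching the example above (num_split = 2 along dimension 1
// of a [2, 7] sparse tensor; the exact column positions are illustrative):
//
//	s := op.NewScope()
//	dim := op.Const(s, int64(1))
//	idx := op.Const(s, [][]int64{{0, 2}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
//	vals := op.Const(s, []string{"a", "d", "e", "b", "c"})
//	shape := op.Const(s, []int64{2, 7})
//	oi, ov, os := op.SparseSplit(s, dim, idx, vals, shape, 2)
//	_, _, _ = oi, ov, os // each returned slice has num_split == 2 entries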
   5622 
   5623 // Returns the truth value of (x < y) element-wise.
   5624 //
   5625 // *NOTE*: `Less` supports broadcasting. More about broadcasting
   5626 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   5627 func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   5628 	if scope.Err() != nil {
   5629 		return
   5630 	}
   5631 	opspec := tf.OpSpec{
   5632 		Type: "Less",
   5633 		Input: []tf.Input{
   5634 			x, y,
   5635 		},
   5636 	}
   5637 	op := scope.AddOperation(opspec)
   5638 	return op.Output(0)
   5639 }
   5640 
   5641 // QuantizedReluXAttr is an optional argument to QuantizedReluX.
   5642 type QuantizedReluXAttr func(optionalAttr)
   5643 
   5644 // QuantizedReluXOutType sets the optional out_type attribute to value.
   5645 // If not specified, defaults to DT_QUINT8
   5646 func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr {
   5647 	return func(m optionalAttr) {
   5648 		m["out_type"] = value
   5649 	}
   5650 }
   5651 
   5652 // Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
   5653 //
   5654 // Arguments:
   5655 //
   5656 //
   5657 //	min_features: The float value that the lowest quantized value represents.
   5658 //	max_features: The float value that the highest quantized value represents.
   5659 //
    5660 // Returns activations with the same shape as "features", along with the float values that the lowest and highest quantized values represent.
   5661 func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
   5662 	if scope.Err() != nil {
   5663 		return
   5664 	}
   5665 	attrs := map[string]interface{}{}
   5666 	for _, a := range optional {
   5667 		a(attrs)
   5668 	}
   5669 	opspec := tf.OpSpec{
   5670 		Type: "QuantizedReluX",
   5671 		Input: []tf.Input{
   5672 			features, max_value, min_features, max_features,
   5673 		},
   5674 		Attrs: attrs,
   5675 	}
   5676 	op := scope.AddOperation(opspec)
   5677 	return op.Output(0), op.Output(1), op.Output(2)
   5678 }
   5679 
   5680 // SummaryWriterAttr is an optional argument to SummaryWriter.
   5681 type SummaryWriterAttr func(optionalAttr)
   5682 
   5683 // SummaryWriterSharedName sets the optional shared_name attribute to value.
   5684 // If not specified, defaults to ""
   5685 func SummaryWriterSharedName(value string) SummaryWriterAttr {
   5686 	return func(m optionalAttr) {
   5687 		m["shared_name"] = value
   5688 	}
   5689 }
   5690 
   5691 // SummaryWriterContainer sets the optional container attribute to value.
   5692 // If not specified, defaults to ""
   5693 func SummaryWriterContainer(value string) SummaryWriterAttr {
   5694 	return func(m optionalAttr) {
   5695 		m["container"] = value
   5696 	}
   5697 }
   5698 
   5699 // Returns a handle to be used to access a summary writer.
   5700 //
   5701 // The summary writer is an in-graph resource which can be used by ops to write
   5702 // summaries to event files.
   5703 //
   5704 // Returns the summary writer resource. Scalar handle.
   5705 func SummaryWriter(scope *Scope, optional ...SummaryWriterAttr) (writer tf.Output) {
   5706 	if scope.Err() != nil {
   5707 		return
   5708 	}
   5709 	attrs := map[string]interface{}{}
   5710 	for _, a := range optional {
   5711 		a(attrs)
   5712 	}
   5713 	opspec := tf.OpSpec{
   5714 		Type: "SummaryWriter",
   5715 
   5716 		Attrs: attrs,
   5717 	}
   5718 	op := scope.AddOperation(opspec)
   5719 	return op.Output(0)
   5720 }
   5721 
   5722 // Computes gradients for SparseSegmentMean.
   5723 //
    5724 // Returns tensor "output" with the same shape as grad, except for dimension 0 whose
   5725 // value is output_dim0.
   5726 //
   5727 // Arguments:
   5728 //	grad: gradient propagated to the SparseSegmentMean op.
   5729 //	indices: indices passed to the corresponding SparseSegmentMean op.
   5730 //	segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
   5731 //	output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
   5732 func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
   5733 	if scope.Err() != nil {
   5734 		return
   5735 	}
   5736 	opspec := tf.OpSpec{
   5737 		Type: "SparseSegmentMeanGrad",
   5738 		Input: []tf.Input{
   5739 			grad, indices, segment_ids, output_dim0,
   5740 		},
   5741 	}
   5742 	op := scope.AddOperation(opspec)
   5743 	return op.Output(0)
   5744 }
   5745 
   5746 // Applies softmax to a batched N-D `SparseTensor`.
   5747 //
    5748 // The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
   5749 // (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
   5750 //
   5751 // This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
   5752 // logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
   5753 // zero elements do not participate*.  Specifically, the algorithm is equivalent
   5754 // to the following:
   5755 //
   5756 //   (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
   5757 //       with shape `[B, C]`, along the size-C dimension;
   5758 //   (2) Masks out the original implicitly-zero locations;
   5759 //   (3) Renormalizes the remaining elements.
   5760 //
   5761 // Hence, the `SparseTensor` result has exactly the same non-zero indices and
   5762 // shape.
   5763 //
   5764 // Arguments:
   5765 //	sp_indices: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
   5766 // SparseTensor, in canonical ordering.
   5767 //	sp_values: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
   5768 //	sp_shape: 1-D.  Shape of the input SparseTensor.
   5769 //
   5770 // Returns 1-D.  The `NNZ` values for the result `SparseTensor`.
   5771 func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output) {
   5772 	if scope.Err() != nil {
   5773 		return
   5774 	}
   5775 	opspec := tf.OpSpec{
   5776 		Type: "SparseSoftmax",
   5777 		Input: []tf.Input{
   5778 			sp_indices, sp_values, sp_shape,
   5779 		},
   5780 	}
   5781 	op := scope.AddOperation(opspec)
   5782 	return op.Output(0)
   5783 }
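
// A minimal sketch (indices and values are illustrative): softmax over a
// sparse 2x3 matrix whose only non-zero entries sit in row 0.
//
//	s := op.NewScope()
//	idx := op.Const(s, [][]int64{{0, 0}, {0, 2}})
//	vals := op.Const(s, []float32{1, 2})
//	shape := op.Const(s, []int64{2, 3})
//	soft := op.SparseSoftmax(s, idx, vals, shape) // row-0 values renormalize to sum to 1
//	_ = soft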
   5784 
   5785 // RandomPoissonAttr is an optional argument to RandomPoisson.
   5786 type RandomPoissonAttr func(optionalAttr)
   5787 
   5788 // RandomPoissonSeed sets the optional seed attribute to value.
   5789 // If not specified, defaults to 0
   5790 func RandomPoissonSeed(value int64) RandomPoissonAttr {
   5791 	return func(m optionalAttr) {
   5792 		m["seed"] = value
   5793 	}
   5794 }
   5795 
   5796 // RandomPoissonSeed2 sets the optional seed2 attribute to value.
   5797 // If not specified, defaults to 0
   5798 func RandomPoissonSeed2(value int64) RandomPoissonAttr {
   5799 	return func(m optionalAttr) {
   5800 		m["seed2"] = value
   5801 	}
   5802 }
   5803 
   5804 // Use RandomPoissonV2 instead.
   5805 //
   5806 // DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
   5807 func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output) {
   5808 	if scope.Err() != nil {
   5809 		return
   5810 	}
   5811 	attrs := map[string]interface{}{}
   5812 	for _, a := range optional {
   5813 		a(attrs)
   5814 	}
   5815 	opspec := tf.OpSpec{
   5816 		Type: "RandomPoisson",
   5817 		Input: []tf.Input{
   5818 			shape, rate,
   5819 		},
   5820 		Attrs: attrs,
   5821 	}
   5822 	op := scope.AddOperation(opspec)
   5823 	return op.Output(0)
   5824 }
   5825 
   5826 // MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
   5827 type MaxPoolGradV2Attr func(optionalAttr)
   5828 
   5829 // MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
   5830 //
   5831 // value: Specify the data format of the input and output data. With the
   5832 // default format "NHWC", the data is stored in the order of:
   5833 //     [batch, in_height, in_width, in_channels].
   5834 // Alternatively, the format could be "NCHW", the data storage order of:
   5835 //     [batch, in_channels, in_height, in_width].
   5836 // If not specified, defaults to "NHWC"
   5837 func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr {
   5838 	return func(m optionalAttr) {
   5839 		m["data_format"] = value
   5840 	}
   5841 }
   5842 
   5843 // Computes gradients of the maxpooling function.
   5844 //
   5845 // Arguments:
   5846 //	orig_input: The original input tensor.
   5847 //	orig_output: The original output tensor.
   5848 //	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
   5849 //	ksize: The size of the window for each dimension of the input tensor.
   5850 //	strides: The stride of the sliding window for each dimension of the
   5851 // input tensor.
   5852 //	padding: The type of padding algorithm to use.
   5853 //
   5854 // Returns Gradients w.r.t. the input to `max_pool`.
   5855 func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output) {
   5856 	if scope.Err() != nil {
   5857 		return
   5858 	}
   5859 	attrs := map[string]interface{}{"padding": padding}
   5860 	for _, a := range optional {
   5861 		a(attrs)
   5862 	}
   5863 	opspec := tf.OpSpec{
   5864 		Type: "MaxPoolGradV2",
   5865 		Input: []tf.Input{
   5866 			orig_input, orig_output, grad, ksize, strides,
   5867 		},
   5868 		Attrs: attrs,
   5869 	}
   5870 	op := scope.AddOperation(opspec)
   5871 	return op.Output(0)
   5872 }
   5873 
5874 // Restores a reader to a previously saved state.
   5875 //
   5876 // Not all Readers support being restored, so this can produce an
   5877 // Unimplemented error.
   5878 //
   5879 // Arguments:
   5880 //	reader_handle: Handle to a Reader.
   5881 //	state: Result of a ReaderSerializeState of a Reader with type
   5882 // matching reader_handle.
   5883 //
   5884 // Returns the created operation.
   5885 func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
   5886 	if scope.Err() != nil {
   5887 		return
   5888 	}
   5889 	opspec := tf.OpSpec{
   5890 		Type: "ReaderRestoreStateV2",
   5891 		Input: []tf.Input{
   5892 			reader_handle, state,
   5893 		},
   5894 	}
   5895 	return scope.AddOperation(opspec)
   5896 }
   5897 
   5898 // ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
   5899 type ResourceSparseApplyFtrlV2Attr func(optionalAttr)
   5900 
   5901 // ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
   5902 //
   5903 // value: If `True`, updating of the var and accum tensors will be protected
   5904 // by a lock; otherwise the behavior is undefined, but may exhibit less
   5905 // contention.
   5906 // If not specified, defaults to false
   5907 func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr {
   5908 	return func(m optionalAttr) {
   5909 		m["use_locking"] = value
   5910 	}
   5911 }
   5912 
   5913 // Update relevant entries in '*var' according to the Ftrl-proximal scheme.
   5914 //
5915 // That is, for the rows for which we have grad, we update var, accum and linear as follows:
   5916 // grad_with_shrinkage = grad + 2 * l2_shrinkage * var
   5917 // accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
   5918 // linear += grad_with_shrinkage +
   5919 //     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
   5920 // quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
   5921 // var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
   5922 // accum = accum_new
   5923 //
   5924 // Arguments:
   5925 //	var_: Should be from a Variable().
   5926 //	accum: Should be from a Variable().
   5927 //	linear: Should be from a Variable().
   5928 //	grad: The gradient.
   5929 //	indices: A vector of indices into the first dimension of var and accum.
   5930 //	lr: Scaling factor. Must be a scalar.
   5931 //	l1: L1 regularization. Must be a scalar.
5932 //	l2: L2 regularization. Must be a scalar.
5933 //	l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
   5934 //	lr_power: Scaling factor. Must be a scalar.
   5935 //
   5936 // Returns the created operation.
   5937 func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation) {
   5938 	if scope.Err() != nil {
   5939 		return
   5940 	}
   5941 	attrs := map[string]interface{}{}
   5942 	for _, a := range optional {
   5943 		a(attrs)
   5944 	}
   5945 	opspec := tf.OpSpec{
   5946 		Type: "ResourceSparseApplyFtrlV2",
   5947 		Input: []tf.Input{
   5948 			var_, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
   5949 		},
   5950 		Attrs: attrs,
   5951 	}
   5952 	return scope.AddOperation(opspec)
   5953 }
   5954 
   5955 // Associates the given iterator with the given statistics aggregator.
   5956 //
   5957 // Returns the created operation.
   5958 func IteratorSetStatsAggregator(scope *Scope, iterator_handle tf.Output, stats_aggregator_handle tf.Output) (o *tf.Operation) {
   5959 	if scope.Err() != nil {
   5960 		return
   5961 	}
   5962 	opspec := tf.OpSpec{
   5963 		Type: "IteratorSetStatsAggregator",
   5964 		Input: []tf.Input{
   5965 			iterator_handle, stats_aggregator_handle,
   5966 		},
   5967 	}
   5968 	return scope.AddOperation(opspec)
   5969 }
   5970 
5971 // Returns element-wise smallest integer not less than x.
   5972 func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
   5973 	if scope.Err() != nil {
   5974 		return
   5975 	}
   5976 	opspec := tf.OpSpec{
   5977 		Type: "Ceil",
   5978 		Input: []tf.Input{
   5979 			x,
   5980 		},
   5981 	}
   5982 	op := scope.AddOperation(opspec)
   5983 	return op.Output(0)
   5984 }
   5985 
   5986 // Computes the number of elements in the given table.
   5987 //
   5988 // Arguments:
   5989 //	table_handle: Handle to the table.
   5990 //
   5991 // Returns Scalar that contains number of elements in the table.
   5992 func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output) {
   5993 	if scope.Err() != nil {
   5994 		return
   5995 	}
   5996 	opspec := tf.OpSpec{
   5997 		Type: "LookupTableSizeV2",
   5998 		Input: []tf.Input{
   5999 			table_handle,
   6000 		},
   6001 	}
   6002 	op := scope.AddOperation(opspec)
   6003 	return op.Output(0)
   6004 }
   6005 
   6006 // ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
   6007 type ResizeBilinearGradAttr func(optionalAttr)
   6008 
   6009 // ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
   6010 //
   6011 // value: If true, rescale grads by (orig_height - 1) / (height - 1), which
   6012 // exactly aligns the 4 corners of grads and original_image. If false, rescale by
   6013 // orig_height / height. Treat similarly the width dimension.
   6014 // If not specified, defaults to false
   6015 func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr {
   6016 	return func(m optionalAttr) {
   6017 		m["align_corners"] = value
   6018 	}
   6019 }
   6020 
   6021 // Computes the gradient of bilinear interpolation.
   6022 //
   6023 // Arguments:
   6024 //	grads: 4-D with shape `[batch, height, width, channels]`.
   6025 //	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
   6026 // The image tensor that was resized.
   6027 //
   6028 // Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
   6029 // Gradients with respect to the input image. Input image must have been
   6030 // float or double.
   6031 func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output) {
   6032 	if scope.Err() != nil {
   6033 		return
   6034 	}
   6035 	attrs := map[string]interface{}{}
   6036 	for _, a := range optional {
   6037 		a(attrs)
   6038 	}
   6039 	opspec := tf.OpSpec{
   6040 		Type: "ResizeBilinearGrad",
   6041 		Input: []tf.Input{
   6042 			grads, original_image,
   6043 		},
   6044 		Attrs: attrs,
   6045 	}
   6046 	op := scope.AddOperation(opspec)
   6047 	return op.Output(0)
   6048 }
   6049 
   6050 // Computes the sum along sparse segments of a tensor divided by the sqrt of N.
   6051 //
   6052 // N is the size of the segment being reduced.
   6053 //
6054 // Read the section on segmentation in the math_ops documentation for an
6055 // explanation of segments.
   6056 //
   6057 // Arguments:
   6058 //
   6059 //	indices: A 1-D tensor. Has same rank as `segment_ids`.
   6060 //	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
   6061 //
   6062 // Returns Has same shape as data, except for dimension 0 which
   6063 // has size `k`, the number of segments.
   6064 func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
   6065 	if scope.Err() != nil {
   6066 		return
   6067 	}
   6068 	opspec := tf.OpSpec{
   6069 		Type: "SparseSegmentSqrtN",
   6070 		Input: []tf.Input{
   6071 			data, indices, segment_ids,
   6072 		},
   6073 	}
   6074 	op := scope.AddOperation(opspec)
   6075 	return op.Output(0)
   6076 }
   6077 
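// A small worked example (editor's illustration; values chosen to show the
// definition above, in the same notation as the other doc examples):
//
// ```
// # 'data' is [[1.0, 2.0], [3.0, 4.0]]
// # 'indices' is [0, 1], 'segment_ids' is [0, 0], so segment 0 sums both rows
// # and divides by sqrt(N) with N = 2:
// sparse_segment_sqrt_n(data, indices, segment_ids)
//   ==> [[(1+3)/sqrt(2), (2+4)/sqrt(2)]]  # ~= [[2.83, 4.24]]
// ```
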
   6078 // StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
   6079 type StatelessTruncatedNormalAttr func(optionalAttr)
   6080 
   6081 // StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
   6082 //
   6083 // value: The type of the output.
   6084 // If not specified, defaults to DT_FLOAT
   6085 func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr {
   6086 	return func(m optionalAttr) {
   6087 		m["dtype"] = value
   6088 	}
   6089 }
   6090 
   6091 // Outputs deterministic pseudorandom values from a truncated normal distribution.
   6092 //
   6093 // The generated values follow a normal distribution with mean 0 and standard
   6094 // deviation 1, except that values whose magnitude is more than 2 standard
   6095 // deviations from the mean are dropped and re-picked.
   6096 //
   6097 // The outputs are a deterministic function of `shape` and `seed`.
   6098 //
   6099 // Arguments:
   6100 //	shape: The shape of the output tensor.
   6101 //	seed: 2 seeds (shape [2]).
   6102 //
   6103 // Returns Random values with specified shape.
   6104 func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output) {
   6105 	if scope.Err() != nil {
   6106 		return
   6107 	}
   6108 	attrs := map[string]interface{}{}
   6109 	for _, a := range optional {
   6110 		a(attrs)
   6111 	}
   6112 	opspec := tf.OpSpec{
   6113 		Type: "StatelessTruncatedNormal",
   6114 		Input: []tf.Input{
   6115 			shape, seed,
   6116 		},
   6117 		Attrs: attrs,
   6118 	}
   6119 	op := scope.AddOperation(opspec)
   6120 	return op.Output(0)
   6121 }
   6122 
   6123 // RestoreSliceAttr is an optional argument to RestoreSlice.
   6124 type RestoreSliceAttr func(optionalAttr)
   6125 
   6126 // RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
   6127 //
   6128 // value: Index of file to open first if multiple files match
   6129 // `file_pattern`. See the documentation for `Restore`.
   6130 // If not specified, defaults to -1
   6131 func RestoreSlicePreferredShard(value int64) RestoreSliceAttr {
   6132 	return func(m optionalAttr) {
   6133 		m["preferred_shard"] = value
   6134 	}
   6135 }
   6136 
   6137 // Restores a tensor from checkpoint files.
   6138 //
6139 // This is like `Restore` except that the restored tensor can be listed as filling
   6140 // only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
   6141 // larger tensor and the slice that the restored tensor covers.
   6142 //
   6143 // The `shape_and_slice` input has the same format as the
   6144 // elements of the `shapes_and_slices` input of the `SaveSlices` op.
   6145 //
   6146 // Arguments:
   6147 //	file_pattern: Must have a single element. The pattern of the files from
   6148 // which we read the tensor.
   6149 //	tensor_name: Must have a single element. The name of the tensor to be
   6150 // restored.
6151 //	shape_and_slice: Scalar. The shapes and slice specifications to use when
6152 // restoring the tensor.
   6153 //	dt: The type of the tensor to be restored.
   6154 //
   6155 // Returns The restored tensor.
   6156 func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output) {
   6157 	if scope.Err() != nil {
   6158 		return
   6159 	}
   6160 	attrs := map[string]interface{}{"dt": dt}
   6161 	for _, a := range optional {
   6162 		a(attrs)
   6163 	}
   6164 	opspec := tf.OpSpec{
   6165 		Type: "RestoreSlice",
   6166 		Input: []tf.Input{
   6167 			file_pattern, tensor_name, shape_and_slice,
   6168 		},
   6169 		Attrs: attrs,
   6170 	}
   6171 	op := scope.AddOperation(opspec)
   6172 	return op.Output(0)
   6173 }
   6174 
   6175 // UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
   6176 type UniqueWithCountsAttr func(optionalAttr)
   6177 
   6178 // UniqueWithCountsOutIdx sets the optional out_idx attribute to value.
   6179 // If not specified, defaults to DT_INT32
   6180 func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr {
   6181 	return func(m optionalAttr) {
   6182 		m["out_idx"] = value
   6183 	}
   6184 }
   6185 
   6186 // Finds unique elements in a 1-D tensor.
   6187 //
   6188 // This operation returns a tensor `y` containing all of the unique elements of `x`
   6189 // sorted in the same order that they occur in `x`. This operation also returns a
   6190 // tensor `idx` the same size as `x` that contains the index of each value of `x`
   6191 // in the unique output `y`. Finally, it returns a third tensor `count` that
   6192 // contains the count of each element of `y` in `x`. In other words:
   6193 //
6194 // `y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
   6195 //
   6196 // For example:
   6197 //
   6198 // ```
   6199 // # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
   6200 // y, idx, count = unique_with_counts(x)
   6201 // y ==> [1, 2, 4, 7, 8]
   6202 // idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
   6203 // count ==> [2, 1, 3, 1, 2]
   6204 // ```
   6205 //
   6206 // Arguments:
   6207 //	x: 1-D.
   6208 //
6209 // Returns three 1-D tensors: `y` (the unique elements), `idx` (the indices into `y`), and `count` (the counts).
   6210 func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output) {
   6211 	if scope.Err() != nil {
   6212 		return
   6213 	}
   6214 	attrs := map[string]interface{}{}
   6215 	for _, a := range optional {
   6216 		a(attrs)
   6217 	}
   6218 	opspec := tf.OpSpec{
   6219 		Type: "UniqueWithCounts",
   6220 		Input: []tf.Input{
   6221 			x,
   6222 		},
   6223 		Attrs: attrs,
   6224 	}
   6225 	op := scope.AddOperation(opspec)
   6226 	return op.Output(0), op.Output(1), op.Output(2)
   6227 }
   6228 
   6229 // StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
   6230 type StatelessRandomNormalAttr func(optionalAttr)
   6231 
   6232 // StatelessRandomNormalDtype sets the optional dtype attribute to value.
   6233 //
   6234 // value: The type of the output.
   6235 // If not specified, defaults to DT_FLOAT
   6236 func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr {
   6237 	return func(m optionalAttr) {
   6238 		m["dtype"] = value
   6239 	}
   6240 }
   6241 
   6242 // Outputs deterministic pseudorandom values from a normal distribution.
   6243 //
   6244 // The generated values will have mean 0 and standard deviation 1.
   6245 //
   6246 // The outputs are a deterministic function of `shape` and `seed`.
   6247 //
   6248 // Arguments:
   6249 //	shape: The shape of the output tensor.
   6250 //	seed: 2 seeds (shape [2]).
   6251 //
   6252 // Returns Random values with specified shape.
   6253 func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output) {
   6254 	if scope.Err() != nil {
   6255 		return
   6256 	}
   6257 	attrs := map[string]interface{}{}
   6258 	for _, a := range optional {
   6259 		a(attrs)
   6260 	}
   6261 	opspec := tf.OpSpec{
   6262 		Type: "StatelessRandomNormal",
   6263 		Input: []tf.Input{
   6264 			shape, seed,
   6265 		},
   6266 		Attrs: attrs,
   6267 	}
   6268 	op := scope.AddOperation(opspec)
   6269 	return op.Output(0)
   6270 }
   6271 
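// Example (editor's sketch, not part of the generated code): the determinism
// of this stateless op, via the tensorflow/go `op` package. Names and values
// are illustrative assumptions.
//
// ```
// s := op.NewScope()
// shape := op.Const(s, []int32{2, 3})
// seed := op.Const(s, []int64{1, 2}) // 2 seeds, shape [2]
// out := op.StatelessRandomNormal(s, shape, seed,
// 	op.StatelessRandomNormalDtype(tf.Float))
// // Running the graph twice with the same `shape` and `seed` yields
// // bit-identical values.
// ```
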
   6272 // Reshapes a quantized tensor as per the Reshape op.
   6273 //
   6276 // Arguments:
   6277 //
   6278 //	shape: Defines the shape of the output tensor.
   6279 //	input_min: The minimum value of the input.
   6280 //	input_max: The maximum value of the input.
   6281 //
6282 // Returns the reshaped tensor; `output_min`, copied from `input_min`; and `output_max`, copied from `input_max`.
   6283 func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
   6284 	if scope.Err() != nil {
   6285 		return
   6286 	}
   6287 	opspec := tf.OpSpec{
   6288 		Type: "QuantizedReshape",
   6289 		Input: []tf.Input{
   6290 			tensor, shape, input_min, input_max,
   6291 		},
   6292 	}
   6293 	op := scope.AddOperation(opspec)
   6294 	return op.Output(0), op.Output(1), op.Output(2)
   6295 }
   6296 
   6297 // GatherAttr is an optional argument to Gather.
   6298 type GatherAttr func(optionalAttr)
   6299 
   6300 // GatherValidateIndices sets the optional validate_indices attribute to value.
   6301 // If not specified, defaults to true
   6302 func GatherValidateIndices(value bool) GatherAttr {
   6303 	return func(m optionalAttr) {
   6304 		m["validate_indices"] = value
   6305 	}
   6306 }
   6307 
   6308 // Gather slices from `params` according to `indices`.
   6309 //
   6310 // `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
   6311 // Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
   6312 //
   6313 // ```python
   6314 //     # Scalar indices
6315 //     output[:, ..., :] = params[indices, :, ..., :]
6316 //
6317 //     # Vector indices
6318 //     output[i, :, ..., :] = params[indices[i], :, ..., :]
6319 //
6320 //     # Higher rank indices
6321 //     output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
   6322 // ```
   6323 //
   6324 // If `indices` is a permutation and `len(indices) == params.shape[0]` then
   6325 // this operation will permute `params` accordingly.
   6326 //
   6327 // `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
   6328 // `indices` are always validated to be within range. If assigned to GPU,
   6329 // out-of-bound indices result in safe but unspecified behavior, which may include
   6330 // raising an error.
   6331 //
   6332 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   6333 // <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
   6334 // </div>
   6335 func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output) {
   6336 	if scope.Err() != nil {
   6337 		return
   6338 	}
   6339 	attrs := map[string]interface{}{}
   6340 	for _, a := range optional {
   6341 		a(attrs)
   6342 	}
   6343 	opspec := tf.OpSpec{
   6344 		Type: "Gather",
   6345 		Input: []tf.Input{
   6346 			params, indices,
   6347 		},
   6348 		Attrs: attrs,
   6349 	}
   6350 	op := scope.AddOperation(opspec)
   6351 	return op.Output(0)
   6352 }
   6353 
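// Example (editor's sketch, not part of the generated code): a minimal
// client-side use of Gather via the tensorflow/go `op` package. Names and
// values are illustrative assumptions.
//
// ```
// s := op.NewScope()
// params := op.Const(s, []float32{10, 20, 30, 40})
// indices := op.Const(s, []int32{3, 0, 3})
// out := op.Gather(s, params, indices)
// // out evaluates to [40, 10, 40]: shape indices.shape + params.shape[1:] = [3].
// ```
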
   6354 // Returns the truth value of (x != y) element-wise.
   6355 //
   6356 // *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
   6357 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   6358 func NotEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   6359 	if scope.Err() != nil {
   6360 		return
   6361 	}
   6362 	opspec := tf.OpSpec{
   6363 		Type: "NotEqual",
   6364 		Input: []tf.Input{
   6365 			x, y,
   6366 		},
   6367 	}
   6368 	op := scope.AddOperation(opspec)
   6369 	return op.Output(0)
   6370 }
   6371 
   6372 // Inverse 3D real-valued fast Fourier transform.
   6373 //
   6374 // Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
   6375 // signal over the inner-most 3 dimensions of `input`.
   6376 //
   6377 // The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
   6378 // The inner-most dimension contains the `fft_length / 2 + 1` unique components of
   6379 // the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
   6380 // from the size of the inner-most 3 dimensions of `input`. If the FFT length used
   6381 // to compute `input` is odd, it should be provided since it cannot be inferred
   6382 // properly.
   6383 //
   6384 // Along each axis `IRFFT3D` is computed on, if `fft_length` (or
   6385 // `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
   6386 // corresponding dimension of `input`, the dimension is cropped. If it is larger,
   6387 // the dimension is padded with zeros.
   6388 //
   6389 // Arguments:
   6390 //	input: A complex64 tensor.
   6391 //	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
   6392 //
   6393 // Returns A float32 tensor of the same rank as `input`. The inner-most 3
   6394 //   dimensions of `input` are replaced with the `fft_length` samples of their
   6395 //   inverse 3D real Fourier transform.
   6396 //
   6397 // @compatibility(numpy)
   6398 // Equivalent to np.irfftn with 3 dimensions.
   6399 // @end_compatibility
   6400 func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
   6401 	if scope.Err() != nil {
   6402 		return
   6403 	}
   6404 	opspec := tf.OpSpec{
   6405 		Type: "IRFFT3D",
   6406 		Input: []tf.Input{
   6407 			input, fft_length,
   6408 		},
   6409 	}
   6410 	op := scope.AddOperation(opspec)
   6411 	return op.Output(0)
   6412 }
   6413 
   6414 // StringSplitAttr is an optional argument to StringSplit.
   6415 type StringSplitAttr func(optionalAttr)
   6416 
   6417 // StringSplitSkipEmpty sets the optional skip_empty attribute to value.
   6418 //
   6419 // value: A `bool`. If `True`, skip the empty strings from the result.
   6420 // If not specified, defaults to true
   6421 func StringSplitSkipEmpty(value bool) StringSplitAttr {
   6422 	return func(m optionalAttr) {
   6423 		m["skip_empty"] = value
   6424 	}
   6425 }
   6426 
   6427 // Split elements of `input` based on `delimiter` into a `SparseTensor`.
   6428 //
6429 // Let N be the size of `input` (typically N will be the batch size). Split each
6430 // element of `input` based on `delimiter` and return a `SparseTensor`
6431 // containing the split tokens. Empty tokens are ignored.
   6432 //
   6433 // `delimiter` can be empty, or a string of split characters. If `delimiter` is an
   6434 //  empty string, each element of `input` is split into individual single-byte
   6435 //  character strings, including splitting of UTF-8 multibyte sequences. Otherwise
   6436 //  every character of `delimiter` is a potential split point.
   6437 //
   6438 // For example:
   6439 //   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
   6440 //   will be
   6441 //
   6442 //   indices = [0, 0;
   6443 //              0, 1;
   6444 //              1, 0;
   6445 //              1, 1;
   6446 //              1, 2]
   6447 //   shape = [2, 3]
   6448 //   values = ['hello', 'world', 'a', 'b', 'c']
   6449 //
   6450 // Arguments:
   6451 //	input: 1-D. Strings to split.
   6452 //	delimiter: 0-D. Delimiter characters (bytes), or empty string.
   6453 //
6454 // Returns a dense matrix of int64 representing the indices of the sparse tensor; a vector of strings corresponding to the split values; and a length-2 vector of int64 representing the shape of the sparse
6455 // tensor, where the first value is N and the second value is the maximum number
6456 // of tokens in a single input entry.
   6457 func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) {
   6458 	if scope.Err() != nil {
   6459 		return
   6460 	}
   6461 	attrs := map[string]interface{}{}
   6462 	for _, a := range optional {
   6463 		a(attrs)
   6464 	}
   6465 	opspec := tf.OpSpec{
   6466 		Type: "StringSplit",
   6467 		Input: []tf.Input{
   6468 			input, delimiter,
   6469 		},
   6470 		Attrs: attrs,
   6471 	}
   6472 	op := scope.AddOperation(opspec)
   6473 	return op.Output(0), op.Output(1), op.Output(2)
   6474 }
   6475 
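// Example (editor's sketch, not part of the generated code): the worked
// example above expressed through the tensorflow/go `op` package. Names are
// illustrative assumptions.
//
// ```
// s := op.NewScope()
// input := op.Const(s, []string{"hello world", "a b c"})
// delimiter := op.Const(s, " ")
// indices, values, shape := op.StringSplit(s, input, delimiter,
// 	op.StringSplitSkipEmpty(true))
// // values evaluates to ["hello", "world", "a", "b", "c"], shape to [2, 3].
// ```
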
   6476 // WriteAudioSummaryAttr is an optional argument to WriteAudioSummary.
   6477 type WriteAudioSummaryAttr func(optionalAttr)
   6478 
   6479 // WriteAudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
   6480 //
   6481 // value: Max number of batch elements to generate audio for.
   6482 // If not specified, defaults to 3
   6483 //
   6484 // REQUIRES: value >= 1
   6485 func WriteAudioSummaryMaxOutputs(value int64) WriteAudioSummaryAttr {
   6486 	return func(m optionalAttr) {
   6487 		m["max_outputs"] = value
   6488 	}
   6489 }
   6490 
   6491 // Writes a `Summary` protocol buffer with audio.
   6492 //
   6493 // The summary has up to `max_outputs` summary values containing audio. The
   6494 // audio is built from `tensor` which must be 3-D with shape `[batch_size,
   6495 // frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
   6496 // assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
   6497 //
   6498 // The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
   6499 // build the `tag` of the summary values:
   6500 //
   6501 // *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
   6502 // *  If `max_outputs` is greater than 1, the summary value tags are
   6503 //    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
   6504 //
   6505 // Arguments:
   6506 //	writer: A handle to a summary writer.
   6507 //	step: The step to write the summary for.
   6508 //	tag: Scalar. Used to build the `tag` attribute of the summary values.
   6509 //	tensor: 2-D of shape `[batch_size, frames]`.
   6510 //	sample_rate: The sample rate of the signal in hertz.
   6511 //
   6512 // Returns the created operation.
   6513 func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...WriteAudioSummaryAttr) (o *tf.Operation) {
   6514 	if scope.Err() != nil {
   6515 		return
   6516 	}
   6517 	attrs := map[string]interface{}{}
   6518 	for _, a := range optional {
   6519 		a(attrs)
   6520 	}
   6521 	opspec := tf.OpSpec{
   6522 		Type: "WriteAudioSummary",
   6523 		Input: []tf.Input{
   6524 			writer, step, tag, tensor, sample_rate,
   6525 		},
   6526 		Attrs: attrs,
   6527 	}
   6528 	return scope.AddOperation(opspec)
   6529 }
   6530 
   6531 // ProdAttr is an optional argument to Prod.
   6532 type ProdAttr func(optionalAttr)
   6533 
   6534 // ProdKeepDims sets the optional keep_dims attribute to value.
   6535 //
   6536 // value: If true, retain reduced dimensions with length 1.
   6537 // If not specified, defaults to false
   6538 func ProdKeepDims(value bool) ProdAttr {
   6539 	return func(m optionalAttr) {
   6540 		m["keep_dims"] = value
   6541 	}
   6542 }
   6543 
   6544 // Computes the product of elements across dimensions of a tensor.
   6545 //
   6546 // Reduces `input` along the dimensions given in `axis`. Unless
   6547 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   6548 // `axis`. If `keep_dims` is true, the reduced dimensions are
   6549 // retained with length 1.
   6550 //
   6551 // Arguments:
   6552 //	input: The tensor to reduce.
   6553 //	axis: The dimensions to reduce. Must be in the range
   6554 // `[-rank(input), rank(input))`.
   6555 //
   6556 // Returns The reduced tensor.
   6557 func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output) {
   6558 	if scope.Err() != nil {
   6559 		return
   6560 	}
   6561 	attrs := map[string]interface{}{}
   6562 	for _, a := range optional {
   6563 		a(attrs)
   6564 	}
   6565 	opspec := tf.OpSpec{
   6566 		Type: "Prod",
   6567 		Input: []tf.Input{
   6568 			input, axis,
   6569 		},
   6570 		Attrs: attrs,
   6571 	}
   6572 	op := scope.AddOperation(opspec)
   6573 	return op.Output(0)
   6574 }
   6575 
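// Example (editor's sketch, not part of the generated code): a minimal
// client-side reduction with Prod via the tensorflow/go `op` package. Names
// and values are illustrative assumptions.
//
// ```
// s := op.NewScope()
// x := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// axis := op.Const(s, []int32{0})
// out := op.Prod(s, x, axis, op.ProdKeepDims(false))
// // out evaluates to [3, 8]: the column-wise products 1*3 and 2*4.
// ```
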
   6576 // ResizeBilinearAttr is an optional argument to ResizeBilinear.
   6577 type ResizeBilinearAttr func(optionalAttr)
   6578 
   6579 // ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
   6580 //
   6581 // value: If true, rescale input by (new_height - 1) / (height - 1), which
   6582 // exactly aligns the 4 corners of images and resized images. If false, rescale
   6583 // by new_height / height. Treat similarly the width dimension.
   6584 // If not specified, defaults to false
   6585 func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
   6586 	return func(m optionalAttr) {
   6587 		m["align_corners"] = value
   6588 	}
   6589 }
   6590 
   6591 // Resize `images` to `size` using bilinear interpolation.
   6592 //
   6593 // Input images can be of different types but output images are always float.
   6594 //
   6595 // Arguments:
   6596 //	images: 4-D with shape `[batch, height, width, channels]`.
6597 //	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
   6598 // new size for the images.
   6599 //
   6600 // Returns 4-D with shape
   6601 // `[batch, new_height, new_width, channels]`.
   6602 func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
   6603 	if scope.Err() != nil {
   6604 		return
   6605 	}
   6606 	attrs := map[string]interface{}{}
   6607 	for _, a := range optional {
   6608 		a(attrs)
   6609 	}
   6610 	opspec := tf.OpSpec{
   6611 		Type: "ResizeBilinear",
   6612 		Input: []tf.Input{
   6613 			images, size,
   6614 		},
   6615 		Attrs: attrs,
   6616 	}
   6617 	op := scope.AddOperation(opspec)
   6618 	return op.Output(0)
   6619 }
   6620 
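// Example (editor's sketch, not part of the generated code): resizing a batch
// of images via the tensorflow/go `op` package. The placeholder and sizes are
// illustrative assumptions.
//
// ```
// s := op.NewScope()
// images := op.Placeholder(s, tf.Float) // 4-D [batch, height, width, channels]
// size := op.Const(s, []int32{224, 224}) // new_height, new_width
// resized := op.ResizeBilinear(s, images, size,
// 	op.ResizeBilinearAlignCorners(false))
// // `resized` is 4-D [batch, 224, 224, channels] and always float.
// ```
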
   6621 // Computes softsign: `features / (abs(features) + 1)`.
   6622 func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
   6623 	if scope.Err() != nil {
   6624 		return
   6625 	}
   6626 	opspec := tf.OpSpec{
   6627 		Type: "Softsign",
   6628 		Input: []tf.Input{
   6629 			features,
   6630 		},
   6631 	}
   6632 	op := scope.AddOperation(opspec)
   6633 	return op.Output(0)
   6634 }
   6635 
   6636 // GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
   6637 type GenerateVocabRemappingAttr func(optionalAttr)
   6638 
   6639 // GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
   6640 //
   6641 // value: Number of entries in the old vocab file to consider.  If -1,
   6642 // use the entire old vocabulary.
   6643 // If not specified, defaults to -1
   6644 //
   6645 // REQUIRES: value >= -1
   6646 func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
   6647 	return func(m optionalAttr) {
   6648 		m["old_vocab_size"] = value
   6649 	}
   6650 }
   6651 
6652 // Given a path to new and old vocabulary files, returns a remapping Tensor.
6653 //
6654 // The remapping has length `num_new_vocab`, where `remapping[i]` contains the row number in the old
   6655 // vocabulary that corresponds to row `i` in the new vocabulary (starting at line
   6656 // `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
   6657 // in the new vocabulary is not in the old vocabulary.  The old vocabulary is
   6658 // constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
   6659 // default value of -1.
   6660 //
6661 // `new_vocab_offset` enables
   6662 // use in the partitioned variable case, and should generally be set through
   6663 // examining partitioning info.  The format of the files should be a text file,
   6664 // with each line containing a single entity within the vocabulary.
   6665 //
   6666 // For example, with `new_vocab_file` a text file containing each of the following
   6667 // elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],
   6668 // `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
   6669 // `[0, -1, 2]`.
   6670 //
   6671 // The op also returns a count of how many entries in the new vocabulary
   6672 // were present in the old vocabulary, which is used to calculate the number of
6673 // values to initialize in a weight matrix remapping.
   6674 //
   6675 // This functionality can be used to remap both row vocabularies (typically,
   6676 // features) and column vocabularies (typically, classes) from TensorFlow
   6677 // checkpoints.  Note that the partitioning logic relies on contiguous vocabularies
   6678 // corresponding to div-partitioned variables.  Moreover, the underlying remapping
   6679 // uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
   6680 // use the corresponding index_table_from_file() as the FeatureColumn framework
   6681 // does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
   6682 //
   6683 // Arguments:
   6684 //	new_vocab_file: Path to the new vocab file.
   6685 //	old_vocab_file: Path to the old vocab file.
   6686 //	new_vocab_offset: How many entries into the new vocab file to start reading.
   6687 //	num_new_vocab: Number of entries in the new vocab file to remap.
   6688 //
   6689 // Returns A Tensor of length num_new_vocab where the element at index i
   6690 // is equal to the old ID that maps to the new ID i.  This element is -1 for any
6691 // new ID that is not found in the old vocabulary; and the number of new vocab entries found in the old vocab.
   6692 func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
   6693 	if scope.Err() != nil {
   6694 		return
   6695 	}
   6696 	attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
   6697 	for _, a := range optional {
   6698 		a(attrs)
   6699 	}
   6700 	opspec := tf.OpSpec{
   6701 		Type: "GenerateVocabRemapping",
   6702 		Input: []tf.Input{
   6703 			new_vocab_file, old_vocab_file,
   6704 		},
   6705 		Attrs: attrs,
   6706 	}
   6707 	op := scope.AddOperation(opspec)
   6708 	return op.Output(0), op.Output(1)
   6709 }
   6710 
   6711 // Assigns sparse updates to the variable referenced by `resource`.
   6712 //
   6713 // This operation computes
   6714 //
   6715 //     # Scalar indices
   6716 //     ref[indices, ...] = updates[...]
   6717 //
   6718 //     # Vector indices (for each i)
   6719 //     ref[indices[i], ...] = updates[i, ...]
   6720 //
   6721 //     # High rank indices (for each i, ..., j)
   6722 //     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
   6723 //
   6724 // Arguments:
   6725 //	resource: Should be from a `Variable` node.
   6726 //	indices: A tensor of indices into the first dimension of `ref`.
6727 //	updates: A tensor of updated values to store in `ref`.
   6728 //
   6729 // Returns the created operation.
   6730 func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
   6731 	if scope.Err() != nil {
   6732 		return
   6733 	}
   6734 	opspec := tf.OpSpec{
   6735 		Type: "ResourceScatterUpdate",
   6736 		Input: []tf.Input{
   6737 			resource, indices, updates,
   6738 		},
   6739 	}
   6740 	return scope.AddOperation(opspec)
   6741 }
   6742 
   6743 // CumsumAttr is an optional argument to Cumsum.
   6744 type CumsumAttr func(optionalAttr)
   6745 
   6746 // CumsumExclusive sets the optional exclusive attribute to value.
   6747 //
   6748 // value: If `True`, perform exclusive cumsum.
   6749 // If not specified, defaults to false
   6750 func CumsumExclusive(value bool) CumsumAttr {
   6751 	return func(m optionalAttr) {
   6752 		m["exclusive"] = value
   6753 	}
   6754 }
   6755 
   6756 // CumsumReverse sets the optional reverse attribute to value.
   6757 //
   6758 // value: A `bool` (default: False).
   6759 // If not specified, defaults to false
   6760 func CumsumReverse(value bool) CumsumAttr {
   6761 	return func(m optionalAttr) {
   6762 		m["reverse"] = value
   6763 	}
   6764 }
   6765 
   6766 // Compute the cumulative sum of the tensor `x` along `axis`.
   6767 //
   6768 // By default, this op performs an inclusive cumsum, which means that the first
   6769 // element of the input is identical to the first element of the output:
   6770 //
   6771 // ```python
   6772 // tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
   6773 // ```
   6774 //
   6775 // By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
   6776 // performed instead:
   6777 //
   6778 // ```python
   6779 // tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
   6780 // ```
   6781 //
   6782 // By setting the `reverse` kwarg to `True`, the cumsum is performed in the
   6783 // opposite direction:
   6784 //
   6785 // ```python
   6786 // tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
   6787 // ```
   6788 //
   6789 // This is more efficient than using separate `tf.reverse` ops.
   6790 //
   6791 // The `reverse` and `exclusive` kwargs can also be combined:
   6792 //
   6793 // ```python
   6794 // tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
   6795 // ```
   6796 //
   6797 // Arguments:
   6798 //	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
   6799 // `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
   6800 // `complex128`, `qint8`, `quint8`, `qint32`, `half`.
   6801 //	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
   6802 // `[-rank(x), rank(x))`.
   6803 func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) {
   6804 	if scope.Err() != nil {
   6805 		return
   6806 	}
   6807 	attrs := map[string]interface{}{}
   6808 	for _, a := range optional {
   6809 		a(attrs)
   6810 	}
   6811 	opspec := tf.OpSpec{
   6812 		Type: "Cumsum",
   6813 		Input: []tf.Input{
   6814 			x, axis,
   6815 		},
   6816 		Attrs: attrs,
   6817 	}
   6818 	op := scope.AddOperation(opspec)
   6819 	return op.Output(0)
   6820 }
   6821 
   6822 // QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
   6823 type QuantizedRelu6Attr func(optionalAttr)
   6824 
   6825 // QuantizedRelu6OutType sets the optional out_type attribute to value.
   6826 // If not specified, defaults to DT_QUINT8
   6827 func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr {
   6828 	return func(m optionalAttr) {
   6829 		m["out_type"] = value
   6830 	}
   6831 }
   6832 
   6833 // Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
   6834 //
   6835 // Arguments:
   6836 //
   6837 //	min_features: The float value that the lowest quantized value represents.
   6838 //	max_features: The float value that the highest quantized value represents.
   6839 //
6840 // Returns activations with the same output shape as "features"; the float value that the lowest quantized value represents; and the float value that the highest quantized value represents.
   6841 func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
   6842 	if scope.Err() != nil {
   6843 		return
   6844 	}
   6845 	attrs := map[string]interface{}{}
   6846 	for _, a := range optional {
   6847 		a(attrs)
   6848 	}
   6849 	opspec := tf.OpSpec{
   6850 		Type: "QuantizedRelu6",
   6851 		Input: []tf.Input{
   6852 			features, min_features, max_features,
   6853 		},
   6854 		Attrs: attrs,
   6855 	}
   6856 	op := scope.AddOperation(opspec)
   6857 	return op.Output(0), op.Output(1), op.Output(2)
   6858 }
   6859 
   6860 // FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
   6861 type FixedLengthRecordReaderV2Attr func(optionalAttr)
   6862 
   6863 // FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
   6864 //
   6865 // value: Number of bytes in the header, defaults to 0.
   6866 // If not specified, defaults to 0
   6867 func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {
   6868 	return func(m optionalAttr) {
   6869 		m["header_bytes"] = value
   6870 	}
   6871 }
   6872 
   6873 // FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
   6874 //
   6875 // value: Number of bytes in the footer, defaults to 0.
   6876 // If not specified, defaults to 0
   6877 func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr {
   6878 	return func(m optionalAttr) {
   6879 		m["footer_bytes"] = value
   6880 	}
   6881 }
   6882 
   6883 // FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
   6884 //
   6885 // value: Number of bytes to hop before each read. Default of 0 means using
   6886 // record_bytes.
   6887 // If not specified, defaults to 0
   6888 func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr {
   6889 	return func(m optionalAttr) {
   6890 		m["hop_bytes"] = value
   6891 	}
   6892 }
   6893 
   6894 // FixedLengthRecordReaderV2Container sets the optional container attribute to value.
   6895 //
   6896 // value: If non-empty, this reader is placed in the given container.
   6897 // Otherwise, a default container is used.
   6898 // If not specified, defaults to ""
   6899 func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr {
   6900 	return func(m optionalAttr) {
   6901 		m["container"] = value
   6902 	}
   6903 }
   6904 
   6905 // FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
   6906 //
   6907 // value: If non-empty, this reader is named in the given bucket
   6908 // with this shared_name. Otherwise, the node name is used instead.
   6909 // If not specified, defaults to ""
   6910 func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr {
   6911 	return func(m optionalAttr) {
   6912 		m["shared_name"] = value
   6913 	}
   6914 }
   6915 
   6916 // FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
   6917 //
   6918 // value: The type of encoding for the file. Currently ZLIB and GZIP
   6919 // are supported. Defaults to none.
   6920 // If not specified, defaults to ""
   6921 func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr {
   6922 	return func(m optionalAttr) {
   6923 		m["encoding"] = value
   6924 	}
   6925 }
   6926 
   6927 // A Reader that outputs fixed-length records from a file.
   6928 //
   6929 // Arguments:
   6930 //	record_bytes: Number of bytes in the record.
   6931 //
   6932 // Returns The handle to reference the Reader.
   6933 func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output) {
   6934 	if scope.Err() != nil {
   6935 		return
   6936 	}
   6937 	attrs := map[string]interface{}{"record_bytes": record_bytes}
   6938 	for _, a := range optional {
   6939 		a(attrs)
   6940 	}
   6941 	opspec := tf.OpSpec{
   6942 		Type: "FixedLengthRecordReaderV2",
   6943 
   6944 		Attrs: attrs,
   6945 	}
   6946 	op := scope.AddOperation(opspec)
   6947 	return op.Output(0)
   6948 }
   6949 
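// Example (editor's sketch, not part of the generated code): constructing the
// reader with optional attributes via the tensorflow/go `op` package. The
// record layout below is an illustrative assumption.
//
// ```
// s := op.NewScope()
// reader := op.FixedLengthRecordReaderV2(s, 16, // 16 bytes per record
// 	op.FixedLengthRecordReaderV2HeaderBytes(4),
// 	op.FixedLengthRecordReaderV2Encoding("GZIP"))
// // Pair `reader` with a queue of filenames and a ReaderReadV2-style op to
// // produce the records.
// ```
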
   6950 // The gradient operator for the SparseAdd op.
   6951 //
   6952 // The SparseAdd op calculates A + B, where A, B, and the sum are all represented
   6953 // as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
   6954 // non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
   6955 // values of A and B.
   6956 //
   6957 // Arguments:
   6958 //	backprop_val_grad: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
   6959 // the non-empty values of the sum.
   6960 //	a_indices: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
   6961 //	b_indices: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
   6962 //	sum_indices: 2-D.  The `indices` of the sum `SparseTensor`, size
   6963 // `[nnz(sum), ndims]`.
   6964 //
6965 // Returns 1-D with shape `[nnz(A)]`, the gradient with respect to the
6966 // non-empty values of A; and 1-D with shape `[nnz(B)]`, the gradient with
6967 // respect to the non-empty values of B.
   6968 func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output) {
   6969 	if scope.Err() != nil {
   6970 		return
   6971 	}
   6972 	opspec := tf.OpSpec{
   6973 		Type: "SparseAddGrad",
   6974 		Input: []tf.Input{
   6975 			backprop_val_grad, a_indices, b_indices, sum_indices,
   6976 		},
   6977 	}
   6978 	op := scope.AddOperation(opspec)
   6979 	return op.Output(0), op.Output(1)
   6980 }
   6981 
   6982 // Computes atan of x element-wise.
   6983 func Atan(scope *Scope, x tf.Output) (y tf.Output) {
   6984 	if scope.Err() != nil {
   6985 		return
   6986 	}
   6987 	opspec := tf.OpSpec{
   6988 		Type: "Atan",
   6989 		Input: []tf.Input{
   6990 			x,
   6991 		},
   6992 	}
   6993 	op := scope.AddOperation(opspec)
   6994 	return op.Output(0)
   6995 }
   6996 
   6997 // Encode audio data using the WAV file format.
   6998 //
   6999 // This operation will generate a string suitable to be saved out to create a .wav
   7000 // audio file. It will be encoded in the 16-bit PCM format. It takes in float
7001 // values in the range -1.0f to 1.0f; any values outside that range will be
7002 // clamped to it.
   7003 //
   7004 // `audio` is a 2-D float Tensor of shape `[length, channels]`.
   7005 // `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
   7006 //
   7007 // Arguments:
   7008 //	audio: 2-D with shape `[length, channels]`.
   7009 //	sample_rate: Scalar containing the sample frequency.
   7010 //
   7011 // Returns 0-D. WAV-encoded file contents.
   7012 func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output) {
   7013 	if scope.Err() != nil {
   7014 		return
   7015 	}
   7016 	opspec := tf.OpSpec{
   7017 		Type: "EncodeWav",
   7018 		Input: []tf.Input{
   7019 			audio, sample_rate,
   7020 		},
   7021 	}
   7022 	op := scope.AddOperation(opspec)
   7023 	return op.Output(0)
   7024 }
   7025 
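// Example (editor's sketch, not part of the generated code): encoding a short
// mono clip via the tensorflow/go `op` package. Names and values are
// illustrative assumptions.
//
// ```
// s := op.NewScope()
// audio := op.Const(s, [][]float32{{0.0}, {0.5}, {-0.5}}) // [length, channels]
// rate := op.Const(s, int32(44100))
// contents := op.EncodeWav(s, audio, rate)
// // Evaluate `contents` in a tf.Session and write the resulting scalar string
// // to disk as a .wav file.
// ```
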
   7026 // Converts each string in the input Tensor to its hash mod by a number of buckets.
   7027 //
   7028 // The hash function is deterministic on the content of the string within the
   7029 // process. The hash function is a keyed hash function, where attribute `key`
   7030 // defines the key of the hash function. `key` is an array of 2 elements.
   7031 //
   7032 // A strong hash is important when inputs may be malicious, e.g. URLs with
   7033 // additional components. Adversaries could try to make their inputs hash to the
   7034 // same bucket for a denial-of-service attack or to skew the results. A strong
   7035 // hash prevents this by making it difficult, if not infeasible, to compute inputs
   7036 // that hash to the same bucket. This comes at a cost of roughly 4x higher compute
   7037 // time than `tf.string_to_hash_bucket_fast`.
   7038 //
   7039 // Arguments:
   7040 //	input: The strings to assign a hash bucket.
   7041 //	num_buckets: The number of buckets.
   7042 //	key: The key for the keyed hash function passed as a list of two uint64
   7043 // elements.
   7044 //
   7045 // Returns A Tensor of the same shape as the input `string_tensor`.
   7046 func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output) {
   7047 	if scope.Err() != nil {
   7048 		return
   7049 	}
   7050 	attrs := map[string]interface{}{"num_buckets": num_buckets, "key": key}
   7051 	opspec := tf.OpSpec{
   7052 		Type: "StringToHashBucketStrong",
   7053 		Input: []tf.Input{
   7054 			input,
   7055 		},
   7056 		Attrs: attrs,
   7057 	}
   7058 	op := scope.AddOperation(opspec)
   7059 	return op.Output(0)
   7060 }
   7061 
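// Example (editor's sketch, not part of the generated code): bucketing strings
// with a fixed key via the tensorflow/go `op` package. The key and bucket
// count are illustrative assumptions.
//
// ```
// s := op.NewScope()
// input := op.Const(s, []string{"a", "b", "c"})
// out := op.StringToHashBucketStrong(s, input, 1000, []int64{1, 2})
// // Each output element is in [0, 1000); with the same key, equal strings
// // always map to the same bucket within a process.
// ```
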
   7062 // Generates values in an interval.
   7063 //
7064 // A sequence of `num` evenly-spaced values is generated beginning at `start`.
7065 // If `num > 1`, consecutive values differ by `(stop - start) / (num - 1)`,
7066 // so that the last one is exactly `stop`.
   7067 //
   7068 // For example:
   7069 //
   7070 // ```
   7071 // tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
   7072 // ```
   7073 //
   7074 // Arguments:
   7075 //	start: First entry in the range.
   7076 //	stop: Last entry in the range.
   7077 //	num: Number of values to generate.
   7078 //
   7079 // Returns 1-D. The generated values.
   7080 func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
   7081 	if scope.Err() != nil {
   7082 		return
   7083 	}
   7084 	opspec := tf.OpSpec{
   7085 		Type: "LinSpace",
   7086 		Input: []tf.Input{
   7087 			start, stop, num,
   7088 		},
   7089 	}
   7090 	op := scope.AddOperation(opspec)
   7091 	return op.Output(0)
   7092 }
   7093 
   7094 // DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
   7095 type DestroyResourceOpAttr func(optionalAttr)
   7096 
   7097 // DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
   7098 //
   7099 // value: whether to ignore the error when the resource
   7100 // doesn't exist.
   7101 // If not specified, defaults to true
   7102 func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
   7103 	return func(m optionalAttr) {
   7104 		m["ignore_lookup_error"] = value
   7105 	}
   7106 }
   7107 
   7108 // Deletes the resource specified by the handle.
   7109 //
   7110 // All subsequent operations using the resource will result in a NotFound
   7111 // error status.
   7112 //
   7113 // Arguments:
   7114 //	resource: handle to the resource to delete.
   7115 //
   7116 // Returns the created operation.
   7117 func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
   7118 	if scope.Err() != nil {
   7119 		return
   7120 	}
   7121 	attrs := map[string]interface{}{}
   7122 	for _, a := range optional {
   7123 		a(attrs)
   7124 	}
   7125 	opspec := tf.OpSpec{
   7126 		Type: "DestroyResourceOp",
   7127 		Input: []tf.Input{
   7128 			resource,
   7129 		},
   7130 		Attrs: attrs,
   7131 	}
   7132 	return scope.AddOperation(opspec)
   7133 }
   7134 
   7135 // CumprodAttr is an optional argument to Cumprod.
   7136 type CumprodAttr func(optionalAttr)
   7137 
   7138 // CumprodExclusive sets the optional exclusive attribute to value.
   7139 //
   7140 // value: If `True`, perform exclusive cumprod.
   7141 // If not specified, defaults to false
   7142 func CumprodExclusive(value bool) CumprodAttr {
   7143 	return func(m optionalAttr) {
   7144 		m["exclusive"] = value
   7145 	}
   7146 }
   7147 
   7148 // CumprodReverse sets the optional reverse attribute to value.
   7149 //
   7150 // value: A `bool` (default: False).
   7151 // If not specified, defaults to false
   7152 func CumprodReverse(value bool) CumprodAttr {
   7153 	return func(m optionalAttr) {
   7154 		m["reverse"] = value
   7155 	}
   7156 }
   7157 
   7158 // Compute the cumulative product of the tensor `x` along `axis`.
   7159 //
   7160 // By default, this op performs an inclusive cumprod, which means that the first
   7161 // element of the input is identical to the first element of the output:
   7162 //
   7163 // ```python
   7164 // tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
   7165 // ```
   7166 //
   7167 // By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
   7168 // performed instead:
   7169 //
   7170 // ```python
   7171 // tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
   7172 // ```
   7173 //
   7174 // By setting the `reverse` kwarg to `True`, the cumprod is performed in the
   7175 // opposite direction:
   7176 //
   7177 // ```python
   7178 // tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
   7179 // ```
   7180 //
   7181 // This is more efficient than using separate `tf.reverse` ops.
   7182 //
   7183 // The `reverse` and `exclusive` kwargs can also be combined:
   7184 //
   7185 // ```python
   7186 // tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
   7187 // ```
   7188 //
   7189 // Arguments:
   7190 //	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
   7191 // `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
   7192 // `complex128`, `qint8`, `quint8`, `qint32`, `half`.
   7193 //	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
   7194 // `[-rank(x), rank(x))`.
   7195 func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output) {
   7196 	if scope.Err() != nil {
   7197 		return
   7198 	}
   7199 	attrs := map[string]interface{}{}
   7200 	for _, a := range optional {
   7201 		a(attrs)
   7202 	}
   7203 	opspec := tf.OpSpec{
   7204 		Type: "Cumprod",
   7205 		Input: []tf.Input{
   7206 			x, axis,
   7207 		},
   7208 		Attrs: attrs,
   7209 	}
   7210 	op := scope.AddOperation(opspec)
   7211 	return op.Output(0)
   7212 }
   7213 
   7214 // Computes the mean along segments of a tensor.
   7215 //
7216 // Read the section on segmentation in the math_ops documentation for an
7217 // explanation of segments.
   7218 //
   7219 // Computes a tensor such that
7220 // \\(output_i = \frac{\sum_j data_j}{N}\\) where the mean is
   7221 // over `j` such that `segment_ids[j] == i` and `N` is the total number of
   7222 // values summed.
   7223 //
   7224 // If the mean is empty for a given segment ID `i`, `output[i] = 0`.
   7225 //
7226 // (Figure: https://www.tensorflow.org/images/SegmentMean.png)
   7229 //
   7230 // Arguments:
   7231 //
7232 //	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
7233 // first dimension.  Values should be sorted and can be repeated.
   7234 //
   7235 // Returns Has same shape as data, except for dimension 0 which
   7236 // has size `k`, the number of segments.
   7237 func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
   7238 	if scope.Err() != nil {
   7239 		return
   7240 	}
   7241 	opspec := tf.OpSpec{
   7242 		Type: "SegmentMean",
   7243 		Input: []tf.Input{
   7244 			data, segment_ids,
   7245 		},
   7246 	}
   7247 	op := scope.AddOperation(opspec)
   7248 	return op.Output(0)
   7249 }
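// A hand-worked instance of the formula above (values are illustrative): with
// data = [1, 2, 3, 4] and segment_ids = [0, 0, 1, 1], segment 0 averages
// {1, 2} and segment 1 averages {3, 4}, so the output is [1.5, 3.5]. Sketched
// with this package's helpers:
//
//	s := NewScope()
//	data := Const(s, []float32{1, 2, 3, 4})
//	ids := Const(s, []int32{0, 0, 1, 1})
//	mean := SegmentMean(s, data, ids) // shape [2]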
   7250 
   7251 // ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
   7252 type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)
   7253 
   7254 // ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
   7255 //
   7256 // value: If `True`, updating of the var, mg, ms, and mom tensors is
   7257 // protected by a lock; otherwise the behavior is undefined, but may exhibit less
   7258 // contention.
   7259 // If not specified, defaults to false
   7260 func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr {
   7261 	return func(m optionalAttr) {
   7262 		m["use_locking"] = value
   7263 	}
   7264 }
   7265 
   7266 // Update '*var' according to the centered RMSProp algorithm.
   7267 //
   7268 // The centered RMSProp algorithm uses an estimate of the centered second moment
   7269 // (i.e., the variance) for normalization, as opposed to regular RMSProp, which
   7270 // uses the (uncentered) second moment. This often helps with training, but is
   7271 // slightly more expensive in terms of computation and memory.
   7272 //
7273 // Note that in the dense implementation of this algorithm, mg, ms, and mom will
   7274 // update even if the grad is zero, but in this sparse implementation, mg, ms,
   7275 // and mom will not update in iterations during which the grad is zero.
   7276 //
   7277 // mean_square = decay * mean_square + (1-decay) * gradient ** 2
   7278 // mean_grad = decay * mean_grad + (1-decay) * gradient
   7279 // Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
   7280 //
7281 // mg <- rho * mg_{t-1} + (1-rho) * grad
7282 // ms <- rho * ms_{t-1} + (1-rho) * grad * grad
7283 // mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
7284 // var <- var - mom
   7284 //
   7285 // Arguments:
   7286 //	var_: Should be from a Variable().
   7287 //	mg: Should be from a Variable().
   7288 //	ms: Should be from a Variable().
   7289 //	mom: Should be from a Variable().
   7290 //	lr: Scaling factor. Must be a scalar.
   7291 //	rho: Decay rate. Must be a scalar.
7292 //	momentum: Momentum scale. Must be a scalar.
   7293 //	epsilon: Ridge term. Must be a scalar.
   7294 //	grad: The gradient.
   7295 //	indices: A vector of indices into the first dimension of var, ms and mom.
   7296 //
   7297 // Returns the created operation.
   7298 func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
   7299 	if scope.Err() != nil {
   7300 		return
   7301 	}
   7302 	attrs := map[string]interface{}{}
   7303 	for _, a := range optional {
   7304 		a(attrs)
   7305 	}
   7306 	opspec := tf.OpSpec{
   7307 		Type: "ResourceSparseApplyCenteredRMSProp",
   7308 		Input: []tf.Input{
   7309 			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
   7310 		},
   7311 		Attrs: attrs,
   7312 	}
   7313 	return scope.AddOperation(opspec)
   7314 }
   7315 
   7316 // Creates a dataset that batches `batch_size` elements from `input_dataset`.
   7317 //
   7318 // Arguments:
   7319 //
   7320 //	batch_size: A scalar representing the number of elements to accumulate in a
   7321 // batch.
   7322 //
   7323 //
   7324 func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   7325 	if scope.Err() != nil {
   7326 		return
   7327 	}
   7328 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   7329 	opspec := tf.OpSpec{
   7330 		Type: "BatchDataset",
   7331 		Input: []tf.Input{
   7332 			input_dataset, batch_size,
   7333 		},
   7334 		Attrs: attrs,
   7335 	}
   7336 	op := scope.AddOperation(opspec)
   7337 	return op.Output(0)
   7338 }
   7339 
   7340 // Inverse fast Fourier transform.
   7341 //
   7342 // Computes the inverse 1-dimensional discrete Fourier transform over the
   7343 // inner-most dimension of `input`.
   7344 //
   7345 // Arguments:
   7346 //	input: A complex64 tensor.
   7347 //
   7348 // Returns A complex64 tensor of the same shape as `input`. The inner-most
   7349 //   dimension of `input` is replaced with its inverse 1D Fourier transform.
   7350 //
   7351 // @compatibility(numpy)
   7352 // Equivalent to np.fft.ifft
   7353 // @end_compatibility
   7354 func IFFT(scope *Scope, input tf.Output) (output tf.Output) {
   7355 	if scope.Err() != nil {
   7356 		return
   7357 	}
   7358 	opspec := tf.OpSpec{
   7359 		Type: "IFFT",
   7360 		Input: []tf.Input{
   7361 			input,
   7362 		},
   7363 	}
   7364 	op := scope.AddOperation(opspec)
   7365 	return op.Output(0)
   7366 }
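// An illustrative round trip, assuming the forward FFT wrapper generated
// elsewhere in this package: applying IFFT to FFT output recovers the input
// up to floating-point error.
//
//	s := NewScope()
//	x := Const(s, []complex64{1, 2, 3, 4})
//	y := IFFT(s, FFT(s, x)) // y ≈ x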
   7367 
   7368 // LRNAttr is an optional argument to LRN.
   7369 type LRNAttr func(optionalAttr)
   7370 
   7371 // LRNDepthRadius sets the optional depth_radius attribute to value.
   7372 //
   7373 // value: 0-D.  Half-width of the 1-D normalization window.
   7374 // If not specified, defaults to 5
   7375 func LRNDepthRadius(value int64) LRNAttr {
   7376 	return func(m optionalAttr) {
   7377 		m["depth_radius"] = value
   7378 	}
   7379 }
   7380 
   7381 // LRNBias sets the optional bias attribute to value.
   7382 //
   7383 // value: An offset (usually positive to avoid dividing by 0).
   7384 // If not specified, defaults to 1
   7385 func LRNBias(value float32) LRNAttr {
   7386 	return func(m optionalAttr) {
   7387 		m["bias"] = value
   7388 	}
   7389 }
   7390 
   7391 // LRNAlpha sets the optional alpha attribute to value.
   7392 //
   7393 // value: A scale factor, usually positive.
   7394 // If not specified, defaults to 1
   7395 func LRNAlpha(value float32) LRNAttr {
   7396 	return func(m optionalAttr) {
   7397 		m["alpha"] = value
   7398 	}
   7399 }
   7400 
   7401 // LRNBeta sets the optional beta attribute to value.
   7402 //
   7403 // value: An exponent.
   7404 // If not specified, defaults to 0.5
   7405 func LRNBeta(value float32) LRNAttr {
   7406 	return func(m optionalAttr) {
   7407 		m["beta"] = value
   7408 	}
   7409 }
   7410 
   7411 // Local Response Normalization.
   7412 //
   7413 // The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
   7414 // dimension), and each vector is normalized independently.  Within a given vector,
   7415 // each component is divided by the weighted, squared sum of inputs within
   7416 // `depth_radius`.  In detail,
   7417 //
   7418 //     sqr_sum[a, b, c, d] =
   7419 //         sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
   7420 //     output = input / (bias + alpha * sqr_sum) ** beta
   7421 //
   7422 // For details, see [Krizhevsky et al., ImageNet classification with deep
   7423 // convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
   7424 //
   7425 // Arguments:
   7426 //	input: 4-D.
   7427 func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) {
   7428 	if scope.Err() != nil {
   7429 		return
   7430 	}
   7431 	attrs := map[string]interface{}{}
   7432 	for _, a := range optional {
   7433 		a(attrs)
   7434 	}
   7435 	opspec := tf.OpSpec{
   7436 		Type: "LRN",
   7437 		Input: []tf.Input{
   7438 			input,
   7439 		},
   7440 		Attrs: attrs,
   7441 	}
   7442 	op := scope.AddOperation(opspec)
   7443 	return op.Output(0)
   7444 }
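// To make the normalization formula above concrete, a hand-worked example
// (values are hypothetical): with depth_radius=1, bias=1, alpha=1, beta=0.5
// and depth values [1, 2, 3] at a fixed (a, b, c), the window at d=1 covers
// all three values, so sqr_sum = 1 + 4 + 9 = 14 and
// output[1] = 2 / (1 + 14)^0.5 ≈ 0.516.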
   7445 
   7446 // Creates a dataset that zips together `input_datasets`.
   7447 func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   7448 	if scope.Err() != nil {
   7449 		return
   7450 	}
   7451 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   7452 	opspec := tf.OpSpec{
   7453 		Type: "ZipDataset",
   7454 		Input: []tf.Input{
   7455 			tf.OutputList(input_datasets),
   7456 		},
   7457 		Attrs: attrs,
   7458 	}
   7459 	op := scope.AddOperation(opspec)
   7460 	return op.Output(0)
   7461 }
   7462 
   7463 // Writes a `GraphDef` protocol buffer to a `SummaryWriter`.
   7464 //
   7465 // Arguments:
   7466 //	writer: Handle of `SummaryWriter`.
   7467 //	step: The step to write the summary for.
   7468 //	tensor: A scalar string of the serialized tf.GraphDef proto.
   7469 //
   7470 // Returns the created operation.
   7471 func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
   7472 	if scope.Err() != nil {
   7473 		return
   7474 	}
   7475 	opspec := tf.OpSpec{
   7476 		Type: "WriteGraphSummary",
   7477 		Input: []tf.Input{
   7478 			writer, step, tensor,
   7479 		},
   7480 	}
   7481 	return scope.AddOperation(opspec)
   7482 }
   7483 
   7484 // ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
   7485 type ResourceSparseApplyAdagradAttr func(optionalAttr)
   7486 
   7487 // ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
   7488 //
   7489 // value: If `True`, updating of the var and accum tensors will be protected
   7490 // by a lock; otherwise the behavior is undefined, but may exhibit less
   7491 // contention.
   7492 // If not specified, defaults to false
   7493 func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr {
   7494 	return func(m optionalAttr) {
   7495 		m["use_locking"] = value
   7496 	}
   7497 }
   7498 
   7499 // Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
   7500 //
7501 // That is, for rows for which grad is available, we update var and accum as follows:
   7502 // accum += grad * grad
   7503 // var -= lr * grad * (1 / sqrt(accum))
   7504 //
   7505 // Arguments:
   7506 //	var_: Should be from a Variable().
   7507 //	accum: Should be from a Variable().
   7508 //	lr: Learning rate. Must be a scalar.
   7509 //	grad: The gradient.
   7510 //	indices: A vector of indices into the first dimension of var and accum.
   7511 //
   7512 // Returns the created operation.
   7513 func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
   7514 	if scope.Err() != nil {
   7515 		return
   7516 	}
   7517 	attrs := map[string]interface{}{}
   7518 	for _, a := range optional {
   7519 		a(attrs)
   7520 	}
   7521 	opspec := tf.OpSpec{
   7522 		Type: "ResourceSparseApplyAdagrad",
   7523 		Input: []tf.Input{
   7524 			var_, accum, lr, grad, indices,
   7525 		},
   7526 		Attrs: attrs,
   7527 	}
   7528 	return scope.AddOperation(opspec)
   7529 }
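// A single hand-worked step of the update above (illustrative numbers): for a
// row with accum = 1, grad = 2 and lr = 0.1, accum becomes 1 + 2*2 = 5 and
// var decreases by 0.1 * 2 / sqrt(5) ≈ 0.0894.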
   7530 
   7531 // 2D real-valued fast Fourier transform.
   7532 //
   7533 // Computes the 2-dimensional discrete Fourier transform of a real-valued signal
   7534 // over the inner-most 2 dimensions of `input`.
   7535 //
   7536 // Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
   7537 // `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
   7538 // of `output`: the zero-frequency term, followed by the `fft_length / 2`
   7539 // positive-frequency terms.
   7540 //
   7541 // Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
   7542 // corresponding dimension of `input`, the dimension is cropped. If it is larger,
   7543 // the dimension is padded with zeros.
   7544 //
   7545 // Arguments:
   7546 //	input: A float32 tensor.
   7547 //	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
   7548 //
   7549 // Returns A complex64 tensor of the same rank as `input`. The inner-most 2
   7550 //   dimensions of `input` are replaced with their 2D Fourier transform. The
   7551 //   inner-most dimension contains `fft_length / 2 + 1` unique frequency
   7552 //   components.
   7553 //
   7554 // @compatibility(numpy)
   7555 // Equivalent to np.fft.rfft2
   7556 // @end_compatibility
   7557 func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
   7558 	if scope.Err() != nil {
   7559 		return
   7560 	}
   7561 	opspec := tf.OpSpec{
   7562 		Type: "RFFT2D",
   7563 		Input: []tf.Input{
   7564 			input, fft_length,
   7565 		},
   7566 	}
   7567 	op := scope.AddOperation(opspec)
   7568 	return op.Output(0)
   7569 }
   7570 
   7571 // ResizeAreaAttr is an optional argument to ResizeArea.
   7572 type ResizeAreaAttr func(optionalAttr)
   7573 
   7574 // ResizeAreaAlignCorners sets the optional align_corners attribute to value.
   7575 //
   7576 // value: If true, rescale input by (new_height - 1) / (height - 1), which
   7577 // exactly aligns the 4 corners of images and resized images. If false, rescale
   7578 // by new_height / height. Treat similarly the width dimension.
   7579 // If not specified, defaults to false
   7580 func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
   7581 	return func(m optionalAttr) {
   7582 		m["align_corners"] = value
   7583 	}
   7584 }
   7585 
   7586 // Resize `images` to `size` using area interpolation.
   7587 //
   7588 // Input images can be of different types but output images are always float.
   7589 //
   7590 // Each output pixel is computed by first transforming the pixel's footprint into
   7591 // the input tensor and then averaging the pixels that intersect the footprint. An
   7592 // input pixel's contribution to the average is weighted by the fraction of its
   7593 // area that intersects the footprint.  This is the same as OpenCV's INTER_AREA.
   7594 //
   7595 // Arguments:
   7596 //	images: 4-D with shape `[batch, height, width, channels]`.
7597 //	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
   7598 // new size for the images.
   7599 //
   7600 // Returns 4-D with shape
   7601 // `[batch, new_height, new_width, channels]`.
   7602 func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output) {
   7603 	if scope.Err() != nil {
   7604 		return
   7605 	}
   7606 	attrs := map[string]interface{}{}
   7607 	for _, a := range optional {
   7608 		a(attrs)
   7609 	}
   7610 	opspec := tf.OpSpec{
   7611 		Type: "ResizeArea",
   7612 		Input: []tf.Input{
   7613 			images, size,
   7614 		},
   7615 		Attrs: attrs,
   7616 	}
   7617 	op := scope.AddOperation(opspec)
   7618 	return op.Output(0)
   7619 }
   7620 
   7621 // StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
   7622 type StatelessRandomUniformAttr func(optionalAttr)
   7623 
   7624 // StatelessRandomUniformDtype sets the optional dtype attribute to value.
   7625 //
   7626 // value: The type of the output.
   7627 // If not specified, defaults to DT_FLOAT
   7628 func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr {
   7629 	return func(m optionalAttr) {
   7630 		m["dtype"] = value
   7631 	}
   7632 }
   7633 
   7634 // Outputs deterministic pseudorandom random values from a uniform distribution.
   7635 //
   7636 // The generated values follow a uniform distribution in the range `[0, 1)`. The
   7637 // lower bound 0 is included in the range, while the upper bound 1 is excluded.
   7638 //
   7639 // The outputs are a deterministic function of `shape` and `seed`.
   7640 //
   7641 // Arguments:
   7642 //	shape: The shape of the output tensor.
   7643 //	seed: 2 seeds (shape [2]).
   7644 //
   7645 // Returns Random values with specified shape.
   7646 func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output) {
   7647 	if scope.Err() != nil {
   7648 		return
   7649 	}
   7650 	attrs := map[string]interface{}{}
   7651 	for _, a := range optional {
   7652 		a(attrs)
   7653 	}
   7654 	opspec := tf.OpSpec{
   7655 		Type: "StatelessRandomUniform",
   7656 		Input: []tf.Input{
   7657 			shape, seed,
   7658 		},
   7659 		Attrs: attrs,
   7660 	}
   7661 	op := scope.AddOperation(opspec)
   7662 	return op.Output(0)
   7663 }
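// A sketch of the determinism guarantee (names and values are hypothetical):
//
//	s := NewScope()
//	shape := Const(s, []int32{2, 3})
//	seed := Const(s, []int64{1, 2})
//	a := StatelessRandomUniform(s, shape, seed)
//	b := StatelessRandomUniform(s, shape, seed)
//	// a and b evaluate to identical [2, 3] float32 tensors on every run.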
   7664 
   7665 // AngleAttr is an optional argument to Angle.
   7666 type AngleAttr func(optionalAttr)
   7667 
   7668 // AngleTout sets the optional Tout attribute to value.
   7669 // If not specified, defaults to DT_FLOAT
   7670 func AngleTout(value tf.DataType) AngleAttr {
   7671 	return func(m optionalAttr) {
   7672 		m["Tout"] = value
   7673 	}
   7674 }
   7675 
   7676 // Returns the argument of a complex number.
   7677 //
   7678 // Given a tensor `input` of complex numbers, this operation returns a tensor of
   7679 // type `float` that is the argument of each element in `input`. All elements in
   7680 // `input` must be complex numbers of the form \\(a + bj\\), where *a*
   7681 // is the real part and *b* is the imaginary part.
   7682 //
   7683 // The argument returned by this operation is of the form \\(atan2(b, a)\\).
   7684 //
   7685 // For example:
   7686 //
   7687 // ```
   7688 // # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
   7689 // tf.angle(input) ==> [2.0132, 1.056]
   7690 // ```
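//
// Checking the first value by hand: atan2(4.75, -2.25) = pi - atan(4.75/2.25)
// ≈ 3.1416 - 1.1284 ≈ 2.013, matching the first element of the output above.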
   7691 //
   7692 // @compatibility(numpy)
   7693 // Equivalent to np.angle.
   7694 // @end_compatibility
   7695 func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output) {
   7696 	if scope.Err() != nil {
   7697 		return
   7698 	}
   7699 	attrs := map[string]interface{}{}
   7700 	for _, a := range optional {
   7701 		a(attrs)
   7702 	}
   7703 	opspec := tf.OpSpec{
   7704 		Type: "Angle",
   7705 		Input: []tf.Input{
   7706 			input,
   7707 		},
   7708 		Attrs: attrs,
   7709 	}
   7710 	op := scope.AddOperation(opspec)
   7711 	return op.Output(0)
   7712 }
   7713 
   7714 // VarHandleOpAttr is an optional argument to VarHandleOp.
   7715 type VarHandleOpAttr func(optionalAttr)
   7716 
   7717 // VarHandleOpContainer sets the optional container attribute to value.
   7718 //
   7719 // value: the container this variable is placed in.
   7720 // If not specified, defaults to ""
   7721 func VarHandleOpContainer(value string) VarHandleOpAttr {
   7722 	return func(m optionalAttr) {
   7723 		m["container"] = value
   7724 	}
   7725 }
   7726 
   7727 // VarHandleOpSharedName sets the optional shared_name attribute to value.
   7728 //
   7729 // value: the name by which this variable is referred to.
   7730 // If not specified, defaults to ""
   7731 func VarHandleOpSharedName(value string) VarHandleOpAttr {
   7732 	return func(m optionalAttr) {
   7733 		m["shared_name"] = value
   7734 	}
   7735 }
   7736 
   7737 // Creates a handle to a Variable resource.
   7738 //
   7739 // Arguments:
   7740 //	dtype: the type of this variable. Must agree with the dtypes
   7741 // of all ops using this variable.
   7742 //	shape: The (possibly partially specified) shape of this variable.
   7743 func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output) {
   7744 	if scope.Err() != nil {
   7745 		return
   7746 	}
   7747 	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
   7748 	for _, a := range optional {
   7749 		a(attrs)
   7750 	}
   7751 	opspec := tf.OpSpec{
   7752 		Type: "VarHandleOp",
   7753 
   7754 		Attrs: attrs,
   7755 	}
   7756 	op := scope.AddOperation(opspec)
   7757 	return op.Output(0)
   7758 }
   7759 
7760 // Computes the bitwise XOR of `x` and `y` element-wise.
   7761 //
7762 // The result has a bit set exactly where the corresponding bits of `x` and
7763 // `y` differ. The computation is performed on the underlying representations
7764 // of `x` and `y`.
   7764 func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   7765 	if scope.Err() != nil {
   7766 		return
   7767 	}
   7768 	opspec := tf.OpSpec{
   7769 		Type: "BitwiseXor",
   7770 		Input: []tf.Input{
   7771 			x, y,
   7772 		},
   7773 	}
   7774 	op := scope.AddOperation(opspec)
   7775 	return op.Output(0)
   7776 }
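// A hand-worked example: BitwiseXor(5, 3) = 6, since 0b101 ^ 0b011 = 0b110;
// the two higher bits differ while the lowest bit matches.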
   7777 
   7778 // Deserialize `SparseTensor` objects.
   7779 //
   7780 // The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
   7781 // the last dimension stores serialized `SparseTensor` objects and the other N
   7782 // dimensions (N >= 0) correspond to a batch. The ranks of the original
   7783 // `SparseTensor` objects must all match. When the final `SparseTensor` is
   7784 // created, its rank is the rank of the incoming `SparseTensor` objects plus N;
   7785 // the sparse tensors have been concatenated along new dimensions, one for each
   7786 // batch.
   7787 //
   7788 // The output `SparseTensor` object's shape values for the original dimensions
   7789 // are the max across the input `SparseTensor` objects' shape values for the
   7790 // corresponding dimensions. The new dimensions match the size of the batch.
   7791 //
   7792 // The input `SparseTensor` objects' indices are assumed ordered in
   7793 // standard lexicographic order.  If this is not the case, after this
   7794 // step run `SparseReorder` to restore index ordering.
   7795 //
   7796 // For example, if the serialized input is a `[2 x 3]` matrix representing two
   7797 // original `SparseTensor` objects:
   7798 //
   7799 //     index = [ 0]
   7800 //             [10]
   7801 //             [20]
   7802 //     values = [1, 2, 3]
   7803 //     shape = [50]
   7804 //
   7805 // and
   7806 //
   7807 //     index = [ 2]
   7808 //             [10]
   7809 //     values = [4, 5]
   7810 //     shape = [30]
   7811 //
   7812 // then the final deserialized `SparseTensor` will be:
   7813 //
   7814 //     index = [0  0]
   7815 //             [0 10]
   7816 //             [0 20]
   7817 //             [1  2]
   7818 //             [1 10]
   7819 //     values = [1, 2, 3, 4, 5]
   7820 //     shape = [2 50]
   7821 //
   7822 // Arguments:
   7823 //	serialized_sparse: The serialized `SparseTensor` objects. The last dimension
   7824 // must have 3 columns.
   7825 //	dtype: The `dtype` of the serialized `SparseTensor` objects.
   7826 func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
   7827 	if scope.Err() != nil {
   7828 		return
   7829 	}
   7830 	attrs := map[string]interface{}{"dtype": dtype}
   7831 	opspec := tf.OpSpec{
   7832 		Type: "DeserializeSparse",
   7833 		Input: []tf.Input{
   7834 			serialized_sparse,
   7835 		},
   7836 		Attrs: attrs,
   7837 	}
   7838 	op := scope.AddOperation(opspec)
   7839 	return op.Output(0), op.Output(1), op.Output(2)
   7840 }
   7841 
   7842 // ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
   7843 type ResourceApplyRMSPropAttr func(optionalAttr)
   7844 
   7845 // ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
   7846 //
   7847 // value: If `True`, updating of the var, ms, and mom tensors is protected
   7848 // by a lock; otherwise the behavior is undefined, but may exhibit less
   7849 // contention.
   7850 // If not specified, defaults to false
   7851 func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
   7852 	return func(m optionalAttr) {
   7853 		m["use_locking"] = value
   7854 	}
   7855 }
   7856 
   7857 // Update '*var' according to the RMSProp algorithm.
   7858 //
7859 // Note that in the dense implementation of this algorithm, ms and mom will
   7860 // update even if the grad is zero, but in this sparse implementation, ms
   7861 // and mom will not update in iterations during which the grad is zero.
   7862 //
   7863 // mean_square = decay * mean_square + (1-decay) * gradient ** 2
   7864 // Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
   7865 //
   7866 // ms <- rho * ms_{t-1} + (1-rho) * grad * grad
   7867 // mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
   7868 // var <- var - mom
   7869 //
   7870 // Arguments:
   7871 //	var_: Should be from a Variable().
   7872 //	ms: Should be from a Variable().
   7873 //	mom: Should be from a Variable().
   7874 //	lr: Scaling factor. Must be a scalar.
   7875 //	rho: Decay rate. Must be a scalar.
7876 //	momentum: Momentum scale. Must be a scalar.
   7877 //	epsilon: Ridge term. Must be a scalar.
   7878 //	grad: The gradient.
   7879 //
   7880 // Returns the created operation.
   7881 func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
   7882 	if scope.Err() != nil {
   7883 		return
   7884 	}
   7885 	attrs := map[string]interface{}{}
   7886 	for _, a := range optional {
   7887 		a(attrs)
   7888 	}
   7889 	opspec := tf.OpSpec{
   7890 		Type: "ResourceApplyRMSProp",
   7891 		Input: []tf.Input{
   7892 			var_, ms, mom, lr, rho, momentum, epsilon, grad,
   7893 		},
   7894 		Attrs: attrs,
   7895 	}
   7896 	return scope.AddOperation(opspec)
   7897 }
   7898 
   7899 // SizeAttr is an optional argument to Size.
   7900 type SizeAttr func(optionalAttr)
   7901 
   7902 // SizeOutType sets the optional out_type attribute to value.
   7903 // If not specified, defaults to DT_INT32
   7904 func SizeOutType(value tf.DataType) SizeAttr {
   7905 	return func(m optionalAttr) {
   7906 		m["out_type"] = value
   7907 	}
   7908 }
   7909 
   7910 // Returns the size of a tensor.
   7911 //
   7912 // This operation returns an integer representing the number of elements in
   7913 // `input`.
   7914 //
   7915 // For example:
   7916 //
   7917 // ```
7918 // # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
   7919 // size(t) ==> 12
   7920 // ```
   7921 func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output) {
   7922 	if scope.Err() != nil {
   7923 		return
   7924 	}
   7925 	attrs := map[string]interface{}{}
   7926 	for _, a := range optional {
   7927 		a(attrs)
   7928 	}
   7929 	opspec := tf.OpSpec{
   7930 		Type: "Size",
   7931 		Input: []tf.Input{
   7932 			input,
   7933 		},
   7934 		Attrs: attrs,
   7935 	}
   7936 	op := scope.AddOperation(opspec)
   7937 	return op.Output(0)
   7938 }
   7939 
   7940 // ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
   7941 type ResourceScatterNdUpdateAttr func(optionalAttr)
   7942 
   7943 // ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
   7944 //
   7945 // value: An optional bool. Defaults to True. If True, the assignment will
   7946 // be protected by a lock; otherwise the behavior is undefined,
   7947 // but may exhibit less contention.
   7948 // If not specified, defaults to true
   7949 func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
   7950 	return func(m optionalAttr) {
   7951 		m["use_locking"] = value
   7952 	}
   7953 }
   7954 
7955 // Applies sparse `updates` to individual values or slices within a given
7956 // variable according to `indices`.
   7958 //
   7959 // `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
   7960 //
   7961 // `indices` must be integer tensor, containing indices into `ref`.
7962 // It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
   7963 //
   7964 // The innermost dimension of `indices` (with length `K`) corresponds to
   7965 // indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
   7966 // dimension of `ref`.
   7967 //
7968 // `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
   7969 //
   7970 // ```
   7971 // [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
   7972 // ```
   7973 //
7974 // For example, say we want to update 4 scattered elements of a rank-1 tensor
7975 // with 8 elements. In Python, that update would look like this:
   7976 //
   7977 // ```python
7978 //     ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
7979 //     indices = tf.constant([[4], [3], [1], [7]])
   7980 //     updates = tf.constant([9, 10, 11, 12])
   7981 //     update = tf.scatter_nd_update(ref, indices, updates)
   7982 //     with tf.Session() as sess:
   7983 //       print sess.run(update)
   7984 // ```
   7985 //
   7986 // The resulting update to ref would look like this:
   7987 //
   7988 //     [1, 11, 3, 10, 9, 6, 7, 12]
   7989 //
7990 // See `tf.scatter_nd` for more details about how to make updates to
   7991 // slices.
   7992 //
   7993 // Arguments:
   7994 //	ref: A resource handle. Must be from a VarHandleOp.
   7995 //	indices: A Tensor. Must be one of the following types: int32, int64.
   7996 // A tensor of indices into ref.
   7997 //	updates: A Tensor. Must have the same type as ref. A tensor of updated
   7998 // values to add to ref.
   7999 //
   8000 // Returns the created operation.
   8001 func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation) {
   8002 	if scope.Err() != nil {
   8003 		return
   8004 	}
   8005 	attrs := map[string]interface{}{}
   8006 	for _, a := range optional {
   8007 		a(attrs)
   8008 	}
   8009 	opspec := tf.OpSpec{
   8010 		Type: "ResourceScatterNdUpdate",
   8011 		Input: []tf.Input{
   8012 			ref, indices, updates,
   8013 		},
   8014 		Attrs: attrs,
   8015 	}
   8016 	return scope.AddOperation(opspec)
   8017 }
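// A rough Go equivalent of the Python example above (illustrative sketch;
// `ref` stands for a resource handle created with VarHandleOp, as described
// elsewhere in this file):
//
//	s := NewScope()
//	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
//	updates := Const(s, []int32{9, 10, 11, 12})
//	update := ResourceScatterNdUpdate(s, ref, indices, updates)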
   8018 
   8019 // StageSizeAttr is an optional argument to StageSize.
   8020 type StageSizeAttr func(optionalAttr)
   8021 
   8022 // StageSizeCapacity sets the optional capacity attribute to value.
   8023 // If not specified, defaults to 0
   8024 //
   8025 // REQUIRES: value >= 0
   8026 func StageSizeCapacity(value int64) StageSizeAttr {
   8027 	return func(m optionalAttr) {
   8028 		m["capacity"] = value
   8029 	}
   8030 }
   8031 
   8032 // StageSizeMemoryLimit sets the optional memory_limit attribute to value.
   8033 // If not specified, defaults to 0
   8034 //
   8035 // REQUIRES: value >= 0
   8036 func StageSizeMemoryLimit(value int64) StageSizeAttr {
   8037 	return func(m optionalAttr) {
   8038 		m["memory_limit"] = value
   8039 	}
   8040 }
   8041 
   8042 // StageSizeContainer sets the optional container attribute to value.
   8043 // If not specified, defaults to ""
   8044 func StageSizeContainer(value string) StageSizeAttr {
   8045 	return func(m optionalAttr) {
   8046 		m["container"] = value
   8047 	}
   8048 }
   8049 
   8050 // StageSizeSharedName sets the optional shared_name attribute to value.
   8051 // If not specified, defaults to ""
   8052 func StageSizeSharedName(value string) StageSizeAttr {
   8053 	return func(m optionalAttr) {
   8054 		m["shared_name"] = value
   8055 	}
   8056 }
   8057 
   8058 // Op returns the number of elements in the underlying container.
   8059 func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output) {
   8060 	if scope.Err() != nil {
   8061 		return
   8062 	}
   8063 	attrs := map[string]interface{}{"dtypes": dtypes}
   8064 	for _, a := range optional {
   8065 		a(attrs)
   8066 	}
   8067 	opspec := tf.OpSpec{
   8068 		Type: "StageSize",
   8069 
   8070 		Attrs: attrs,
   8071 	}
   8072 	op := scope.AddOperation(opspec)
   8073 	return op.Output(0)
   8074 }
   8075 
   8076 // NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
   8077 type NonMaxSuppressionAttr func(optionalAttr)
   8078 
   8079 // NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
   8080 //
   8081 // value: A float representing the threshold for deciding whether boxes
   8082 // overlap too much with respect to IOU.
   8083 // If not specified, defaults to 0.5
   8084 func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr {
   8085 	return func(m optionalAttr) {
   8086 		m["iou_threshold"] = value
   8087 	}
   8088 }
   8089 
8090 // Greedily selects a subset of bounding boxes in descending order of score,
8091 // pruning away boxes that have high intersection-over-union (IOU) overlap
   8093 // with previously selected boxes.  Bounding boxes are supplied as
   8094 // [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
   8095 // diagonal pair of box corners and the coordinates can be provided as normalized
   8096 // (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
   8097 // is agnostic to where the origin is in the coordinate system.  Note that this
   8098 // algorithm is invariant to orthogonal transformations and translations
8099 // of the coordinate system; thus translating or reflecting the coordinate
8100 // system results in the same boxes being selected by the algorithm.
   8101 // The output of this operation is a set of integers indexing into the input
   8102 // collection of bounding boxes representing the selected boxes.  The bounding
   8103 // box coordinates corresponding to the selected indices can then be obtained
8104 // using the `tf.gather` operation.  For example:
   8105 //   selected_indices = tf.image.non_max_suppression(
   8106 //       boxes, scores, max_output_size, iou_threshold)
   8107 //   selected_boxes = tf.gather(boxes, selected_indices)
   8108 //
   8109 // Arguments:
   8110 //	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
   8111 //	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
   8112 // score corresponding to each box (each row of boxes).
   8113 //	max_output_size: A scalar integer tensor representing the maximum number of
   8114 // boxes to be selected by non max suppression.
   8115 //
   8116 // Returns A 1-D integer tensor of shape `[M]` representing the selected
   8117 // indices from the boxes tensor, where `M <= max_output_size`.
   8118 func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {
   8119 	if scope.Err() != nil {
   8120 		return
   8121 	}
   8122 	attrs := map[string]interface{}{}
   8123 	for _, a := range optional {
   8124 		a(attrs)
   8125 	}
   8126 	opspec := tf.OpSpec{
   8127 		Type: "NonMaxSuppression",
   8128 		Input: []tf.Input{
   8129 			boxes, scores, max_output_size,
   8130 		},
   8131 		Attrs: attrs,
   8132 	}
   8133 	op := scope.AddOperation(opspec)
   8134 	return op.Output(0)
   8135 }
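// A Go sketch mirroring the Python snippet above (assumes the Gather wrapper
// generated elsewhere in this package; all names are hypothetical):
//
//	selected := NonMaxSuppression(s, boxes, scores, maxOutputSize,
//		NonMaxSuppressionIouThreshold(0.5))
//	selectedBoxes := Gather(s, boxes, selected)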
   8136 
   8137 // Creates a dataset that emits `components` as a tuple of tensors once.
   8138 func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
   8139 	if scope.Err() != nil {
   8140 		return
   8141 	}
   8142 	attrs := map[string]interface{}{"output_shapes": output_shapes}
   8143 	opspec := tf.OpSpec{
   8144 		Type: "TensorDataset",
   8145 		Input: []tf.Input{
   8146 			tf.OutputList(components),
   8147 		},
   8148 		Attrs: attrs,
   8149 	}
   8150 	op := scope.AddOperation(opspec)
   8151 	return op.Output(0)
   8152 }
   8153 
   8154 // Component-wise multiplies a SparseTensor by a dense Tensor.
   8155 //
   8156 // The output locations corresponding to the implicitly zero elements in the sparse
   8157 // tensor will be zero (i.e., will not take up storage space), regardless of the
8158 // contents of the dense tensor (even where it is +/-Inf, despite Inf * 0 == NaN).
   8159 //
   8160 // *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
   8161 // the other direction.
   8162 //
   8163 // Arguments:
   8164 //	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   8165 // SparseTensor, possibly not in canonical ordering.
   8166 //	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
   8167 //	sp_shape: 1-D.  Shape of the input SparseTensor.
   8168 //	dense: `R`-D.  The dense Tensor operand.
   8169 //
   8170 // Returns 1-D.  The `N` values that are operated on.
   8171 func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
   8172 	if scope.Err() != nil {
   8173 		return
   8174 	}
   8175 	opspec := tf.OpSpec{
   8176 		Type: "SparseDenseCwiseMul",
   8177 		Input: []tf.Input{
   8178 			sp_indices, sp_values, sp_shape, dense,
   8179 		},
   8180 	}
   8181 	op := scope.AddOperation(opspec)
   8182 	return op.Output(0)
   8183 }
   8184 
   8185 // ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
   8186 type ResourceSparseApplyFtrlAttr func(optionalAttr)
   8187 
   8188 // ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
   8189 //
   8190 // value: If `True`, updating of the var and accum tensors will be protected
   8191 // by a lock; otherwise the behavior is undefined, but may exhibit less
   8192 // contention.
   8193 // If not specified, defaults to false
   8194 func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
   8195 	return func(m optionalAttr) {
   8196 		m["use_locking"] = value
   8197 	}
   8198 }
   8199 
   8200 // Update relevant entries in '*var' according to the Ftrl-proximal scheme.
   8201 //
8202 // That is, for rows for which grad is available, we update var, accum and linear as follows:
   8203 // accum_new = accum + grad * grad
   8204 // linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
   8205 // quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
   8206 // var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
   8207 // accum = accum_new
   8208 //
   8209 // Arguments:
   8210 //	var_: Should be from a Variable().
   8211 //	accum: Should be from a Variable().
   8212 //	linear: Should be from a Variable().
   8213 //	grad: The gradient.
   8214 //	indices: A vector of indices into the first dimension of var and accum.
   8215 //	lr: Scaling factor. Must be a scalar.
   8216 //	l1: L1 regularization. Must be a scalar.
   8217 //	l2: L2 regularization. Must be a scalar.
   8218 //	lr_power: Scaling factor. Must be a scalar.
   8219 //
   8220 // Returns the created operation.
   8221 func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
   8222 	if scope.Err() != nil {
   8223 		return
   8224 	}
   8225 	attrs := map[string]interface{}{}
   8226 	for _, a := range optional {
   8227 		a(attrs)
   8228 	}
   8229 	opspec := tf.OpSpec{
   8230 		Type: "ResourceSparseApplyFtrl",
   8231 		Input: []tf.Input{
   8232 			var_, accum, linear, grad, indices, lr, l1, l2, lr_power,
   8233 		},
   8234 		Attrs: attrs,
   8235 	}
   8236 	return scope.AddOperation(opspec)
   8237 }
   8238 
   8239 // Returns which elements of x are Inf.
   8240 //
   8241 // @compatibility(numpy)
   8242 // Equivalent to np.isinf
   8243 // @end_compatibility
   8244 func IsInf(scope *Scope, x tf.Output) (y tf.Output) {
   8245 	if scope.Err() != nil {
   8246 		return
   8247 	}
   8248 	opspec := tf.OpSpec{
   8249 		Type: "IsInf",
   8250 		Input: []tf.Input{
   8251 			x,
   8252 		},
   8253 	}
   8254 	op := scope.AddOperation(opspec)
   8255 	return op.Output(0)
   8256 }
   8257 
   8258 // ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
   8259 type ResourceSparseApplyRMSPropAttr func(optionalAttr)
   8260 
   8261 // ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
   8262 //
   8263 // value: If `True`, updating of the var, ms, and mom tensors is protected
   8264 // by a lock; otherwise the behavior is undefined, but may exhibit less
   8265 // contention.
   8266 // If not specified, defaults to false
   8267 func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr {
   8268 	return func(m optionalAttr) {
   8269 		m["use_locking"] = value
   8270 	}
   8271 }
   8272 
   8273 // Update '*var' according to the RMSProp algorithm.
   8274 //
8275 // Note that in the dense implementation of this algorithm, ms and mom will
   8276 // update even if the grad is zero, but in this sparse implementation, ms
   8277 // and mom will not update in iterations during which the grad is zero.
   8278 //
   8279 // mean_square = decay * mean_square + (1-decay) * gradient ** 2
   8280 // Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
   8281 //
   8282 // ms <- rho * ms_{t-1} + (1-rho) * grad * grad
   8283 // mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
   8284 // var <- var - mom
   8285 //
   8286 // Arguments:
   8287 //	var_: Should be from a Variable().
   8288 //	ms: Should be from a Variable().
   8289 //	mom: Should be from a Variable().
   8290 //	lr: Scaling factor. Must be a scalar.
   8291 //	rho: Decay rate. Must be a scalar.
8292 //	momentum: Momentum scale. Must be a scalar.
   8293 //	epsilon: Ridge term. Must be a scalar.
   8294 //	grad: The gradient.
   8295 //	indices: A vector of indices into the first dimension of var, ms and mom.
   8296 //
   8297 // Returns the created operation.
   8298 func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
   8299 	if scope.Err() != nil {
   8300 		return
   8301 	}
   8302 	attrs := map[string]interface{}{}
   8303 	for _, a := range optional {
   8304 		a(attrs)
   8305 	}
   8306 	opspec := tf.OpSpec{
   8307 		Type: "ResourceSparseApplyRMSProp",
   8308 		Input: []tf.Input{
   8309 			var_, ms, mom, lr, rho, momentum, epsilon, grad, indices,
   8310 		},
   8311 		Attrs: attrs,
   8312 	}
   8313 	return scope.AddOperation(opspec)
   8314 }
   8315 
   8316 // Returns the truth value of (x > y) element-wise.
   8317 //
   8318 // *NOTE*: `Greater` supports broadcasting. More about broadcasting
   8319 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   8320 func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   8321 	if scope.Err() != nil {
   8322 		return
   8323 	}
   8324 	opspec := tf.OpSpec{
   8325 		Type: "Greater",
   8326 		Input: []tf.Input{
   8327 			x, y,
   8328 		},
   8329 	}
   8330 	op := scope.AddOperation(opspec)
   8331 	return op.Output(0)
   8332 }
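// A hand-worked broadcasting example: comparing x = [1, 5, 3] against the
// scalar y = 2 yields [false, true, true].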
   8333 
   8334 // SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
   8335 type SampleDistortedBoundingBoxAttr func(optionalAttr)
   8336 
   8337 // SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
   8338 //
   8339 // value: If either `seed` or `seed2` are set to non-zero, the random number
   8340 // generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
   8341 // seed.
   8342 // If not specified, defaults to 0
   8343 func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr {
   8344 	return func(m optionalAttr) {
   8345 		m["seed"] = value
   8346 	}
   8347 }
   8348 
   8349 // SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
   8350 //
   8351 // value: A second seed to avoid seed collision.
   8352 // If not specified, defaults to 0
   8353 func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr {
   8354 	return func(m optionalAttr) {
   8355 		m["seed2"] = value
   8356 	}
   8357 }
   8358 
   8359 // SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
   8360 //
   8361 // value: The cropped area of the image must contain at least this
   8362 // fraction of any bounding box supplied. The value of this parameter should be
   8363 // non-negative. In the case of 0, the cropped area does not need to overlap
   8364 // any of the bounding boxes supplied.
   8365 // If not specified, defaults to 0.1
   8366 func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr {
   8367 	return func(m optionalAttr) {
   8368 		m["min_object_covered"] = value
   8369 	}
   8370 }
   8371 
   8372 // SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
   8373 //
   8374 // value: The cropped area of the image must have an aspect ratio =
   8375 // width / height within this range.
8376 // If not specified, defaults to [0.75, 1.33]
   8377 func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr {
   8378 	return func(m optionalAttr) {
   8379 		m["aspect_ratio_range"] = value
   8380 	}
   8381 }
   8382 
   8383 // SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
   8384 //
   8385 // value: The cropped area of the image must contain a fraction of the
8386 // supplied image within this range.
8387 // If not specified, defaults to [0.05, 1]
   8388 func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
   8389 	return func(m optionalAttr) {
   8390 		m["area_range"] = value
   8391 	}
   8392 }
   8393 
   8394 // SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
   8395 //
8396 // value: Number of attempts at generating a cropped region of the image
8397 // that satisfies the specified constraints. After `max_attempts` failures, return the entire
   8398 // image.
   8399 // If not specified, defaults to 100
   8400 func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr {
   8401 	return func(m optionalAttr) {
   8402 		m["max_attempts"] = value
   8403 	}
   8404 }
   8405 
   8406 // SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
   8407 //
   8408 // value: Controls behavior if no bounding boxes supplied.
   8409 // If true, assume an implicit bounding box covering the whole input. If false,
   8410 // raise an error.
   8411 // If not specified, defaults to false
   8412 func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr {
   8413 	return func(m optionalAttr) {
   8414 		m["use_image_if_no_bounding_boxes"] = value
   8415 	}
   8416 }
   8417 
   8418 // Generate a single randomly distorted bounding box for an image.
   8419 //
   8420 // Bounding box annotations are often supplied in addition to ground-truth labels
   8421 // in image recognition or object localization tasks. A common technique for
   8422 // training such a system is to randomly distort an image while preserving
   8423 // its content, i.e. *data augmentation*. This Op outputs a randomly distorted
   8424 // localization of an object, i.e. bounding box, given an `image_size`,
   8425 // `bounding_boxes` and a series of constraints.
   8426 //
   8427 // The output of this Op is a single bounding box that may be used to crop the
   8428 // original image. The output is returned as 3 tensors: `begin`, `size` and
   8429 // `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
   8430 // image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
   8431 // what the bounding box looks like.
   8432 //
   8433 // Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
   8434 // bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
   8435 // height of the underlying image.
   8436 //
   8437 // For example,
   8438 //
   8439 // ```python
   8440 //     # Generate a single distorted bounding box.
   8441 //     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
   8442 //         tf.shape(image),
   8443 //         bounding_boxes=bounding_boxes)
   8444 //
   8445 //     # Draw the bounding box in an image summary.
   8446 //     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
   8447 //                                                   bbox_for_draw)
   8448 //     tf.summary.image('images_with_box', image_with_box)
   8449 //
   8450 //     # Employ the bounding box to distort the image.
   8451 //     distorted_image = tf.slice(image, begin, size)
   8452 // ```
   8453 //
   8454 // Note that if no bounding box information is available, setting
   8455 // `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
   8456 // bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
   8457 // false and no bounding boxes are supplied, an error is raised.
   8458 //
   8459 // Arguments:
   8460 //	image_size: 1-D, containing `[height, width, channels]`.
   8461 //	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
   8462 // associated with the image.
   8463 //
8464 // Returns:
8465 //	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
8466 //	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
8467 //	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`.
   8468 func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
   8469 	if scope.Err() != nil {
   8470 		return
   8471 	}
   8472 	attrs := map[string]interface{}{}
   8473 	for _, a := range optional {
   8474 		a(attrs)
   8475 	}
   8476 	opspec := tf.OpSpec{
   8477 		Type: "SampleDistortedBoundingBox",
   8478 		Input: []tf.Input{
   8479 			image_size, bounding_boxes,
   8480 		},
   8481 		Attrs: attrs,
   8482 	}
   8483 	op := scope.AddOperation(opspec)
   8484 	return op.Output(0), op.Output(1), op.Output(2)
   8485 }
   8486 
   8487 // Returns x / y element-wise for integer types.
   8488 //
   8489 // Truncation designates that negative numbers will round fractional quantities
8490 // toward zero, i.e. -7 / 5 = -1. This matches C semantics but differs
8491 // from Python semantics. See `FloorDiv` for a division function that matches
8492 // Python semantics.
   8493 //
   8494 // *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
   8495 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   8496 func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   8497 	if scope.Err() != nil {
   8498 		return
   8499 	}
   8500 	opspec := tf.OpSpec{
   8501 		Type: "TruncateDiv",
   8502 		Input: []tf.Input{
   8503 			x, y,
   8504 		},
   8505 	}
   8506 	op := scope.AddOperation(opspec)
   8507 	return op.Output(0)
   8508 }
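// A hand-worked contrast of the two semantics: TruncateDiv(-7, 5) = -1
// (rounding toward zero), whereas FloorDiv(-7, 5) = -2 (rounding toward
// negative infinity).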
   8509 
   8510 // Restores tensors from a V2 checkpoint.
   8511 //
   8512 // For backward compatibility with the V1 format, this Op currently allows
   8513 // restoring from a V1 checkpoint as well:
   8514 //   - This Op first attempts to find the V2 index file pointed to by "prefix", and
   8515 //     if found proceed to read it as a V2 checkpoint;
   8516 //   - Otherwise the V1 read path is invoked.
   8517 // Relying on this behavior is not recommended, as the ability to fall back to read
   8518 // V1 might be deprecated and eventually removed.
   8519 //
   8520 // By default, restores the named tensors in full.  If the caller wishes to restore
   8521 // specific slices of stored tensors, "shape_and_slices" should be non-empty
   8522 // strings and correspondingly well-formed.
   8523 //
   8524 // Callers must ensure all the named tensors are indeed stored in the checkpoint.
   8525 //
   8526 // Arguments:
   8527 //	prefix: Must have a single element.  The prefix of a V2 checkpoint.
   8528 //	tensor_names: shape {N}.  The names of the tensors to be restored.
   8529 //	shape_and_slices: shape {N}.  The slice specs of the tensors to be restored.
   8530 // Empty strings indicate that they are non-partitioned tensors.
   8531 //	dtypes: shape {N}.  The list of expected dtype for the tensors.  Must match
   8532 // those stored in the checkpoint.
   8533 //
   8534 // Returns shape {N}.  The restored tensors, whose shapes are read from the
   8535 // checkpoint directly.
   8536 func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output) {
   8537 	if scope.Err() != nil {
   8538 		return
   8539 	}
   8540 	attrs := map[string]interface{}{"dtypes": dtypes}
   8541 	opspec := tf.OpSpec{
   8542 		Type: "RestoreV2",
   8543 		Input: []tf.Input{
   8544 			prefix, tensor_names, shape_and_slices,
   8545 		},
   8546 		Attrs: attrs,
   8547 	}
   8548 	op := scope.AddOperation(opspec)
   8549 	if scope.Err() != nil {
   8550 		return
   8551 	}
   8552 	var idx int
   8553 	var err error
   8554 	if tensors, idx, err = makeOutputList(op, idx, "tensors"); err != nil {
   8555 		scope.UpdateErr("RestoreV2", err)
   8556 		return
   8557 	}
   8558 	return tensors
   8559 }
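// An illustrative sketch of restoring two tensors (the prefix, tensor names
// and dtypes here are hypothetical):
//
//	s := NewScope()
//	prefix := Const(s, "ckpt/model")
//	names := Const(s, []string{"w", "b"})
//	slices := Const(s, []string{"", ""}) // non-partitioned tensors
//	tensors := RestoreV2(s, prefix, names, slices,
//		[]tf.DataType{tf.Float, tf.Float})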
   8560 
   8561 // Decode web-safe base64-encoded strings.
   8562 //
   8563 // Input may or may not have padding at the end. See EncodeBase64 for padding.
   8564 // Web-safe means that input must use - and _ instead of + and /.
   8565 //
   8566 // Arguments:
   8567 //	input: Base64 strings to decode.
   8568 //
   8569 // Returns Decoded strings.
   8570 func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
   8571 	if scope.Err() != nil {
   8572 		return
   8573 	}
   8574 	opspec := tf.OpSpec{
   8575 		Type: "DecodeBase64",
   8576 		Input: []tf.Input{
   8577 			input,
   8578 		},
   8579 	}
   8580 	op := scope.AddOperation(opspec)
   8581 	return op.Output(0)
   8582 }
   8583 
   8584 // Store the input tensor in the state of the current session.
   8585 //
   8586 // Arguments:
   8587 //	value: The tensor to be stored.
   8588 //
   8589 // Returns The handle for the tensor stored in the session state, represented
   8590 // as a string.
   8591 func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output) {
   8592 	if scope.Err() != nil {
   8593 		return
   8594 	}
   8595 	opspec := tf.OpSpec{
   8596 		Type: "GetSessionHandle",
   8597 		Input: []tf.Input{
   8598 			value,
   8599 		},
   8600 	}
   8601 	op := scope.AddOperation(opspec)
   8602 	return op.Output(0)
   8603 }
   8604 
   8605 // ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
   8606 type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
   8607 
   8608 // ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
   8609 //
   8610 // value: If True, updating of the var and accum tensors will be protected by
   8611 // a lock; otherwise the behavior is undefined, but may exhibit less contention.
   8612 // If not specified, defaults to false
   8613 func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr {
   8614 	return func(m optionalAttr) {
   8615 		m["use_locking"] = value
   8616 	}
   8617 }
   8618 
8619 // Sparse update of entries in '*var' and '*accum' according to the FOBOS algorithm.
8620 //
8621 // That is, for rows for which grad is available, we update var and accum as follows:
   8622 // accum += grad * grad
   8623 // prox_v = var
   8624 // prox_v -= lr * grad * (1 / sqrt(accum))
   8625 // var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
   8626 //
   8627 // Arguments:
   8628 //	var_: Should be from a Variable().
   8629 //	accum: Should be from a Variable().
   8630 //	lr: Learning rate. Must be a scalar.
   8631 //	l1: L1 regularization. Must be a scalar.
   8632 //	l2: L2 regularization. Must be a scalar.
   8633 //	grad: The gradient.
   8634 //	indices: A vector of indices into the first dimension of var and accum.
   8635 //
   8636 // Returns the created operation.
   8637 func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
   8638 	if scope.Err() != nil {
   8639 		return
   8640 	}
   8641 	attrs := map[string]interface{}{}
   8642 	for _, a := range optional {
   8643 		a(attrs)
   8644 	}
   8645 	opspec := tf.OpSpec{
   8646 		Type: "ResourceSparseApplyProximalAdagrad",
   8647 		Input: []tf.Input{
   8648 			var_, accum, lr, l1, l2, grad, indices,
   8649 		},
   8650 		Attrs: attrs,
   8651 	}
   8652 	return scope.AddOperation(opspec)
   8653 }
   8654 
   8655 // Returns element-wise largest integer not greater than x.
   8656 func Floor(scope *Scope, x tf.Output) (y tf.Output) {
   8657 	if scope.Err() != nil {
   8658 		return
   8659 	}
   8660 	opspec := tf.OpSpec{
   8661 		Type: "Floor",
   8662 		Input: []tf.Input{
   8663 			x,
   8664 		},
   8665 	}
   8666 	op := scope.AddOperation(opspec)
   8667 	return op.Output(0)
   8668 }
   8669 
   8670 // Computes the Gauss error function of `x` element-wise.
   8671 func Erf(scope *Scope, x tf.Output) (y tf.Output) {
   8672 	if scope.Err() != nil {
   8673 		return
   8674 	}
   8675 	opspec := tf.OpSpec{
   8676 		Type: "Erf",
   8677 		Input: []tf.Input{
   8678 			x,
   8679 		},
   8680 	}
   8681 	op := scope.AddOperation(opspec)
   8682 	return op.Output(0)
   8683 }
   8684 
   8685 // Reads the value of a variable.
   8686 //
   8687 // The tensor returned by this operation is immutable.
   8688 //
   8689 // The value returned by this operation is guaranteed to be influenced by all the
   8690 // writes on which this operation depends directly or indirectly, and to not be
   8691 // influenced by any of the writes which depend directly or indirectly on this
   8692 // operation.
   8693 //
   8694 // Arguments:
   8695 //	resource: handle to the resource in which to store the variable.
   8696 //	dtype: the dtype of the value.
   8697 func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output) {
   8698 	if scope.Err() != nil {
   8699 		return
   8700 	}
   8701 	attrs := map[string]interface{}{"dtype": dtype}
   8702 	opspec := tf.OpSpec{
   8703 		Type: "ReadVariableOp",
   8704 		Input: []tf.Input{
   8705 			resource,
   8706 		},
   8707 		Attrs: attrs,
   8708 	}
   8709 	op := scope.AddOperation(opspec)
   8710 	return op.Output(0)
   8711 }
   8712 
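// Example: a minimal sketch of reading a resource variable. VarHandleOp and
// AssignVariableOp are assumed to be available elsewhere in this package.
//
// ```go
// s := NewScope()
// v := VarHandleOp(s, tf.Float, tf.ScalarShape()) // resource handle
// assign := AssignVariableOp(s, v, Const(s, float32(3)))
// // A read sequenced after the assignment observes the written value.
// val := ReadVariableOp(s.WithControlDependencies(assign), v, tf.Float)
// _ = val
// ```
//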
   8713 // MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
   8714 type MaxPool3DGradAttr func(optionalAttr)
   8715 
   8716 // MaxPool3DGradDataFormat sets the optional data_format attribute to value.
   8717 //
   8718 // value: The data format of the input and output data. With the
   8719 // default format "NDHWC", the data is stored in the order of:
   8720 //     [batch, in_depth, in_height, in_width, in_channels].
   8721 // Alternatively, the format could be "NCDHW", the data storage order is:
   8722 //     [batch, in_channels, in_depth, in_height, in_width].
   8723 // If not specified, defaults to "NDHWC"
   8724 func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr {
   8725 	return func(m optionalAttr) {
   8726 		m["data_format"] = value
   8727 	}
   8728 }
   8729 
   8730 // Computes gradients of max pooling function.
   8731 //
   8732 // Arguments:
   8733 //	orig_input: The original input tensor.
   8734 //	orig_output: The original output tensor.
   8735 //	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
   8736 //	ksize: 1-D tensor of length 5. The size of the window for each dimension of
   8737 // the input tensor. Must have `ksize[0] = ksize[4] = 1`.
   8738 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   8739 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   8740 //	padding: The type of padding algorithm to use.
   8741 func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output) {
   8742 	if scope.Err() != nil {
   8743 		return
   8744 	}
   8745 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   8746 	for _, a := range optional {
   8747 		a(attrs)
   8748 	}
   8749 	opspec := tf.OpSpec{
   8750 		Type: "MaxPool3DGrad",
   8751 		Input: []tf.Input{
   8752 			orig_input, orig_output, grad,
   8753 		},
   8754 		Attrs: attrs,
   8755 	}
   8756 	op := scope.AddOperation(opspec)
   8757 	return op.Output(0)
   8758 }
   8759 
   8760 // SparseReduceSumAttr is an optional argument to SparseReduceSum.
   8761 type SparseReduceSumAttr func(optionalAttr)
   8762 
   8763 // SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
   8764 //
   8765 // value: If true, retain reduced dimensions with length 1.
   8766 // If not specified, defaults to false
   8767 func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr {
   8768 	return func(m optionalAttr) {
   8769 		m["keep_dims"] = value
   8770 	}
   8771 }
   8772 
   8773 // Computes the sum of elements across dimensions of a SparseTensor.
   8774 //
   8775 // This Op takes a SparseTensor and is the sparse counterpart to
   8776 // `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
   8777 // instead of a sparse one.
   8778 //
   8779 // Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
   8780 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   8781 // `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
   8782 // with length 1.
   8783 //
   8784 // If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
   8785 // with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to the indexing rules in Python.
   8787 //
   8788 // Arguments:
   8789 //	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   8790 // SparseTensor, possibly not in canonical ordering.
   8791 //	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
   8792 //	input_shape: 1-D.  Shape of the input SparseTensor.
   8793 //	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
   8794 //
   8795 // Returns `R-K`-D.  The reduced Tensor.
   8796 func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) {
   8797 	if scope.Err() != nil {
   8798 		return
   8799 	}
   8800 	attrs := map[string]interface{}{}
   8801 	for _, a := range optional {
   8802 		a(attrs)
   8803 	}
   8804 	opspec := tf.OpSpec{
   8805 		Type: "SparseReduceSum",
   8806 		Input: []tf.Input{
   8807 			input_indices, input_values, input_shape, reduction_axes,
   8808 		},
   8809 		Attrs: attrs,
   8810 	}
   8811 	op := scope.AddOperation(opspec)
   8812 	return op.Output(0)
   8813 }
   8814 
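// Example: a minimal sketch of summing a 2x3 SparseTensor with entries
// {(0,0): 1, (0,1): 2, (1,2): 3} over axis 1:
//
// ```go
// s := NewScope()
// sum := SparseReduceSum(s,
// 	Const(s, [][]int64{{0, 0}, {0, 1}, {1, 2}}), // input_indices
// 	Const(s, []float32{1, 2, 3}),                // input_values
// 	Const(s, []int64{2, 3}),                     // input_shape
// 	Const(s, []int32{1}),                        // reduction_axes
// 	SparseReduceSumKeepDims(false))
// _ = sum // dense tensor: [3, 3]
// ```
//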
   8815 // ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
   8816 type ResourceApplyAdagradAttr func(optionalAttr)
   8817 
   8818 // ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
   8819 //
   8820 // value: If `True`, updating of the var and accum tensors will be protected
   8821 // by a lock; otherwise the behavior is undefined, but may exhibit less
   8822 // contention.
   8823 // If not specified, defaults to false
   8824 func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
   8825 	return func(m optionalAttr) {
   8826 		m["use_locking"] = value
   8827 	}
   8828 }
   8829 
   8830 // Update '*var' according to the adagrad scheme.
   8831 //
   8832 // accum += grad * grad
   8833 // var -= lr * grad * (1 / sqrt(accum))
   8834 //
   8835 // Arguments:
   8836 //	var_: Should be from a Variable().
   8837 //	accum: Should be from a Variable().
   8838 //	lr: Scaling factor. Must be a scalar.
   8839 //	grad: The gradient.
   8840 //
   8841 // Returns the created operation.
   8842 func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
   8843 	if scope.Err() != nil {
   8844 		return
   8845 	}
   8846 	attrs := map[string]interface{}{}
   8847 	for _, a := range optional {
   8848 		a(attrs)
   8849 	}
   8850 	opspec := tf.OpSpec{
   8851 		Type: "ResourceApplyAdagrad",
   8852 		Input: []tf.Input{
   8853 			var_, accum, lr, grad,
   8854 		},
   8855 		Attrs: attrs,
   8856 	}
   8857 	return scope.AddOperation(opspec)
   8858 }
   8859 
   8860 // Returns element-wise remainder of division. This emulates C semantics in that
   8861 //
   8862 // the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
   8863 // y + truncate_mod(x, y) = x`.
   8864 //
   8865 // *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
   8866 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   8867 func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   8868 	if scope.Err() != nil {
   8869 		return
   8870 	}
   8871 	opspec := tf.OpSpec{
   8872 		Type: "TruncateMod",
   8873 		Input: []tf.Input{
   8874 			x, y,
   8875 		},
   8876 	}
   8877 	op := scope.AddOperation(opspec)
   8878 	return op.Output(0)
   8879 }
   8880 
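// Example: truncating division rounds toward zero, so the remainder keeps the
// sign of the dividend x:
//
// ```go
// s := NewScope()
// z := TruncateMod(s, Const(s, []int32{-7, 7}), Const(s, []int32{3, -3}))
// _ = z // [-1, 1]: truncate(-7/3) = -2, and -7 - (-2*3) = -1, etc.
// ```
//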
   8881 // Inverse 2D real-valued fast Fourier transform.
   8882 //
   8883 // Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
   8884 // signal over the inner-most 2 dimensions of `input`.
   8885 //
   8886 // The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
   8887 // The inner-most dimension contains the `fft_length / 2 + 1` unique components of
   8888 // the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
   8889 // from the size of the inner-most 2 dimensions of `input`. If the FFT length used
   8890 // to compute `input` is odd, it should be provided since it cannot be inferred
   8891 // properly.
   8892 //
   8893 // Along each axis `IRFFT2D` is computed on, if `fft_length` (or
   8894 // `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
   8895 // corresponding dimension of `input`, the dimension is cropped. If it is larger,
   8896 // the dimension is padded with zeros.
   8897 //
   8898 // Arguments:
   8899 //	input: A complex64 tensor.
   8900 //	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
   8901 //
   8902 // Returns A float32 tensor of the same rank as `input`. The inner-most 2
   8903 //   dimensions of `input` are replaced with the `fft_length` samples of their
   8904 //   inverse 2D Fourier transform.
   8905 //
   8906 // @compatibility(numpy)
   8907 // Equivalent to np.fft.irfft2
   8908 // @end_compatibility
   8909 func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
   8910 	if scope.Err() != nil {
   8911 		return
   8912 	}
   8913 	opspec := tf.OpSpec{
   8914 		Type: "IRFFT2D",
   8915 		Input: []tf.Input{
   8916 			input, fft_length,
   8917 		},
   8918 	}
   8919 	op := scope.AddOperation(opspec)
   8920 	return op.Output(0)
   8921 }
   8922 
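// Example: a minimal sketch of an RFFT2D/IRFFT2D round trip on a 4x4 signal
// (RFFT2D is defined elsewhere in this package):
//
// ```go
// s := NewScope()
// signal := Placeholder(s, tf.Float) // shape [..., 4, 4] at run time
// fftLen := Const(s, []int32{4, 4})
// spectrum := RFFT2D(s, signal, fftLen) // complex64, inner dims [4, 3]
// back := IRFFT2D(s, spectrum, fftLen)  // float32, inner dims [4, 4]
// _ = back
// ```
//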
   8923 // Transforms a vector of brain.Example protos (as strings) into typed tensors.
   8924 //
   8925 // Arguments:
   8926 //	serialized: A vector containing a batch of binary serialized Example protos.
   8927 //	names: A vector containing the names of the serialized protos.
   8928 // May contain, for example, table key (descriptive) names for the
   8929 // corresponding serialized protos.  These are purely useful for debugging
   8930 // purposes, and the presence of values here has no effect on the output.
   8931 // May also be an empty vector if no names are available.
   8932 // If non-empty, this vector must be the same length as "serialized".
   8933 //	sparse_keys: A list of Nsparse string Tensors (scalars).
   8934 // The keys expected in the Examples' features associated with sparse values.
   8935 //	dense_keys: A list of Ndense string Tensors (scalars).
   8936 // The keys expected in the Examples' features associated with dense values.
   8937 //	dense_defaults: A list of Ndense Tensors (some may be empty).
   8938 // dense_defaults[j] provides default values
   8939 // when the example's feature_map lacks dense_key[j].  If an empty Tensor is
   8940 // provided for dense_defaults[j], then the Feature dense_keys[j] is required.
   8941 // The input type is inferred from dense_defaults[j], even when it's empty.
   8942 // If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
   8943 // then the shape of dense_defaults[j] must match that of dense_shapes[j].
   8944 // If dense_shapes[j] has an undefined major dimension (variable strides dense
   8945 // feature), dense_defaults[j] must contain a single element:
   8946 // the padding element.
   8947 //	sparse_types: A list of Nsparse types; the data types of data in each Feature
   8948 // given in sparse_keys.
// Currently ParseExample supports DT_FLOAT (FloatList),
   8950 // DT_INT64 (Int64List), and DT_STRING (BytesList).
   8951 //	dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
   8952 // given in dense_keys.
   8953 // The number of elements in the Feature corresponding to dense_key[j]
   8954 // must always equal dense_shapes[j].NumEntries().
   8955 // If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
   8956 // Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
   8957 // The dense outputs are just the inputs row-stacked by batch.
   8958 // This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
   8959 // the shape of the output Tensor dense_values[j] will be
   8960 // (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
   8961 // of elements of length D1 * .... * DN, across all minibatch entries
   8962 // in the input.  Any minibatch entry with less than M blocks of elements of
   8963 // length D1 * ... * DN will be padded with the corresponding default_value
   8964 // scalar element along the second dimension.
   8965 func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
   8966 	if scope.Err() != nil {
   8967 		return
   8968 	}
   8969 	attrs := map[string]interface{}{"sparse_types": sparse_types, "dense_shapes": dense_shapes}
   8970 	opspec := tf.OpSpec{
   8971 		Type: "ParseExample",
   8972 		Input: []tf.Input{
   8973 			serialized, names, tf.OutputList(sparse_keys), tf.OutputList(dense_keys), tf.OutputList(dense_defaults),
   8974 		},
   8975 		Attrs: attrs,
   8976 	}
   8977 	op := scope.AddOperation(opspec)
   8978 	if scope.Err() != nil {
   8979 		return
   8980 	}
   8981 	var idx int
   8982 	var err error
   8983 	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
   8984 		scope.UpdateErr("ParseExample", err)
   8985 		return
   8986 	}
   8987 	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
   8988 		scope.UpdateErr("ParseExample", err)
   8989 		return
   8990 	}
   8991 	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
   8992 		scope.UpdateErr("ParseExample", err)
   8993 		return
   8994 	}
   8995 	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
   8996 		scope.UpdateErr("ParseExample", err)
   8997 		return
   8998 	}
   8999 	return sparse_indices, sparse_values, sparse_shapes, dense_values
   9000 }
   9001 
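// Example: a minimal sketch of parsing one dense int64 feature; the feature
// key "age", the default of -1, and the shapes are all hypothetical.
//
// ```go
// s := NewScope()
// serialized := Placeholder(s, tf.String) // vector of Example protos
// names := Const(s, []string{})           // no debug names
// _, _, _, dense := ParseExample(s, serialized, names,
// 	nil,                                // sparse_keys: none
// 	[]tf.Output{Const(s, "age")},       // dense_keys
// 	[]tf.Output{Const(s, []int64{-1})}, // dense_defaults
// 	nil,                                // sparse_types: none
// 	[]tf.Shape{tf.MakeShape(1)})        // dense_shapes
// _ = dense[0] // shape [batch, 1], int64
// ```
//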
   9002 // VariableShapeAttr is an optional argument to VariableShape.
   9003 type VariableShapeAttr func(optionalAttr)
   9004 
   9005 // VariableShapeOutType sets the optional out_type attribute to value.
   9006 // If not specified, defaults to DT_INT32
   9007 func VariableShapeOutType(value tf.DataType) VariableShapeAttr {
   9008 	return func(m optionalAttr) {
   9009 		m["out_type"] = value
   9010 	}
   9011 }
   9012 
   9013 // Returns the shape of the variable pointed to by `resource`.
   9014 //
   9015 // This operation returns a 1-D integer tensor representing the shape of `input`.
   9016 //
   9017 // For example:
   9018 //
   9019 // ```
   9020 // # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
   9021 // shape(t) ==> [2, 2, 3]
   9022 // ```
   9023 func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output) {
   9024 	if scope.Err() != nil {
   9025 		return
   9026 	}
   9027 	attrs := map[string]interface{}{}
   9028 	for _, a := range optional {
   9029 		a(attrs)
   9030 	}
   9031 	opspec := tf.OpSpec{
   9032 		Type: "VariableShape",
   9033 		Input: []tf.Input{
   9034 			input,
   9035 		},
   9036 		Attrs: attrs,
   9037 	}
   9038 	op := scope.AddOperation(opspec)
   9039 	return op.Output(0)
   9040 }
   9041 
   9042 // Fills empty rows in the input 2-D `SparseTensor` with a default value.
   9043 //
   9044 // The input `SparseTensor` is represented via the tuple of inputs
   9045 // (`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
   9046 // same `dense_shape` but with indices `output_indices` and values
   9047 // `output_values`.
   9048 //
   9049 // This op inserts a single entry for every row that doesn't have any values.
   9050 // The index is created as `[row, 0, ..., 0]` and the inserted value
   9051 // is `default_value`.
   9052 //
   9053 // For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
   9054 //
   9055 //     [0, 1]: a
   9056 //     [0, 3]: b
   9057 //     [2, 0]: c
   9058 //     [3, 1]: d
   9059 //
   9060 // Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
   9061 //
   9062 //     [0, 1]: a
   9063 //     [0, 3]: b
   9064 //     [1, 0]: default_value
   9065 //     [2, 0]: c
   9066 //     [3, 1]: d
   9067 //     [4, 0]: default_value
   9068 //
   9069 // The output `SparseTensor` will be in row-major order and will have the
   9070 // same shape as the input.
   9071 //
   9072 // This op also returns an indicator vector shaped `[dense_shape[0]]` such that
   9073 //
   9074 //     empty_row_indicator[i] = True iff row i was an empty row.
   9075 //
   9076 // And a reverse index map vector shaped `[indices.shape[0]]` that is used during
   9077 // backpropagation,
   9078 //
   9079 //     reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
   9080 //
   9081 // Arguments:
   9082 //	indices: 2-D. the indices of the sparse tensor.
   9083 //	values: 1-D. the values of the sparse tensor.
   9084 //	dense_shape: 1-D. the shape of the sparse tensor.
   9085 //	default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
   9086 //   for rows missing from the input sparse tensor.
//
// Returns:
//	output_indices: 2-D. The indices of the filled sparse tensor.
//	output_values: 1-D. The values of the filled sparse tensor.
//	empty_row_indicator: 1-D. Whether the dense row was missing in the input
// sparse tensor.
//	reverse_index_map: 1-D. A map from the input indices to the output indices.
   9091 func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output) {
   9092 	if scope.Err() != nil {
   9093 		return
   9094 	}
   9095 	opspec := tf.OpSpec{
   9096 		Type: "SparseFillEmptyRows",
   9097 		Input: []tf.Input{
   9098 			indices, values, dense_shape, default_value,
   9099 		},
   9100 	}
   9101 	op := scope.AddOperation(opspec)
   9102 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
   9103 }
   9104 
   9105 // Reverses specific dimensions of a tensor.
   9106 //
   9107 // Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
   9108 // of `tensor`, this operation reverses each dimension i of `tensor` where
   9109 // `dims[i]` is `True`.
   9110 //
   9111 // `tensor` can have up to 8 dimensions. The number of dimensions
   9112 // of `tensor` must equal the number of elements in `dims`. In other words:
   9113 //
   9114 // `rank(tensor) = size(dims)`
   9115 //
   9116 // For example:
   9117 //
   9118 // ```
   9119 // # tensor 't' is [[[[ 0,  1,  2,  3],
   9120 // #                  [ 4,  5,  6,  7],
   9121 // #                  [ 8,  9, 10, 11]],
   9122 // #                 [[12, 13, 14, 15],
   9123 // #                  [16, 17, 18, 19],
   9124 // #                  [20, 21, 22, 23]]]]
   9125 // # tensor 't' shape is [1, 2, 3, 4]
   9126 //
   9127 // # 'dims' is [False, False, False, True]
   9128 // reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
   9129 //                         [ 7,  6,  5,  4],
//                         [11, 10,  9,  8]],
   9131 //                        [[15, 14, 13, 12],
   9132 //                         [19, 18, 17, 16],
   9133 //                         [23, 22, 21, 20]]]]
   9134 //
   9135 // # 'dims' is [False, True, False, False]
   9136 // reverse(t, dims) ==> [[[[12, 13, 14, 15],
   9137 //                         [16, 17, 18, 19],
//                         [20, 21, 22, 23]],
   9139 //                        [[ 0,  1,  2,  3],
   9140 //                         [ 4,  5,  6,  7],
   9141 //                         [ 8,  9, 10, 11]]]]
   9142 //
   9143 // # 'dims' is [False, False, True, False]
   9144 // reverse(t, dims) ==> [[[[8, 9, 10, 11],
   9145 //                         [4, 5, 6, 7],
//                         [0, 1, 2, 3]],
   9147 //                        [[20, 21, 22, 23],
   9148 //                         [16, 17, 18, 19],
   9149 //                         [12, 13, 14, 15]]]]
   9150 // ```
   9151 //
   9152 // Arguments:
   9153 //	tensor: Up to 8-D.
   9154 //	dims: 1-D. The dimensions to reverse.
   9155 //
   9156 // Returns The same shape as `tensor`.
   9157 func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) {
   9158 	if scope.Err() != nil {
   9159 		return
   9160 	}
   9161 	opspec := tf.OpSpec{
   9162 		Type: "Reverse",
   9163 		Input: []tf.Input{
   9164 			tensor, dims,
   9165 		},
   9166 	}
   9167 	op := scope.AddOperation(opspec)
   9168 	return op.Output(0)
   9169 }
   9170 
   9171 // Computes log softmax activations.
   9172 //
   9173 // For each batch `i` and class `j` we have
   9174 //
   9175 //     logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
   9176 //
   9177 // Arguments:
   9178 //	logits: 2-D with shape `[batch_size, num_classes]`.
   9179 //
   9180 // Returns Same shape as `logits`.
   9181 func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output) {
   9182 	if scope.Err() != nil {
   9183 		return
   9184 	}
   9185 	opspec := tf.OpSpec{
   9186 		Type: "LogSoftmax",
   9187 		Input: []tf.Input{
   9188 			logits,
   9189 		},
   9190 	}
   9191 	op := scope.AddOperation(opspec)
   9192 	return op.Output(0)
   9193 }
   9194 
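// Example: for logits [1, 2, 3], log(sum(exp(logits))) is approximately 3.407,
// so the result is approximately [-2.407, -1.407, -0.407]:
//
// ```go
// s := NewScope()
// lsm := LogSoftmax(s, Const(s, [][]float32{{1, 2, 3}}))
// _ = lsm // ~[[-2.407, -1.407, -0.407]]
// ```
//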
   9195 // Computes the inverse permutation of a tensor.
   9196 //
   9197 // This operation computes the inverse of an index permutation. It takes a 1-D
   9198 // integer tensor `x`, which represents the indices of a zero-based array, and
   9199 // swaps each value with its index position. In other words, for an output tensor
   9200 // `y` and an input tensor `x`, this operation computes the following:
   9201 //
   9202 // `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
   9203 //
   9204 // The values must include 0. There can be no duplicate values or negative values.
   9205 //
   9206 // For example:
   9207 //
   9208 // ```
   9209 // # tensor `x` is [3, 4, 0, 2, 1]
   9210 // invert_permutation(x) ==> [2, 4, 3, 0, 1]
   9211 // ```
   9212 //
   9213 // Arguments:
   9214 //	x: 1-D.
   9215 //
   9216 // Returns 1-D.
   9217 func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
   9218 	if scope.Err() != nil {
   9219 		return
   9220 	}
   9221 	opspec := tf.OpSpec{
   9222 		Type: "InvertPermutation",
   9223 		Input: []tf.Input{
   9224 			x,
   9225 		},
   9226 	}
   9227 	op := scope.AddOperation(opspec)
   9228 	return op.Output(0)
   9229 }
   9230 
   9231 // Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
   9232 //
   9233 // This operation folds the padded areas of `input` by `MirrorPad` according to the
   9234 // `paddings` you specify. `paddings` must be the same as `paddings` argument
   9235 // given to the corresponding `MirrorPad` op.
   9236 //
   9237 // The folded size of each dimension D of the output is:
   9238 //
   9239 // `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
   9240 //
   9241 // For example:
   9242 //
   9243 // ```
   9244 // # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
// # 'paddings' is [[0, 1], [0, 1]].
// # 'mode' is SYMMETRIC.
// # rank of 't' is 2.
// pad(t, paddings) ==> [[ 1,  5],
//                       [11, 28]]
// ```
//
// Each output entry is the sum of the corresponding input entry and the padded
// entries that mirror it: e.g. output[1, 1] = 5 + 6 + 8 + 9 = 28.
//
   9252 // Arguments:
   9253 //	input: The input tensor to be folded.
   9254 //	paddings: A two-column matrix specifying the padding sizes. The number of
   9255 // rows must be the same as the rank of `input`.
   9256 //	mode: The mode used in the `MirrorPad` op.
   9257 //
   9258 // Returns The folded tensor.
   9259 func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
   9260 	if scope.Err() != nil {
   9261 		return
   9262 	}
   9263 	attrs := map[string]interface{}{"mode": mode}
   9264 	opspec := tf.OpSpec{
   9265 		Type: "MirrorPadGrad",
   9266 		Input: []tf.Input{
   9267 			input, paddings,
   9268 		},
   9269 		Attrs: attrs,
   9270 	}
   9271 	op := scope.AddOperation(opspec)
   9272 	return op.Output(0)
   9273 }
   9274 
   9275 // Computes softmax cross entropy cost and gradients to backpropagate.
   9276 //
   9277 // Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
   9278 // a matrix of label probabilities, but rather a single label per row
   9279 // of features.  This label is considered to have probability 1.0 for the
   9280 // given row.
   9281 //
   9282 // Inputs are the logits, not probabilities.
   9283 //
   9284 // Arguments:
   9285 //	features: batch_size x num_classes matrix
   9286 //	labels: batch_size vector with values in [0, num_classes).
   9287 // This is the label for the given minibatch entry.
   9288 //
// Returns:
//	loss: Per-example loss (batch_size vector).
//	backprop: Backpropagated gradients (batch_size x num_classes matrix).
   9290 func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
   9291 	if scope.Err() != nil {
   9292 		return
   9293 	}
   9294 	opspec := tf.OpSpec{
   9295 		Type: "SparseSoftmaxCrossEntropyWithLogits",
   9296 		Input: []tf.Input{
   9297 			features, labels,
   9298 		},
   9299 	}
   9300 	op := scope.AddOperation(opspec)
   9301 	return op.Output(0), op.Output(1)
   9302 }
   9303 
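// Example: a minimal sketch with a batch of two examples and three classes;
// labels are class indices, not one-hot vectors.
//
// ```go
// s := NewScope()
// logits := Const(s, [][]float32{{2, 1, 0}, {0, 1, 2}})
// labels := Const(s, []int64{0, 2})
// loss, backprop := SparseSoftmaxCrossEntropyWithLogits(s, logits, labels)
// _, _ = loss, backprop // loss has shape [2]
// ```
//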
   9304 // ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
   9305 type ResourceSparseApplyAdagradDAAttr func(optionalAttr)
   9306 
   9307 // ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
   9308 //
   9309 // value: If True, updating of the var and accum tensors will be protected by
   9310 // a lock; otherwise the behavior is undefined, but may exhibit less contention.
   9311 // If not specified, defaults to false
   9312 func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr {
   9313 	return func(m optionalAttr) {
   9314 		m["use_locking"] = value
   9315 	}
   9316 }
   9317 
   9318 // Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
   9319 //
   9320 // Arguments:
   9321 //	var_: Should be from a Variable().
   9322 //	gradient_accumulator: Should be from a Variable().
   9323 //	gradient_squared_accumulator: Should be from a Variable().
   9324 //	grad: The gradient.
   9325 //	indices: A vector of indices into the first dimension of var and accum.
   9326 //	lr: Learning rate. Must be a scalar.
   9327 //	l1: L1 regularization. Must be a scalar.
   9328 //	l2: L2 regularization. Must be a scalar.
   9329 //	global_step: Training step number. Must be a scalar.
   9330 //
   9331 // Returns the created operation.
   9332 func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
   9333 	if scope.Err() != nil {
   9334 		return
   9335 	}
   9336 	attrs := map[string]interface{}{}
   9337 	for _, a := range optional {
   9338 		a(attrs)
   9339 	}
   9340 	opspec := tf.OpSpec{
   9341 		Type: "ResourceSparseApplyAdagradDA",
   9342 		Input: []tf.Input{
   9343 			var_, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step,
   9344 		},
   9345 		Attrs: attrs,
   9346 	}
   9347 	return scope.AddOperation(opspec)
   9348 }
   9349 
   9350 // Returns the truth value of NOT x element-wise.
   9351 func LogicalNot(scope *Scope, x tf.Output) (y tf.Output) {
   9352 	if scope.Err() != nil {
   9353 		return
   9354 	}
   9355 	opspec := tf.OpSpec{
   9356 		Type: "LogicalNot",
   9357 		Input: []tf.Input{
   9358 			x,
   9359 		},
   9360 	}
   9361 	op := scope.AddOperation(opspec)
   9362 	return op.Output(0)
   9363 }
   9364 
   9365 // 3D real-valued fast Fourier transform.
   9366 //
   9367 // Computes the 3-dimensional discrete Fourier transform of a real-valued signal
   9368 // over the inner-most 3 dimensions of `input`.
   9369 //
   9370 // Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
   9371 // `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
   9372 // of `output`: the zero-frequency term, followed by the `fft_length / 2`
   9373 // positive-frequency terms.
   9374 //
   9375 // Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
   9376 // corresponding dimension of `input`, the dimension is cropped. If it is larger,
   9377 // the dimension is padded with zeros.
   9378 //
   9379 // Arguments:
   9380 //	input: A float32 tensor.
   9381 //	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
   9382 //
   9383 // Returns A complex64 tensor of the same rank as `input`. The inner-most 3
//   dimensions of `input` are replaced with their 3D Fourier transform. The
   9385 //   inner-most dimension contains `fft_length / 2 + 1` unique frequency
   9386 //   components.
   9387 //
   9388 // @compatibility(numpy)
   9389 // Equivalent to np.fft.rfftn with 3 dimensions.
   9390 // @end_compatibility
   9391 func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
   9392 	if scope.Err() != nil {
   9393 		return
   9394 	}
   9395 	opspec := tf.OpSpec{
   9396 		Type: "RFFT3D",
   9397 		Input: []tf.Input{
   9398 			input, fft_length,
   9399 		},
   9400 	}
   9401 	op := scope.AddOperation(opspec)
   9402 	return op.Output(0)
   9403 }
   9404 
   9405 // TensorArrayV3Attr is an optional argument to TensorArrayV3.
   9406 type TensorArrayV3Attr func(optionalAttr)
   9407 
   9408 // TensorArrayV3ElementShape sets the optional element_shape attribute to value.
   9409 //
   9410 // value: The expected shape of an element, if known. Used to
   9411 // validate the shapes of TensorArray elements. If this shape is not
   9412 // fully specified, gathering zero-size TensorArrays is an error.
// If not specified, defaults to an unknown shape (unknown rank).
   9414 func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr {
   9415 	return func(m optionalAttr) {
   9416 		m["element_shape"] = value
   9417 	}
   9418 }
   9419 
   9420 // TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
   9421 //
   9422 // value: A boolean that determines whether writes to the TensorArray
   9423 // are allowed to grow the size.  By default, this is not allowed.
   9424 // If not specified, defaults to false
   9425 func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr {
   9426 	return func(m optionalAttr) {
   9427 		m["dynamic_size"] = value
   9428 	}
   9429 }
   9430 
   9431 // TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
   9432 //
   9433 // value: If true (default), Tensors in the TensorArray are cleared
   9434 // after being read.  This disables multiple read semantics but allows early
   9435 // release of memory.
   9436 // If not specified, defaults to true
   9437 func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr {
   9438 	return func(m optionalAttr) {
   9439 		m["clear_after_read"] = value
   9440 	}
   9441 }
   9442 
   9443 // TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
   9444 //
   9445 // value: If true (default is false), then all
// elements in the TensorArray will be expected to have identical shapes.
   9447 // This allows certain behaviors, like dynamically checking for
   9448 // consistent shapes on write, and being able to fill in properly
   9449 // shaped zero tensors on stack -- even if the element_shape attribute
   9450 // is not fully defined.
   9451 // If not specified, defaults to false
   9452 func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr {
   9453 	return func(m optionalAttr) {
   9454 		m["identical_element_shapes"] = value
   9455 	}
   9456 }
   9457 
   9458 // TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
   9459 //
   9460 // value: Overrides the name used for the temporary tensor_array
   9461 // resource. Default value is the name of the 'TensorArray' op (which
   9462 // is guaranteed unique).
   9463 // If not specified, defaults to ""
   9464 func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr {
   9465 	return func(m optionalAttr) {
   9466 		m["tensor_array_name"] = value
   9467 	}
   9468 }
   9469 
   9470 // An array of Tensors of given size.
   9471 //
   9472 // Write data via Write and read via Read or Pack.
   9473 //
   9474 // Arguments:
   9475 //	size: The size of the array.
   9476 //	dtype: The type of the elements on the tensor_array.
   9477 //
// Returns:
//	handle: The handle to the TensorArray.
//	flow: A scalar used to control gradient flow.
   9479 func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output) {
   9480 	if scope.Err() != nil {
   9481 		return
   9482 	}
   9483 	attrs := map[string]interface{}{"dtype": dtype}
   9484 	for _, a := range optional {
   9485 		a(attrs)
   9486 	}
   9487 	opspec := tf.OpSpec{
   9488 		Type: "TensorArrayV3",
   9489 		Input: []tf.Input{
   9490 			size,
   9491 		},
   9492 		Attrs: attrs,
   9493 	}
   9494 	op := scope.AddOperation(opspec)
   9495 	return op.Output(0), op.Output(1)
   9496 }
   9497 
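// Example: a minimal sketch of creating a float TensorArray of size 4 that is
// allowed to grow:
//
// ```go
// s := NewScope()
// handle, flow := TensorArrayV3(s, Const(s, int32(4)), tf.Float,
// 	TensorArrayV3DynamicSize(true))
// _, _ = handle, flow // pass both to the TensorArray read/write ops
// ```
//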
   9498 // MaxPool3DAttr is an optional argument to MaxPool3D.
   9499 type MaxPool3DAttr func(optionalAttr)
   9500 
   9501 // MaxPool3DDataFormat sets the optional data_format attribute to value.
   9502 //
   9503 // value: The data format of the input and output data. With the
   9504 // default format "NDHWC", the data is stored in the order of:
   9505 //     [batch, in_depth, in_height, in_width, in_channels].
   9506 // Alternatively, the format could be "NCDHW", the data storage order is:
   9507 //     [batch, in_channels, in_depth, in_height, in_width].
   9508 // If not specified, defaults to "NDHWC"
   9509 func MaxPool3DDataFormat(value string) MaxPool3DAttr {
   9510 	return func(m optionalAttr) {
   9511 		m["data_format"] = value
   9512 	}
   9513 }
   9514 
   9515 // Performs 3D max pooling on the input.
   9516 //
   9517 // Arguments:
   9518 //	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
   9519 //	ksize: 1-D tensor of length 5. The size of the window for each dimension of
   9520 // the input tensor. Must have `ksize[0] = ksize[4] = 1`.
   9521 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   9522 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   9523 //	padding: The type of padding algorithm to use.
   9524 //
   9525 // Returns The max pooled output tensor.
   9526 func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output) {
   9527 	if scope.Err() != nil {
   9528 		return
   9529 	}
   9530 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   9531 	for _, a := range optional {
   9532 		a(attrs)
   9533 	}
   9534 	opspec := tf.OpSpec{
   9535 		Type: "MaxPool3D",
   9536 		Input: []tf.Input{
   9537 			input,
   9538 		},
   9539 		Attrs: attrs,
   9540 	}
   9541 	op := scope.AddOperation(opspec)
   9542 	return op.Output(0)
   9543 }
   9544 
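// Example: a minimal sketch of 2x2x2 max pooling with stride 2 over an NDHWC
// input:
//
// ```go
// s := NewScope()
// input := Placeholder(s, tf.Float) // [batch, depth, rows, cols, channels] at run time
// pooled := MaxPool3D(s, input,
// 	[]int64{1, 2, 2, 2, 1}, // ksize: window of 2 in D, H, W
// 	[]int64{1, 2, 2, 2, 1}, // strides
// 	"VALID")
// _ = pooled
// ```
//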
   9545 // Computes the gradients of 3-D convolution with respect to the input.
   9546 //
   9547 // DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
   9548 //
   9549 // Arguments:
   9550 //	input: Shape `[batch, depth, rows, cols, in_channels]`.
   9551 //	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
   9552 // `in_channels` must match between `input` and `filter`.
   9553 //	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
   9554 // out_channels]`.
   9555 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   9556 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   9557 //	padding: The type of padding algorithm to use.
   9558 func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string) (output tf.Output) {
   9559 	if scope.Err() != nil {
   9560 		return
   9561 	}
   9562 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   9563 	opspec := tf.OpSpec{
   9564 		Type: "Conv3DBackpropInput",
   9565 		Input: []tf.Input{
   9566 			input, filter, out_backprop,
   9567 		},
   9568 		Attrs: attrs,
   9569 	}
   9570 	op := scope.AddOperation(opspec)
   9571 	return op.Output(0)
   9572 }
   9573 
   9574 // Inverse 2D fast Fourier transform.
   9575 //
   9576 // Computes the inverse 2-dimensional discrete Fourier transform over the
   9577 // inner-most 2 dimensions of `input`.
   9578 //
   9579 // Arguments:
   9580 //	input: A complex64 tensor.
   9581 //
   9582 // Returns A complex64 tensor of the same shape as `input`. The inner-most 2
   9583 //   dimensions of `input` are replaced with their inverse 2D Fourier transform.
   9584 //
   9585 // @compatibility(numpy)
   9586 // Equivalent to np.fft.ifft2
   9587 // @end_compatibility
   9588 func IFFT2D(scope *Scope, input tf.Output) (output tf.Output) {
   9589 	if scope.Err() != nil {
   9590 		return
   9591 	}
   9592 	opspec := tf.OpSpec{
   9593 		Type: "IFFT2D",
   9594 		Input: []tf.Input{
   9595 			input,
   9596 		},
   9597 	}
   9598 	op := scope.AddOperation(opspec)
   9599 	return op.Output(0)
   9600 }
   9601 
   9602 // Creates a tensor filled with a scalar value.
   9603 //
   9604 // This operation creates a tensor of shape `dims` and fills it with `value`.
   9605 //
   9606 // For example:
   9607 //
   9608 // ```
   9609 // # Output tensor has shape [2, 3].
// fill([2, 3], 9) ==> [[9, 9, 9],
   9611 //                      [9, 9, 9]]
   9612 // ```
   9613 //
   9614 // Arguments:
   9615 //	dims: 1-D. Represents the shape of the output tensor.
   9616 //	value: 0-D (scalar). Value to fill the returned tensor.
   9617 //
   9618 // @compatibility(numpy)
   9619 // Equivalent to np.full
   9620 // @end_compatibility
   9621 func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output) {
   9622 	if scope.Err() != nil {
   9623 		return
   9624 	}
   9625 	opspec := tf.OpSpec{
   9626 		Type: "Fill",
   9627 		Input: []tf.Input{
   9628 			dims, value,
   9629 		},
   9630 	}
   9631 	op := scope.AddOperation(opspec)
   9632 	return op.Output(0)
   9633 }
   9634 
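// Example: a minimal sketch producing the 2x3 tensor of nines from the
// comment above:
//
// ```go
// s := NewScope()
// nines := Fill(s, Const(s, []int32{2, 3}), Const(s, float32(9)))
// _ = nines // [[9, 9, 9], [9, 9, 9]]
// ```
//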
   9635 // 2D fast Fourier transform.
   9636 //
   9637 // Computes the 2-dimensional discrete Fourier transform over the inner-most
   9638 // 2 dimensions of `input`.
   9639 //
   9640 // Arguments:
   9641 //	input: A complex64 tensor.
   9642 //
   9643 // Returns A complex64 tensor of the same shape as `input`. The inner-most 2
   9644 //   dimensions of `input` are replaced with their 2D Fourier transform.
   9645 //
   9646 // @compatibility(numpy)
   9647 // Equivalent to np.fft.fft2
   9648 // @end_compatibility
   9649 func FFT2D(scope *Scope, input tf.Output) (output tf.Output) {
   9650 	if scope.Err() != nil {
   9651 		return
   9652 	}
   9653 	opspec := tf.OpSpec{
   9654 		Type: "FFT2D",
   9655 		Input: []tf.Input{
   9656 			input,
   9657 		},
   9658 	}
   9659 	op := scope.AddOperation(opspec)
   9660 	return op.Output(0)
   9661 }
   9662 
   9663 // ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
   9664 type ResourceApplyProximalGradientDescentAttr func(optionalAttr)
   9665 
   9666 // ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
   9667 //
   9668 // value: If True, the subtraction will be protected by a lock;
   9669 // otherwise the behavior is undefined, but may exhibit less contention.
   9670 // If not specified, defaults to false
   9671 func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr {
   9672 	return func(m optionalAttr) {
   9673 		m["use_locking"] = value
   9674 	}
   9675 }
   9676 
   9677 // Update '*var' as FOBOS algorithm with fixed learning rate.
   9678 //
   9679 // prox_v = var - alpha * delta
   9680 // var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
   9681 //
   9682 // Arguments:
   9683 //	var_: Should be from a Variable().
   9684 //	alpha: Scaling factor. Must be a scalar.
   9685 //	l1: L1 regularization. Must be a scalar.
   9686 //	l2: L2 regularization. Must be a scalar.
   9687 //	delta: The change.
   9688 //
   9689 // Returns the created operation.
   9690 func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
   9691 	if scope.Err() != nil {
   9692 		return
   9693 	}
   9694 	attrs := map[string]interface{}{}
   9695 	for _, a := range optional {
   9696 		a(attrs)
   9697 	}
   9698 	opspec := tf.OpSpec{
   9699 		Type: "ResourceApplyProximalGradientDescent",
   9700 		Input: []tf.Input{
   9701 			var_, alpha, l1, l2, delta,
   9702 		},
   9703 		Attrs: attrs,
   9704 	}
   9705 	return scope.AddOperation(opspec)
   9706 }
   9707 
   9708 // Computes the gradient for the sqrt of `x` wrt its input.
   9709 //
   9710 // Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
   9711 // is the corresponding input gradient.
   9712 func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
   9713 	if scope.Err() != nil {
   9714 		return
   9715 	}
   9716 	opspec := tf.OpSpec{
   9717 		Type: "SqrtGrad",
   9718 		Input: []tf.Input{
   9719 			y, dy,
   9720 		},
   9721 	}
   9722 	op := scope.AddOperation(opspec)
   9723 	return op.Output(0)
   9724 }
   9725 
   9726 // Get the value of the tensor specified by its handle.
   9727 //
   9728 // Arguments:
   9729 //	handle: The handle for a tensor stored in the session state.
   9730 //	dtype: The type of the output value.
   9731 //
   9732 // Returns The tensor for the given handle.
   9733 func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output) {
   9734 	if scope.Err() != nil {
   9735 		return
   9736 	}
   9737 	attrs := map[string]interface{}{"dtype": dtype}
   9738 	opspec := tf.OpSpec{
   9739 		Type: "GetSessionTensor",
   9740 		Input: []tf.Input{
   9741 			handle,
   9742 		},
   9743 		Attrs: attrs,
   9744 	}
   9745 	op := scope.AddOperation(opspec)
   9746 	return op.Output(0)
   9747 }
   9748 
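// Example: a minimal sketch of the handle round trip. GetSessionHandle is run
// once to obtain a handle string, which is then fed back in (here via a
// placeholder) to look the tensor up again:
//
// ```go
// s := NewScope()
// stored := GetSessionHandle(s, Const(s, []float32{1, 2, 3}))
// handle := Placeholder(s, tf.String) // feed the handle string here
// value := GetSessionTensor(s, handle, tf.Float)
// _, _ = stored, value
// ```
//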
   9749 // Returns x - y element-wise.
   9750 //
   9751 // *NOTE*: `Subtract` supports broadcasting. More about broadcasting
   9752 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   9753 func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   9754 	if scope.Err() != nil {
   9755 		return
   9756 	}
   9757 	opspec := tf.OpSpec{
   9758 		Type: "Sub",
   9759 		Input: []tf.Input{
   9760 			x, y,
   9761 		},
   9762 	}
   9763 	op := scope.AddOperation(opspec)
   9764 	return op.Output(0)
   9765 }
   9766 
   9767 // Computes softmax cross entropy cost and gradients to backpropagate.
   9768 //
   9769 // Inputs are the logits, not probabilities.
   9770 //
   9771 // Arguments:
   9772 //	features: batch_size x num_classes matrix
   9773 //	labels: batch_size x num_classes matrix
   9774 // The caller must ensure that each batch of labels represents a valid
   9775 // probability distribution.
   9776 //
// Returns:
//	loss: Per-example loss (batch_size vector).
//	backprop: Backpropagated gradients (batch_size x num_classes matrix).
   9778 func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
   9779 	if scope.Err() != nil {
   9780 		return
   9781 	}
   9782 	opspec := tf.OpSpec{
   9783 		Type: "SoftmaxCrossEntropyWithLogits",
   9784 		Input: []tf.Input{
   9785 			features, labels,
   9786 		},
   9787 	}
   9788 	op := scope.AddOperation(opspec)
   9789 	return op.Output(0), op.Output(1)
   9790 }
   9791 
   9792 // ReduceJoinAttr is an optional argument to ReduceJoin.
   9793 type ReduceJoinAttr func(optionalAttr)
   9794 
   9795 // ReduceJoinKeepDims sets the optional keep_dims attribute to value.
   9796 //
   9797 // value: If `True`, retain reduced dimensions with length `1`.
   9798 // If not specified, defaults to false
   9799 func ReduceJoinKeepDims(value bool) ReduceJoinAttr {
   9800 	return func(m optionalAttr) {
   9801 		m["keep_dims"] = value
   9802 	}
   9803 }
   9804 
   9805 // ReduceJoinSeparator sets the optional separator attribute to value.
   9806 //
   9807 // value: The separator to use when joining.
   9808 // If not specified, defaults to ""
   9809 func ReduceJoinSeparator(value string) ReduceJoinAttr {
   9810 	return func(m optionalAttr) {
   9811 		m["separator"] = value
   9812 	}
   9813 }
   9814 
   9815 // Joins a string Tensor across the given dimensions.
   9816 //
   9817 // Computes the string join across dimensions in the given string Tensor of shape
   9818 // `[d_0, d_1, ..., d_n-1]`.  Returns a new Tensor created by joining the input
   9819 // strings with the given separator (default: empty string).  Negative indices are
   9820 // counted backwards from the end, with `-1` being equivalent to `n - 1`.
   9821 //
   9822 // For example:
   9823 //
   9824 // ```python
   9825 // # tensor `a` is [["a", "b"], ["c", "d"]]
   9826 // tf.reduce_join(a, 0) ==> ["ac", "bd"]
   9827 // tf.reduce_join(a, 1) ==> ["ab", "cd"]
   9828 // tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
   9829 // tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
   9830 // tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
   9831 // tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
   9832 // tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
   9833 // tf.reduce_join(a, [0, 1]) ==> ["acbd"]
   9834 // tf.reduce_join(a, [1, 0]) ==> ["abcd"]
   9835 // tf.reduce_join(a, []) ==> ["abcd"]
   9836 // ```
   9837 //
   9838 // Arguments:
   9839 //	inputs: The input to be joined.  All reduced indices must have non-zero size.
   9840 //	reduction_indices: The dimensions to reduce over.  Dimensions are reduced in the
   9841 // order specified.  Omitting `reduction_indices` is equivalent to passing
   9842 // `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
   9843 //
// Returns a tensor with shape equal to that of the input, with reduced
// dimensions removed or set to `1` depending on `keep_dims`.
   9846 func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output) {
   9847 	if scope.Err() != nil {
   9848 		return
   9849 	}
   9850 	attrs := map[string]interface{}{}
   9851 	for _, a := range optional {
   9852 		a(attrs)
   9853 	}
   9854 	opspec := tf.OpSpec{
   9855 		Type: "ReduceJoin",
   9856 		Input: []tf.Input{
   9857 			inputs, reduction_indices,
   9858 		},
   9859 		Attrs: attrs,
   9860 	}
   9861 	op := scope.AddOperation(opspec)
   9862 	return op.Output(0)
   9863 }
   9864 
   9865 // Computes cos of x element-wise.
   9866 func Cos(scope *Scope, x tf.Output) (y tf.Output) {
   9867 	if scope.Err() != nil {
   9868 		return
   9869 	}
   9870 	opspec := tf.OpSpec{
   9871 		Type: "Cos",
   9872 		Input: []tf.Input{
   9873 			x,
   9874 		},
   9875 	}
   9876 	op := scope.AddOperation(opspec)
   9877 	return op.Output(0)
   9878 }
   9879 
   9880 // FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
   9881 type FusedBatchNormGradAttr func(optionalAttr)
   9882 
   9883 // FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
   9884 //
   9885 // value: A small float number added to the variance of x.
   9886 // If not specified, defaults to 0.0001
   9887 func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
   9888 	return func(m optionalAttr) {
   9889 		m["epsilon"] = value
   9890 	}
   9891 }
   9892 
   9893 // FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
   9894 //
   9895 // value: The data format for y_backprop, x, x_backprop.
   9896 // Either "NHWC" (default) or "NCHW".
   9897 // If not specified, defaults to "NHWC"
   9898 func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
   9899 	return func(m optionalAttr) {
   9900 		m["data_format"] = value
   9901 	}
   9902 }
   9903 
   9904 // FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
   9905 //
   9906 // value: A bool value to indicate the operation is for training (default)
   9907 // or inference.
   9908 // If not specified, defaults to true
   9909 func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
   9910 	return func(m optionalAttr) {
   9911 		m["is_training"] = value
   9912 	}
   9913 }
   9914 
   9915 // Gradient for batch normalization.
   9916 //
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
   9918 // The size of 1D Tensors matches the dimension C of the 4D Tensors.
   9919 //
   9920 // Arguments:
   9921 //	y_backprop: A 4D Tensor for the gradient with respect to y.
   9922 //	x: A 4D Tensor for input data.
   9923 //	scale: A 1D Tensor for scaling factor, to scale the normalized x.
   9924 //	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
   9925 // mean to be reused in gradient computation. When is_training is
   9926 // False, a 1D Tensor for the population mean to be reused in both
   9927 // 1st and 2nd order gradient computation.
   9928 //	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
   9929 // variance (inverted variance in the cuDNN case) to be reused in
   9930 // gradient computation. When is_training is False, a 1D Tensor
   9931 // for the population variance to be reused in both 1st and 2nd
   9932 // order gradient computation.
   9933 //
// Returns:
//	x_backprop: A 4D Tensor for the gradient with respect to x.
//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
//	reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
//	reserve_space_4: Unused placeholder to match the variance input in FusedBatchNorm.
   9936 func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
   9937 	if scope.Err() != nil {
   9938 		return
   9939 	}
   9940 	attrs := map[string]interface{}{}
   9941 	for _, a := range optional {
   9942 		a(attrs)
   9943 	}
   9944 	opspec := tf.OpSpec{
   9945 		Type: "FusedBatchNormGrad",
   9946 		Input: []tf.Input{
   9947 			y_backprop, x, scale, reserve_space_1, reserve_space_2,
   9948 		},
   9949 		Attrs: attrs,
   9950 	}
   9951 	op := scope.AddOperation(opspec)
   9952 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
   9953 }
   9954 
   9955 // TopKAttr is an optional argument to TopK.
   9956 type TopKAttr func(optionalAttr)
   9957 
   9958 // TopKSorted sets the optional sorted attribute to value.
   9959 //
// value: If true, the resulting `k` elements will be sorted by the values in
   9961 // descending order.
   9962 // If not specified, defaults to true
   9963 func TopKSorted(value bool) TopKAttr {
   9964 	return func(m optionalAttr) {
   9965 		m["sorted"] = value
   9966 	}
   9967 }
   9968 
   9969 // Finds values and indices of the `k` largest elements for the last dimension.
   9970 //
   9971 // DEPRECATED at GraphDef version 7: Use TopKV2 instead
   9972 //
   9973 // If the input is a vector (rank-1), finds the `k` largest entries in the vector
   9974 // and outputs their values and indices as vectors.  Thus `values[j]` is the
   9975 // `j`-th largest entry in `input`, and its index is `indices[j]`.
   9976 //
   9977 // For matrices (resp. higher rank input), computes the top `k` entries in each
   9978 // row (resp. vector along the last dimension).  Thus,
   9979 //
   9980 //     values.shape = indices.shape = input.shape[:-1] + [k]
   9981 //
   9982 // If two elements are equal, the lower-index element appears first.
   9983 //
   9984 // If `k` varies dynamically, use `TopKV2` below.
   9985 //
   9986 // Arguments:
   9987 //	input: 1-D or higher with last dimension at least `k`.
   9988 //	k: Number of top elements to look for along the last dimension (along each
   9989 // row for matrices).
   9990 //
// Returns:
//	values: The `k` largest elements along each last dimensional slice.
//	indices: The indices of `values` within the last dimension of `input`.
   9992 func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output) {
   9993 	if scope.Err() != nil {
   9994 		return
   9995 	}
   9996 	attrs := map[string]interface{}{"k": k}
   9997 	for _, a := range optional {
   9998 		a(attrs)
   9999 	}
   10000 	opspec := tf.OpSpec{
   10001 		Type: "TopK",
   10002 		Input: []tf.Input{
   10003 			input,
   10004 		},
   10005 		Attrs: attrs,
   10006 	}
   10007 	op := scope.AddOperation(opspec)
   10008 	return op.Output(0), op.Output(1)
   10009 }
   10010 
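// Example: the two largest entries of [1, 5, 3] and their positions:
//
// ```go
// s := NewScope()
// values, indices := TopK(s, Const(s, []float32{1, 5, 3}), 2)
// _, _ = values, indices // values: [5, 3], indices: [1, 2]
// ```
//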
   10011 // Transforms a Tensor into a serialized TensorProto proto.
   10012 //
   10013 // Arguments:
   10014 //	tensor: A Tensor of type `T`.
   10015 //
   10016 // Returns A serialized TensorProto proto of the input tensor.
   10017 func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
   10018 	if scope.Err() != nil {
   10019 		return
   10020 	}
   10021 	opspec := tf.OpSpec{
   10022 		Type: "SerializeTensor",
   10023 		Input: []tf.Input{
   10024 			tensor,
   10025 		},
   10026 	}
   10027 	op := scope.AddOperation(opspec)
   10028 	return op.Output(0)
   10029 }
   10030 
   10031 // MatrixSolveAttr is an optional argument to MatrixSolve.
   10032 type MatrixSolveAttr func(optionalAttr)
   10033 
   10034 // MatrixSolveAdjoint sets the optional adjoint attribute to value.
   10035 //
   10036 // value: Boolean indicating whether to solve with `matrix` or its (block-wise)
   10037 // adjoint.
   10038 // If not specified, defaults to false
   10039 func MatrixSolveAdjoint(value bool) MatrixSolveAttr {
   10040 	return func(m optionalAttr) {
   10041 		m["adjoint"] = value
   10042 	}
   10043 }
   10044 
   10045 // Solves systems of linear equations.
   10046 //
   10047 // `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
   10048 // form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
// a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
   10050 // satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
   10051 // If `adjoint` is `True` then each output matrix satisfies
   10052 // `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
   10053 //
   10054 // Arguments:
   10055 //	matrix: Shape is `[..., M, M]`.
   10056 //	rhs: Shape is `[..., M, K]`.
   10057 //
   10058 // Returns Shape is `[..., M, K]`.
   10059 func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output) {
   10060 	if scope.Err() != nil {
   10061 		return
   10062 	}
   10063 	attrs := map[string]interface{}{}
   10064 	for _, a := range optional {
   10065 		a(attrs)
   10066 	}
   10067 	opspec := tf.OpSpec{
   10068 		Type: "MatrixSolve",
   10069 		Input: []tf.Input{
   10070 			matrix, rhs,
   10071 		},
   10072 		Attrs: attrs,
   10073 	}
   10074 	op := scope.AddOperation(opspec)
   10075 	return op.Output(0)
   10076 }
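
// A minimal sketch of solving a single 2x2 system A * x = rhs with
// MatrixSolve. NewScope and Const are assumed helpers from this package's
// hand-written files, not part of this generated file.
func exampleMatrixSolve() tf.Output {
	s := NewScope()
	a := Const(s, [][]float32{{2, 0}, {0, 4}}) // matrix, shape [2, 2]
	b := Const(s, [][]float32{{6}, {8}})       // rhs, shape [2, 1]
	// The output x satisfies a * x = b; here x = [[3], [2]].
	return MatrixSolve(s, a, b)
}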
   10077 
   10078 // Looks up keys in a table, outputs the corresponding values.
   10079 //
// The tensor `keys` must be of the same type as the keys of the table.
   10081 // The output `values` is of the type of the table values.
   10082 //
   10083 // The scalar `default_value` is the value output for keys not present in the
   10084 // table. It must also be of the same type as the table values.
   10085 //
   10086 // Arguments:
   10087 //	table_handle: Handle to the table.
   10088 //	keys: Any shape.  Keys to look up.
   10089 //
   10090 //
// Returns Same shape as `keys`.  Values found in the table, or `default_value`
// for missing keys.
   10093 func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output) {
   10094 	if scope.Err() != nil {
   10095 		return
   10096 	}
   10097 	opspec := tf.OpSpec{
   10098 		Type: "LookupTableFindV2",
   10099 		Input: []tf.Input{
   10100 			table_handle, keys, default_value,
   10101 		},
   10102 	}
   10103 	op := scope.AddOperation(opspec)
   10104 	return op.Output(0)
   10105 }
   10106 
   10107 // Inverse 3D fast Fourier transform.
   10108 //
   10109 // Computes the inverse 3-dimensional discrete Fourier transform over the
   10110 // inner-most 3 dimensions of `input`.
   10111 //
   10112 // Arguments:
   10113 //	input: A complex64 tensor.
   10114 //
   10115 // Returns A complex64 tensor of the same shape as `input`. The inner-most 3
   10116 //   dimensions of `input` are replaced with their inverse 3D Fourier transform.
   10117 //
   10118 // @compatibility(numpy)
   10119 // Equivalent to np.fft.ifftn with 3 dimensions.
   10120 // @end_compatibility
   10121 func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
   10122 	if scope.Err() != nil {
   10123 		return
   10124 	}
   10125 	opspec := tf.OpSpec{
   10126 		Type: "IFFT3D",
   10127 		Input: []tf.Input{
   10128 			input,
   10129 		},
   10130 	}
   10131 	op := scope.AddOperation(opspec)
   10132 	return op.Output(0)
   10133 }
   10134 
   10135 // Adds `bias` to `value`.
   10136 //
// This is a deprecated version of BiasAdd and will soon be removed.
   10138 //
   10139 // This is a special case of `tf.add` where `bias` is restricted to be 1-D.
   10140 // Broadcasting is supported, so `value` may have any number of dimensions.
   10141 //
   10142 // Arguments:
   10143 //	value: Any number of dimensions.
   10144 //	bias: 1-D with size the last dimension of `value`.
   10145 //
   10146 // Returns Broadcasted sum of `value` and `bias`.
   10147 func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) {
   10148 	if scope.Err() != nil {
   10149 		return
   10150 	}
   10151 	opspec := tf.OpSpec{
   10152 		Type: "BiasAddV1",
   10153 		Input: []tf.Input{
   10154 			value, bias,
   10155 		},
   10156 	}
   10157 	op := scope.AddOperation(opspec)
   10158 	return op.Output(0)
   10159 }
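
// A minimal sketch of BiasAddV1, broadcasting a 1-D bias over a matrix.
// NewScope and Const are assumed helpers from this package's hand-written
// files, not part of this generated file.
func exampleBiasAddV1() tf.Output {
	s := NewScope()
	value := Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
	bias := Const(s, []float32{10, 20, 30})              // 1-D, size 3
	// The bias is added to each row: [[11, 22, 33], [14, 25, 36]].
	return BiasAddV1(s, value, bias)
}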
   10160 
   10161 // Reverses specific dimensions of a tensor.
   10162 //
   10163 // NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
   10164 // `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
   10165 //
// Given a `tensor` and an `int32` tensor `axis` representing the set of
// dimensions of `tensor` to reverse, this operation reverses each dimension
// `i` for which there exists `j` s.t. `axis[j] == i`.
//
// `tensor` can have up to 8 dimensions. `axis` may contain zero or more
// entries. If an index is specified more than once, an InvalidArgument error
// is raised.
   10173 //
   10174 // For example:
   10175 //
   10176 // ```
   10177 // # tensor 't' is [[[[ 0,  1,  2,  3],
   10178 // #                  [ 4,  5,  6,  7],
   10179 // #                  [ 8,  9, 10, 11]],
   10180 // #                 [[12, 13, 14, 15],
   10181 // #                  [16, 17, 18, 19],
   10182 // #                  [20, 21, 22, 23]]]]
   10183 // # tensor 't' shape is [1, 2, 3, 4]
   10184 //
   10185 // # 'dims' is [3] or 'dims' is [-1]
   10186 // reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
   10187 //                         [ 7,  6,  5,  4],
//                         [11, 10,  9,  8]],
   10189 //                        [[15, 14, 13, 12],
   10190 //                         [19, 18, 17, 16],
   10191 //                         [23, 22, 21, 20]]]]
   10192 //
   10193 // # 'dims' is '[1]' (or 'dims' is '[-3]')
   10194 // reverse(t, dims) ==> [[[[12, 13, 14, 15],
   10195 //                         [16, 17, 18, 19],
//                         [20, 21, 22, 23]],
   10197 //                        [[ 0,  1,  2,  3],
   10198 //                         [ 4,  5,  6,  7],
   10199 //                         [ 8,  9, 10, 11]]]]
   10200 //
   10201 // # 'dims' is '[2]' (or 'dims' is '[-2]')
   10202 // reverse(t, dims) ==> [[[[8, 9, 10, 11],
   10203 //                         [4, 5, 6, 7],
//                         [0, 1, 2, 3]],
   10205 //                        [[20, 21, 22, 23],
   10206 //                         [16, 17, 18, 19],
   10207 //                         [12, 13, 14, 15]]]]
   10208 // ```
   10209 //
   10210 // Arguments:
   10211 //	tensor: Up to 8-D.
   10212 //	axis: 1-D. The indices of the dimensions to reverse. Must be in the range
   10213 // `[-rank(tensor), rank(tensor))`.
   10214 //
   10215 // Returns The same shape as `tensor`.
   10216 func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
   10217 	if scope.Err() != nil {
   10218 		return
   10219 	}
   10220 	opspec := tf.OpSpec{
   10221 		Type: "ReverseV2",
   10222 		Input: []tf.Input{
   10223 			tensor, axis,
   10224 		},
   10225 	}
   10226 	op := scope.AddOperation(opspec)
   10227 	return op.Output(0)
   10228 }
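
// A minimal sketch of ReverseV2, reversing the last dimension of a 2-D
// tensor. NewScope and Const are assumed helpers from this package's
// hand-written files, not part of this generated file.
func exampleReverseV2() tf.Output {
	s := NewScope()
	t := Const(s, [][]int32{{0, 1, 2}, {3, 4, 5}})
	axis := Const(s, []int32{-1}) // -1 wraps around to the last dimension
	// Result: [[2, 1, 0], [5, 4, 3]].
	return ReverseV2(s, t, axis)
}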
   10229 
   10230 // RealAttr is an optional argument to Real.
   10231 type RealAttr func(optionalAttr)
   10232 
   10233 // RealTout sets the optional Tout attribute to value.
   10234 // If not specified, defaults to DT_FLOAT
   10235 func RealTout(value tf.DataType) RealAttr {
   10236 	return func(m optionalAttr) {
   10237 		m["Tout"] = value
   10238 	}
   10239 }
   10240 
   10241 // Returns the real part of a complex number.
   10242 //
   10243 // Given a tensor `input` of complex numbers, this operation returns a tensor of
   10244 // type `float` that is the real part of each element in `input`. All elements in
   10245 // `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
   10246 //  part returned by this operation and *b* is the imaginary part.
   10247 //
   10248 // For example:
   10249 //
   10250 // ```
   10251 // # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
   10252 // tf.real(input) ==> [-2.25, 3.25]
   10253 // ```
   10254 func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output) {
   10255 	if scope.Err() != nil {
   10256 		return
   10257 	}
   10258 	attrs := map[string]interface{}{}
   10259 	for _, a := range optional {
   10260 		a(attrs)
   10261 	}
   10262 	opspec := tf.OpSpec{
   10263 		Type: "Real",
   10264 		Input: []tf.Input{
   10265 			input,
   10266 		},
   10267 		Attrs: attrs,
   10268 	}
   10269 	op := scope.AddOperation(opspec)
   10270 	return op.Output(0)
   10271 }
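
// A minimal sketch of Real, mirroring the example above. NewScope and Const
// are assumed helpers from this package's hand-written files; that Const
// accepts a []complex64 value is likewise an assumption here.
func exampleReal() tf.Output {
	s := NewScope()
	in := Const(s, []complex64{-2.25 + 4.75i, 3.25 + 5.75i})
	// Result: [-2.25, 3.25].
	return Real(s, in)
}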
   10272 
   10273 // AudioSummaryAttr is an optional argument to AudioSummary.
   10274 type AudioSummaryAttr func(optionalAttr)
   10275 
   10276 // AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
   10277 //
   10278 // value: Max number of batch elements to generate audio for.
   10279 // If not specified, defaults to 3
   10280 //
   10281 // REQUIRES: value >= 1
   10282 func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr {
   10283 	return func(m optionalAttr) {
   10284 		m["max_outputs"] = value
   10285 	}
   10286 }
   10287 
   10288 // Outputs a `Summary` protocol buffer with audio.
   10289 //
   10290 // DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
   10291 //
   10292 // The summary has up to `max_outputs` summary values containing audio. The
   10293 // audio is built from `tensor` which must be 3-D with shape `[batch_size,
   10294 // frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
   10295 // assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
   10296 //
   10297 // The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
   10298 // build the `tag` of the summary values:
   10299 //
   10300 // *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
   10301 // *  If `max_outputs` is greater than 1, the summary value tags are
   10302 //    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
   10303 //
   10304 // Arguments:
   10305 //	tag: Scalar. Used to build the `tag` attribute of the summary values.
   10306 //	tensor: 2-D of shape `[batch_size, frames]`.
   10307 //	sample_rate: The sample rate of the signal in hertz.
   10308 //
   10309 // Returns Scalar. Serialized `Summary` protocol buffer.
   10310 func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output) {
   10311 	if scope.Err() != nil {
   10312 		return
   10313 	}
   10314 	attrs := map[string]interface{}{"sample_rate": sample_rate}
   10315 	for _, a := range optional {
   10316 		a(attrs)
   10317 	}
   10318 	opspec := tf.OpSpec{
   10319 		Type: "AudioSummary",
   10320 		Input: []tf.Input{
   10321 			tag, tensor,
   10322 		},
   10323 		Attrs: attrs,
   10324 	}
   10325 	op := scope.AddOperation(opspec)
   10326 	return op.Output(0)
   10327 }
   10328 
   10329 // QrAttr is an optional argument to Qr.
   10330 type QrAttr func(optionalAttr)
   10331 
   10332 // QrFullMatrices sets the optional full_matrices attribute to value.
   10333 //
   10334 // value: If true, compute full-sized `q` and `r`. If false
   10335 // (the default), compute only the leading `P` columns of `q`.
   10336 // If not specified, defaults to false
   10337 func QrFullMatrices(value bool) QrAttr {
   10338 	return func(m optionalAttr) {
   10339 		m["full_matrices"] = value
   10340 	}
   10341 }
   10342 
   10343 // Computes the QR decompositions of one or more matrices.
   10344 //
   10345 // Computes the QR decomposition of each inner matrix in `tensor` such that
// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
   10347 //
   10348 // ```python
   10349 // # a is a tensor.
   10350 // # q is a tensor of orthonormal matrices.
   10351 // # r is a tensor of upper triangular matrices.
   10352 // q, r = qr(a)
   10353 // q_full, r_full = qr(a, full_matrices=True)
   10354 // ```
   10355 //
   10356 // Arguments:
   10357 //	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
   10358 // form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
   10359 //
// Returns:
//	q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
// `[..., M, M]`.
//	r: Triangular factor. If `full_matrices` is `False` then shape is
// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
   10364 func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output) {
   10365 	if scope.Err() != nil {
   10366 		return
   10367 	}
   10368 	attrs := map[string]interface{}{}
   10369 	for _, a := range optional {
   10370 		a(attrs)
   10371 	}
   10372 	opspec := tf.OpSpec{
   10373 		Type: "Qr",
   10374 		Input: []tf.Input{
   10375 			input,
   10376 		},
   10377 		Attrs: attrs,
   10378 	}
   10379 	op := scope.AddOperation(opspec)
   10380 	return op.Output(0), op.Output(1)
   10381 }
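
// A minimal sketch of Qr on a single [3, 2] matrix, so P = min(M, N) = 2.
// NewScope and Const are assumed helpers from this package's hand-written
// files, not part of this generated file.
func exampleQr() (q, r tf.Output) {
	s := NewScope()
	a := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
	// With the default full_matrices=false, q has shape [3, 2] and r has
	// shape [2, 2]. Qr(s, a, QrFullMatrices(true)) would instead give a
	// q of shape [3, 3] and an r of shape [3, 2].
	return Qr(s, a)
}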
   10382 
   10383 // Records the bytes size of each element of `input_dataset` in a StatsAggregator.
   10384 func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   10385 	if scope.Err() != nil {
   10386 		return
   10387 	}
   10388 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   10389 	opspec := tf.OpSpec{
   10390 		Type: "BytesProducedStatsDataset",
   10391 		Input: []tf.Input{
   10392 			input_dataset, tag,
   10393 		},
   10394 		Attrs: attrs,
   10395 	}
   10396 	op := scope.AddOperation(opspec)
   10397 	return op.Output(0)
   10398 }
   10399 
   10400 // ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
   10401 type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)
   10402 
   10403 // ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
   10404 //
   10405 // value: If True, the subtraction will be protected by a lock;
   10406 // otherwise the behavior is undefined, but may exhibit less contention.
   10407 // If not specified, defaults to false
   10408 func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr {
   10409 	return func(m optionalAttr) {
   10410 		m["use_locking"] = value
   10411 	}
   10412 }
   10413 
// Sparse update of '*var' via the FOBOS algorithm with fixed learning rate.
//
// That is, for the rows for which we have grad, we update var as follows:
   10417 // prox_v = var - alpha * grad
   10418 // var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
   10419 //
   10420 // Arguments:
   10421 //	var_: Should be from a Variable().
   10422 //	alpha: Scaling factor. Must be a scalar.
   10423 //	l1: L1 regularization. Must be a scalar.
   10424 //	l2: L2 regularization. Must be a scalar.
   10425 //	grad: The gradient.
   10426 //	indices: A vector of indices into the first dimension of var and accum.
   10427 //
   10428 // Returns the created operation.
   10429 func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
   10430 	if scope.Err() != nil {
   10431 		return
   10432 	}
   10433 	attrs := map[string]interface{}{}
   10434 	for _, a := range optional {
   10435 		a(attrs)
   10436 	}
   10437 	opspec := tf.OpSpec{
   10438 		Type: "ResourceSparseApplyProximalGradientDescent",
   10439 		Input: []tf.Input{
   10440 			var_, alpha, l1, l2, grad, indices,
   10441 		},
   10442 		Attrs: attrs,
   10443 	}
   10444 	return scope.AddOperation(opspec)
   10445 }
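
// The FOBOS update above, written out for a single element as a plain-Go
// sketch (an illustration of the formula only; the actual op applies it
// row-wise to the rows selected by `indices`, on the device). It returns the
// updated var value.
func fobosStep(v, alpha, l1, l2, grad float64) float64 {
	proxV := v - alpha*grad // prox_v = var - alpha * grad
	sign := 1.0
	if proxV < 0 {
		sign = -1.0
	}
	mag := sign*proxV - alpha*l1 // |prox_v| - alpha*l1
	if mag < 0 {
		mag = 0
	}
	// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1, 0}
	return sign / (1 + alpha*l2) * mag
}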
   10446 
   10447 // MeanAttr is an optional argument to Mean.
   10448 type MeanAttr func(optionalAttr)
   10449 
   10450 // MeanKeepDims sets the optional keep_dims attribute to value.
   10451 //
   10452 // value: If true, retain reduced dimensions with length 1.
   10453 // If not specified, defaults to false
   10454 func MeanKeepDims(value bool) MeanAttr {
   10455 	return func(m optionalAttr) {
   10456 		m["keep_dims"] = value
   10457 	}
   10458 }
   10459 
   10460 // Computes the mean of elements across dimensions of a tensor.
   10461 //
   10462 // Reduces `input` along the dimensions given in `axis`. Unless
   10463 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   10464 // `axis`. If `keep_dims` is true, the reduced dimensions are
   10465 // retained with length 1.
   10466 //
   10467 // Arguments:
   10468 //	input: The tensor to reduce.
   10469 //	axis: The dimensions to reduce. Must be in the range
   10470 // `[-rank(input), rank(input))`.
   10471 //
   10472 // Returns The reduced tensor.
   10473 func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output) {
   10474 	if scope.Err() != nil {
   10475 		return
   10476 	}
   10477 	attrs := map[string]interface{}{}
   10478 	for _, a := range optional {
   10479 		a(attrs)
   10480 	}
   10481 	opspec := tf.OpSpec{
   10482 		Type: "Mean",
   10483 		Input: []tf.Input{
   10484 			input, axis,
   10485 		},
   10486 		Attrs: attrs,
   10487 	}
   10488 	op := scope.AddOperation(opspec)
   10489 	return op.Output(0)
   10490 }
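
// A minimal sketch of Mean, reducing a [2, 2] tensor over axis 0. NewScope
// and Const are assumed helpers from this package's hand-written files, not
// part of this generated file.
func exampleMean() tf.Output {
	s := NewScope()
	x := Const(s, [][]float32{{1, 2}, {3, 4}})
	axis := Const(s, []int32{0})
	// Result: [2, 3], with shape [2]. With MeanKeepDims(true) the shape
	// would instead be [1, 2].
	return Mean(s, x, axis)
}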
   10491 
   10492 // InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
   10493 type InitializeTableFromTextFileV2Attr func(optionalAttr)
   10494 
   10495 // InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
   10496 //
   10497 // value: Number of elements of the file, use -1 if unknown.
   10498 // If not specified, defaults to -1
   10499 //
   10500 // REQUIRES: value >= -1
   10501 func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr {
   10502 	return func(m optionalAttr) {
   10503 		m["vocab_size"] = value
   10504 	}
   10505 }
   10506 
   10507 // InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
   10508 //
   10509 // value: Delimiter to separate fields in a line.
   10510 // If not specified, defaults to "\t"
   10511 func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr {
   10512 	return func(m optionalAttr) {
   10513 		m["delimiter"] = value
   10514 	}
   10515 }
   10516 
   10517 // Initializes a table from a text file.
   10518 //
// It inserts one key-value pair into the table for each line of the file.
// The key and value are extracted from the whole line content, from an element
// of the line split on `delimiter`, or from the line number (starting from
// zero). Which of these is used for the key and for the value is specified by
// `key_index` and `value_index`.
   10524 //
// - A value of -1 means use the line number (starting from zero); expects `int64`.
// - A value of -2 means use the whole line content; expects `string`.
   10527 // - A value >= 0 means use the index (starting at zero) of the split line based
   10528 //   on `delimiter`.
   10529 //
   10530 // Arguments:
   10531 //	table_handle: Handle to a table which will be initialized.
   10532 //	filename: Filename of a vocabulary text file.
   10533 //	key_index: Column index in a line to get the table `key` values from.
   10534 //	value_index: Column index that represents information of a line to get the table
   10535 // `value` values from.
   10536 //
   10537 // Returns the created operation.
   10538 func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation) {
   10539 	if scope.Err() != nil {
   10540 		return
   10541 	}
   10542 	attrs := map[string]interface{}{"key_index": key_index, "value_index": value_index}
   10543 	for _, a := range optional {
   10544 		a(attrs)
   10545 	}
   10546 	opspec := tf.OpSpec{
   10547 		Type: "InitializeTableFromTextFileV2",
   10548 		Input: []tf.Input{
   10549 			table_handle, filename,
   10550 		},
   10551 		Attrs: attrs,
   10552 	}
   10553 	return scope.AddOperation(opspec)
   10554 }
   10555 
   10556 // QuantizedReluAttr is an optional argument to QuantizedRelu.
   10557 type QuantizedReluAttr func(optionalAttr)
   10558 
   10559 // QuantizedReluOutType sets the optional out_type attribute to value.
   10560 // If not specified, defaults to DT_QUINT8
   10561 func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr {
   10562 	return func(m optionalAttr) {
   10563 		m["out_type"] = value
   10564 	}
   10565 }
   10566 
   10567 // Computes Quantized Rectified Linear: `max(features, 0)`
   10568 //
   10569 // Arguments:
   10570 //
   10571 //	min_features: The float value that the lowest quantized value represents.
   10572 //	max_features: The float value that the highest quantized value represents.
   10573 //
// Returns:
//	activations: Has the same output shape as "features".
//	min_activations: The float value that the lowest quantized value represents.
//	max_activations: The float value that the highest quantized value represents.
   10575 func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
   10576 	if scope.Err() != nil {
   10577 		return
   10578 	}
   10579 	attrs := map[string]interface{}{}
   10580 	for _, a := range optional {
   10581 		a(attrs)
   10582 	}
   10583 	opspec := tf.OpSpec{
   10584 		Type: "QuantizedRelu",
   10585 		Input: []tf.Input{
   10586 			features, min_features, max_features,
   10587 		},
   10588 		Attrs: attrs,
   10589 	}
   10590 	op := scope.AddOperation(opspec)
   10591 	return op.Output(0), op.Output(1), op.Output(2)
   10592 }
   10593 
   10594 // Reshapes a SparseTensor to represent values in a new dense shape.
   10595 //
   10596 // This operation has the same semantics as reshape on the represented dense
   10597 // tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
   10598 //
   10599 // If one component of `new_shape` is the special value -1, the size of that
   10600 // dimension is computed so that the total dense size remains constant.  At
   10601 // most one component of `new_shape` can be -1.  The number of dense elements
   10602 // implied by `new_shape` must be the same as the number of dense elements
   10603 // originally implied by `input_shape`.
   10604 //
   10605 // Reshaping does not affect the order of values in the SparseTensor.
   10606 //
   10607 // If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
   10608 // has length `R_out`, then `input_indices` has shape `[N, R_in]`,
   10609 // `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
   10610 // `output_shape` has length `R_out`.
   10611 //
   10612 // Arguments:
   10613 //	input_indices: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
   10614 // SparseTensor.
   10615 //	input_shape: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
   10616 //	new_shape: 1-D.  `R_out` vector with the requested new dense shape.
   10617 //
// Returns:
//	output_indices: 2-D.  `N x R_out` matrix with the updated indices of non-empty
// values in the output SparseTensor.
//	output_shape: 1-D.  `R_out` vector with the full dense shape of the output
// SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
// filled in.
   10622 func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output) {
   10623 	if scope.Err() != nil {
   10624 		return
   10625 	}
   10626 	opspec := tf.OpSpec{
   10627 		Type: "SparseReshape",
   10628 		Input: []tf.Input{
   10629 			input_indices, input_shape, new_shape,
   10630 		},
   10631 	}
   10632 	op := scope.AddOperation(opspec)
   10633 	return op.Output(0), op.Output(1)
   10634 }
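
// A minimal sketch of SparseReshape: two non-empty values of a [2, 3]
// SparseTensor are re-indexed into dense shape [3, 2]. NewScope and Const are
// assumed helpers from this package's hand-written files.
func exampleSparseReshape() (indices, shape tf.Output) {
	s := NewScope()
	inIdx := Const(s, [][]int64{{0, 0}, {1, 2}}) // values at [0, 0] and [1, 2]
	inShape := Const(s, []int64{2, 3})
	newShape := Const(s, []int64{3, -1}) // -1: infer the second dimension
	// Row-major positions 0 and 5 become [0, 0] and [2, 1] in shape [3, 2].
	return SparseReshape(s, inIdx, inShape, newShape)
}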
   10635 
   10636 // Deprecated. Use TensorArraySplitV3
   10637 //
   10638 // DEPRECATED at GraphDef version 26: Use TensorArraySplitV3
   10639 func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
   10640 	if scope.Err() != nil {
   10641 		return
   10642 	}
   10643 	opspec := tf.OpSpec{
   10644 		Type: "TensorArraySplitV2",
   10645 		Input: []tf.Input{
   10646 			handle, value, lengths, flow_in,
   10647 		},
   10648 	}
   10649 	op := scope.AddOperation(opspec)
   10650 	return op.Output(0)
   10651 }
   10652 
   10653 // PackAttr is an optional argument to Pack.
   10654 type PackAttr func(optionalAttr)
   10655 
   10656 // PackAxis sets the optional axis attribute to value.
   10657 //
   10658 // value: Dimension along which to pack.  Negative values wrap around, so the
   10659 // valid range is `[-(R+1), R+1)`.
   10660 // If not specified, defaults to 0
   10661 func PackAxis(value int64) PackAttr {
   10662 	return func(m optionalAttr) {
   10663 		m["axis"] = value
   10664 	}
   10665 }
   10666 
   10667 // Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
   10668 //
   10669 // Packs the `N` tensors in `values` into a tensor with rank one higher than each
   10670 // tensor in `values`, by packing them along the `axis` dimension.
// Given a list of tensors of shape `(A, B, C)`:
   10672 //
   10673 // if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
   10674 // if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
   10675 // Etc.
   10676 //
   10677 // For example:
   10678 //
   10679 // ```
   10680 // # 'x' is [1, 4]
   10681 // # 'y' is [2, 5]
   10682 // # 'z' is [3, 6]
   10683 // pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
   10684 // pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
   10685 // ```
   10686 //
   10687 // This is the opposite of `unpack`.
   10688 //
   10689 // Arguments:
   10690 //	values: Must be of same shape and type.
   10691 //
   10692 // Returns The packed tensor.
   10693 func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
   10694 	if scope.Err() != nil {
   10695 		return
   10696 	}
   10697 	attrs := map[string]interface{}{}
   10698 	for _, a := range optional {
   10699 		a(attrs)
   10700 	}
   10701 	opspec := tf.OpSpec{
   10702 		Type: "Pack",
   10703 		Input: []tf.Input{
   10704 			tf.OutputList(values),
   10705 		},
   10706 		Attrs: attrs,
   10707 	}
   10708 	op := scope.AddOperation(opspec)
   10709 	return op.Output(0)
   10710 }
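
// A minimal sketch of Pack, mirroring the example above. NewScope and Const
// are assumed helpers from this package's hand-written files.
func examplePack() tf.Output {
	s := NewScope()
	x := Const(s, []int32{1, 4})
	y := Const(s, []int32{2, 5})
	z := Const(s, []int32{3, 6})
	// Along the default axis 0: [[1, 4], [2, 5], [3, 6]]. With PackAxis(1)
	// the result would be [[1, 2, 3], [4, 5, 6]].
	return Pack(s, []tf.Output{x, y, z})
}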
   10711 
   10712 // Reorders a SparseTensor into the canonical, row-major ordering.
   10713 //
   10714 // Note that by convention, all sparse ops preserve the canonical ordering along
   10715 // increasing dimension number. The only time ordering can be violated is during
   10716 // manual manipulation of the indices and values vectors to add entries.
   10717 //
   10718 // Reordering does not affect the shape of the SparseTensor.
   10719 //
   10720 // If the tensor has rank `R` and `N` non-empty values, `input_indices` has
   10721 // shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
   10722 //
   10723 // Arguments:
   10724 //	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   10725 // SparseTensor, possibly not in canonical ordering.
   10726 //	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
   10727 //	input_shape: 1-D.  Shape of the input SparseTensor.
   10728 //
// Returns:
//	output_indices: 2-D.  `N x R` matrix with the same indices as `input_indices`, but
// in canonical row-major ordering.
//	output_values: 1-D.  `N` non-empty values corresponding to `output_indices`.
   10731 func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
   10732 	if scope.Err() != nil {
   10733 		return
   10734 	}
   10735 	opspec := tf.OpSpec{
   10736 		Type: "SparseReorder",
   10737 		Input: []tf.Input{
   10738 			input_indices, input_values, input_shape,
   10739 		},
   10740 	}
   10741 	op := scope.AddOperation(opspec)
   10742 	return op.Output(0), op.Output(1)
   10743 }
   10744 
   10745 // Computes rectified linear: `max(features, 0)`.
   10746 func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
   10747 	if scope.Err() != nil {
   10748 		return
   10749 	}
   10750 	opspec := tf.OpSpec{
   10751 		Type: "Relu",
   10752 		Input: []tf.Input{
   10753 			features,
   10754 		},
   10755 	}
   10756 	op := scope.AddOperation(opspec)
   10757 	return op.Output(0)
   10758 }
   10759 
   10760 // ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
   10761 type ResourceApplyAddSignAttr func(optionalAttr)
   10762 
   10763 // ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
   10764 //
   10765 // value: If `True`, updating of the var and m tensors is
   10766 // protected by a lock; otherwise the behavior is undefined, but may exhibit less
   10767 // contention.
   10768 // If not specified, defaults to false
   10769 func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr {
   10770 	return func(m optionalAttr) {
   10771 		m["use_locking"] = value
   10772 	}
   10773 }
   10774 
   10775 // Update '*var' according to the AddSign update.
   10776 //
   10777 // m_t <- beta1 * m_{t-1} + (1 - beta1) * g
// update <- (alpha + sign_decay * sign(g) * sign(m)) * g
   10779 // variable <- variable - lr_t * update
   10780 //
   10781 // Arguments:
   10782 //	var_: Should be from a Variable().
   10783 //	m: Should be from a Variable().
   10784 //	lr: Scaling factor. Must be a scalar.
   10785 //	alpha: Must be a scalar.
   10786 //	sign_decay: Must be a scalar.
   10787 //	beta: Must be a scalar.
   10788 //	grad: The gradient.
   10789 //
   10790 // Returns the created operation.
   10791 func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation) {
   10792 	if scope.Err() != nil {
   10793 		return
   10794 	}
   10795 	attrs := map[string]interface{}{}
   10796 	for _, a := range optional {
   10797 		a(attrs)
   10798 	}
   10799 	opspec := tf.OpSpec{
   10800 		Type: "ResourceApplyAddSign",
   10801 		Input: []tf.Input{
   10802 			var_, m, lr, alpha, sign_decay, beta, grad,
   10803 		},
   10804 		Attrs: attrs,
   10805 	}
   10806 	return scope.AddOperation(opspec)
   10807 }
   10808 
   10809 // FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
   10810 type FractionalMaxPoolGradAttr func(optionalAttr)
   10811 
   10812 // FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
   10813 //
   10814 // value: When set to True, it means when pooling, the values at the boundary
   10815 // of adjacent pooling cells are used by both cells. For example:
   10816 //
   10817 // `index  0  1  2  3  4`
   10818 //
   10819 // `value  20 5  16 3  7`
   10820 //
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
   10822 // The result would be [20, 16] for fractional max pooling.
   10823 // If not specified, defaults to false
   10824 func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr {
   10825 	return func(m optionalAttr) {
   10826 		m["overlapping"] = value
   10827 	}
   10828 }
   10829 
   10830 // Computes gradient of the FractionalMaxPool function.
   10831 //
   10832 // Arguments:
   10833 //	orig_input: Original input for `fractional_max_pool`
   10834 //	orig_output: Original output for `fractional_max_pool`
   10835 //	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
   10836 // w.r.t. the output of `fractional_max_pool`.
//	row_pooling_sequence: Row pooling sequence; forms pooling regions together
// with col_pooling_sequence.
//	col_pooling_sequence: Column pooling sequence; forms pooling regions together
// with row_pooling_sequence.
   10841 //
   10842 // Returns 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
   10843 func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output) {
   10844 	if scope.Err() != nil {
   10845 		return
   10846 	}
   10847 	attrs := map[string]interface{}{}
   10848 	for _, a := range optional {
   10849 		a(attrs)
   10850 	}
   10851 	opspec := tf.OpSpec{
   10852 		Type: "FractionalMaxPoolGrad",
   10853 		Input: []tf.Input{
   10854 			orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence,
   10855 		},
   10856 		Attrs: attrs,
   10857 	}
   10858 	op := scope.AddOperation(opspec)
   10859 	return op.Output(0)
   10860 }
   10861 
   10862 // ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
   10863 type ResourceApplyAdagradDAAttr func(optionalAttr)
   10864 
   10865 // ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
   10866 //
   10867 // value: If True, updating of the var and accum tensors will be protected by
   10868 // a lock; otherwise the behavior is undefined, but may exhibit less contention.
   10869 // If not specified, defaults to false
   10870 func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
   10871 	return func(m optionalAttr) {
   10872 		m["use_locking"] = value
   10873 	}
   10874 }
   10875 
   10876 // Update '*var' according to the proximal adagrad scheme.
   10877 //
   10878 // Arguments:
   10879 //	var_: Should be from a Variable().
   10880 //	gradient_accumulator: Should be from a Variable().
   10881 //	gradient_squared_accumulator: Should be from a Variable().
   10882 //	grad: The gradient.
   10883 //	lr: Scaling factor. Must be a scalar.
   10884 //	l1: L1 regularization. Must be a scalar.
   10885 //	l2: L2 regularization. Must be a scalar.
   10886 //	global_step: Training step number. Must be a scalar.
   10887 //
   10888 // Returns the created operation.
   10889 func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
   10890 	if scope.Err() != nil {
   10891 		return
   10892 	}
   10893 	attrs := map[string]interface{}{}
   10894 	for _, a := range optional {
   10895 		a(attrs)
   10896 	}
   10897 	opspec := tf.OpSpec{
   10898 		Type: "ResourceApplyAdagradDA",
   10899 		Input: []tf.Input{
   10900 			var_, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step,
   10901 		},
   10902 		Attrs: attrs,
   10903 	}
   10904 	return scope.AddOperation(opspec)
   10905 }
   10906 
   10907 // SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
   10908 type SparseReduceMaxSparseAttr func(optionalAttr)
   10909 
   10910 // SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
   10911 //
   10912 // value: If true, retain reduced dimensions with length 1.
   10913 // If not specified, defaults to false
   10914 func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr {
   10915 	return func(m optionalAttr) {
   10916 		m["keep_dims"] = value
   10917 	}
   10918 }
   10919 
   10920 // Computes the max of elements across dimensions of a SparseTensor.
   10921 //
   10922 // This Op takes a SparseTensor and is the sparse counterpart to
   10923 // `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
   10924 // SparseTensor.
   10925 //
   10926 // Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
   10927 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   10928 // `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
   10929 // with length 1.
   10930 //
   10931 // If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
   10932 // with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to the indexing rules in Python.
   10934 //
   10935 // Arguments:
   10936 //	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   10937 // SparseTensor, possibly not in canonical ordering.
   10938 //	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
   10939 //	input_shape: 1-D.  Shape of the input SparseTensor.
   10940 //	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
   10941 func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
   10942 	if scope.Err() != nil {
   10943 		return
   10944 	}
   10945 	attrs := map[string]interface{}{}
   10946 	for _, a := range optional {
   10947 		a(attrs)
   10948 	}
   10949 	opspec := tf.OpSpec{
   10950 		Type: "SparseReduceMaxSparse",
   10951 		Input: []tf.Input{
   10952 			input_indices, input_values, input_shape, reduction_axes,
   10953 		},
   10954 		Attrs: attrs,
   10955 	}
   10956 	op := scope.AddOperation(opspec)
   10957 	return op.Output(0), op.Output(1), op.Output(2)
   10958 }
   10959 
   10960 // Creates a dataset that emits the outputs of `input_dataset` `count` times.
   10961 //
   10962 // Arguments:
   10963 //
   10964 //	count: A scalar representing the number of times that `input_dataset` should
   10965 // be repeated. A value of `-1` indicates that it should be repeated infinitely.
   10966 //
   10967 //
   10968 func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   10969 	if scope.Err() != nil {
   10970 		return
   10971 	}
   10972 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   10973 	opspec := tf.OpSpec{
   10974 		Type: "RepeatDataset",
   10975 		Input: []tf.Input{
   10976 			input_dataset, count,
   10977 		},
   10978 		Attrs: attrs,
   10979 	}
   10980 	op := scope.AddOperation(opspec)
   10981 	return op.Output(0)
   10982 }
   10983 
   10984 // AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
   10985 type AddManySparseToTensorsMapAttr func(optionalAttr)
   10986 
   10987 // AddManySparseToTensorsMapContainer sets the optional container attribute to value.
   10988 //
   10989 // value: The container name for the `SparseTensorsMap` created by this op.
   10990 // If not specified, defaults to ""
   10991 func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr {
   10992 	return func(m optionalAttr) {
   10993 		m["container"] = value
   10994 	}
   10995 }
   10996 
   10997 // AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
   10998 //
   10999 // value: The shared name for the `SparseTensorsMap` created by this op.
   11000 // If blank, the new Operation's unique name is used.
   11001 // If not specified, defaults to ""
   11002 func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr {
   11003 	return func(m optionalAttr) {
   11004 		m["shared_name"] = value
   11005 	}
   11006 }
   11007 
   11008 // Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
   11009 //
   11010 // A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
   11011 // `sparse_values`, and `sparse_shape`, where
   11012 //
   11013 // ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
   11014 //
   11015 // An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
   11016 // having a first `sparse_indices` column taking values between `[0, N)`, where
   11017 // the minibatch size `N == sparse_shape[0]`.
   11018 //
   11019 // The input `SparseTensor` must have rank `R` greater than 1, and the first
   11020 // dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
   11021 // must be sorted in increasing order of this first dimension.  The stored
   11022 // `SparseTensor` objects pointed to by each row of the output `sparse_handles`
   11023 // will have rank `R-1`.
   11024 //
   11025 // The `SparseTensor` values can then be read out as part of a minibatch by passing
   11026 // the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
   11027 // the correct `SparseTensorsMap` is accessed, ensure that the same
   11028 // `container` and `shared_name` are passed to that Op.  If no `shared_name`
   11029 // is provided here, instead use the *name* of the Operation created by calling
   11030 // `AddManySparseToTensorsMap` as the `shared_name` passed to
   11031 // `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
   11032 //
   11033 // Arguments:
   11034 //	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
   11035 // `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
   11036 //	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
   11037 //	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
   11038 // The minibatch size `N == sparse_shape[0]`.
   11039 //
   11040 // Returns 1-D.  The handles of the `SparseTensor` now stored in the
   11041 // `SparseTensorsMap`.  Shape: `[N]`.
   11042 func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output) {
   11043 	if scope.Err() != nil {
   11044 		return
   11045 	}
   11046 	attrs := map[string]interface{}{}
   11047 	for _, a := range optional {
   11048 		a(attrs)
   11049 	}
   11050 	opspec := tf.OpSpec{
   11051 		Type: "AddManySparseToTensorsMap",
   11052 		Input: []tf.Input{
   11053 			sparse_indices, sparse_values, sparse_shape,
   11054 		},
   11055 		Attrs: attrs,
   11056 	}
   11057 	op := scope.AddOperation(opspec)
   11058 	return op.Output(0)
   11059 }
   11060 
   11061 // MinAttr is an optional argument to Min.
   11062 type MinAttr func(optionalAttr)
   11063 
   11064 // MinKeepDims sets the optional keep_dims attribute to value.
   11065 //
   11066 // value: If true, retain reduced dimensions with length 1.
   11067 // If not specified, defaults to false
   11068 func MinKeepDims(value bool) MinAttr {
   11069 	return func(m optionalAttr) {
   11070 		m["keep_dims"] = value
   11071 	}
   11072 }
   11073 
   11074 // Computes the minimum of elements across dimensions of a tensor.
   11075 //
   11076 // Reduces `input` along the dimensions given in `axis`. Unless
   11077 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   11078 // `axis`. If `keep_dims` is true, the reduced dimensions are
   11079 // retained with length 1.
   11080 //
   11081 // Arguments:
   11082 //	input: The tensor to reduce.
   11083 //	axis: The dimensions to reduce. Must be in the range
   11084 // `[-rank(input), rank(input))`.
   11085 //
   11086 // Returns The reduced tensor.
   11087 func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output) {
   11088 	if scope.Err() != nil {
   11089 		return
   11090 	}
   11091 	attrs := map[string]interface{}{}
   11092 	for _, a := range optional {
   11093 		a(attrs)
   11094 	}
   11095 	opspec := tf.OpSpec{
   11096 		Type: "Min",
   11097 		Input: []tf.Input{
   11098 			input, axis,
   11099 		},
   11100 		Attrs: attrs,
   11101 	}
   11102 	op := scope.AddOperation(opspec)
   11103 	return op.Output(0)
   11104 }
   11105 
   11106 // Shuffle dimensions of x according to a permutation.
   11107 //
   11108 // The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
   11109 //   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
   11110 func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
   11111 	if scope.Err() != nil {
   11112 		return
   11113 	}
   11114 	opspec := tf.OpSpec{
   11115 		Type: "Transpose",
   11116 		Input: []tf.Input{
   11117 			x, perm,
   11118 		},
   11119 	}
   11120 	op := scope.AddOperation(opspec)
   11121 	return op.Output(0)
   11122 }
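
// A minimal sketch of Transpose, swapping the two dimensions of a matrix.
// NewScope and Const are assumed helpers from this package's hand-written
// files.
func exampleTranspose() tf.Output {
	s := NewScope()
	x := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
	perm := Const(s, []int32{1, 0})
	// Result has shape [3, 2]: [[1, 4], [2, 5], [3, 6]].
	return Transpose(s, x, perm)
}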
   11123 
   11124 // DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
   11125 type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)
   11126 
   11127 // DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
   11128 //
   11129 // value: Specify the data format of the input and output data. With the
   11130 // default format "NHWC", the data is stored in the order of:
   11131 //     [batch, height, width, channels].
   11132 // Alternatively, the format could be "NCHW", the data storage order of:
   11133 //     [batch, channels, height, width].
   11134 // If not specified, defaults to "NHWC"
   11135 func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr {
   11136 	return func(m optionalAttr) {
   11137 		m["data_format"] = value
   11138 	}
   11139 }
   11140 
   11141 // DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
   11142 //
   11143 // value: 1-D tensor of length 4.  The dilation factor for each dimension of
   11144 // `input`. If set to k > 1, there will be k-1 skipped cells between each filter
   11145 // element on that dimension. The dimension order is determined by the value of
   11146 // `data_format`, see above for details. Dilations in the batch and depth
   11147 // dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
   11149 func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
   11150 	return func(m optionalAttr) {
   11151 		m["dilations"] = value
   11152 	}
   11153 }
   11154 
   11155 // Computes the gradients of depthwise convolution with respect to the filter.
   11156 //
   11157 // Arguments:
   11158 //	input: 4-D with shape based on `data_format`.  For example, if
   11159 // `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
   11160 // in_width, in_channels]` tensor.
   11161 //	filter_sizes: An integer vector representing the tensor shape of `filter`,
   11162 // where `filter` is a 4-D
   11163 // `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
   11164 //	out_backprop: 4-D with shape  based on `data_format`.
   11165 // For example, if `data_format` is 'NHWC' then
   11166 // out_backprop shape is `[batch, out_height, out_width, out_channels]`.
   11167 // Gradients w.r.t. the output of the convolution.
   11168 //	strides: The stride of the sliding window for each dimension of the input
   11169 // of the convolution.
   11170 //	padding: The type of padding algorithm to use.
   11171 //
   11172 // Returns 4-D with shape
   11173 // `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
   11174 // the `filter` input of the convolution.
   11175 func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output) {
   11176 	if scope.Err() != nil {
   11177 		return
   11178 	}
   11179 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   11180 	for _, a := range optional {
   11181 		a(attrs)
   11182 	}
   11183 	opspec := tf.OpSpec{
   11184 		Type: "DepthwiseConv2dNativeBackpropFilter",
   11185 		Input: []tf.Input{
   11186 			input, filter_sizes, out_backprop,
   11187 		},
   11188 		Attrs: attrs,
   11189 	}
   11190 	op := scope.AddOperation(opspec)
   11191 	return op.Output(0)
   11192 }
   11193 
   11194 // Flushes the writer's unwritten events.
   11195 //
   11196 // Arguments:
   11197 //	writer: A handle to the summary writer resource.
   11198 //
   11199 // Returns the created operation.
   11200 func FlushSummaryWriter(scope *Scope, writer tf.Output) (o *tf.Operation) {
   11201 	if scope.Err() != nil {
   11202 		return
   11203 	}
   11204 	opspec := tf.OpSpec{
   11205 		Type: "FlushSummaryWriter",
   11206 		Input: []tf.Input{
   11207 			writer,
   11208 		},
   11209 	}
   11210 	return scope.AddOperation(opspec)
   11211 }
   11212 
   11213 // QuantizeV2Attr is an optional argument to QuantizeV2.
   11214 type QuantizeV2Attr func(optionalAttr)
   11215 
   11216 // QuantizeV2Mode sets the optional mode attribute to value.
   11217 // If not specified, defaults to "MIN_COMBINED"
   11218 func QuantizeV2Mode(value string) QuantizeV2Attr {
   11219 	return func(m optionalAttr) {
   11220 		m["mode"] = value
   11221 	}
   11222 }
   11223 
   11224 // QuantizeV2RoundMode sets the optional round_mode attribute to value.
   11225 // If not specified, defaults to "HALF_AWAY_FROM_ZERO"
   11226 func QuantizeV2RoundMode(value string) QuantizeV2Attr {
   11227 	return func(m optionalAttr) {
   11228 		m["round_mode"] = value
   11229 	}
   11230 }
   11231 
   11232 // Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
   11233 //
   11234 // [min_range, max_range] are scalar floats that specify the range for
   11235 // the 'input' data. The 'mode' attribute controls exactly which calculations are
   11236 // used to convert the float values to their quantized equivalents.  The
   11237 // 'round_mode' attribute controls which rounding tie-breaking algorithm is used
   11238 // when rounding float values to their quantized equivalents.
   11239 //
   11240 // In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
   11241 //
   11242 // ```
   11243 // out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
   11244 // if T == qint8, out[i] -= (range(T) + 1) / 2.0
   11245 // ```
   11246 // here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
   11247 //
   11248 // *MIN_COMBINED Mode Example*
   11249 //
   11250 // Assume the input is type float and has a possible range of [0.0, 6.0] and the
   11251 // output type is quint8 ([0, 255]). The min_range and max_range values should be
   11252 // specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
   11253 // value of the input by 255/6 and cast to quint8.
   11254 //
   11255 // If the output type was qint8 ([-128, 127]), the operation will additionally
   11256 // subtract each value by 128 prior to casting, so that the range of values aligns
   11257 // with the range of qint8.
   11258 //
   11259 // If the mode is 'MIN_FIRST', then this approach is used:
   11260 //
   11261 // ```
   11262 // num_discrete_values = 1 << (# of bits in T)
   11263 // range_adjust = num_discrete_values / (num_discrete_values - 1)
   11264 // range = (range_max - range_min) * range_adjust
   11265 // range_scale = num_discrete_values / range
   11266 // quantized = round(input * range_scale) - round(range_min * range_scale) +
   11267 //   numeric_limits<T>::min()
   11268 // quantized = max(quantized, numeric_limits<T>::min())
   11269 // quantized = min(quantized, numeric_limits<T>::max())
   11270 // ```
   11271 //
   11272 // The biggest difference between this and MIN_COMBINED is that the minimum range
   11273 // is rounded first, before it's subtracted from the rounded value. With
   11274 // MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
   11275 // and dequantizing will introduce a larger and larger error.
   11276 //
   11277 // *SCALED mode Example*
   11278 //
   11279 // `SCALED` mode matches the quantization approach used in
   11280 // `QuantizeAndDequantize{V2|V3}`.
   11281 //
   11282 // If the mode is `SCALED`, we do not use the full range of the output type,
   11283 // choosing to elide the lowest possible value for symmetry (e.g., output range is
   11284 // -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
   11285 // 0.
   11286 //
   11287 // We first find the range of values in our tensor. The
   11288 // range we use is always centered on 0, so we find m such that
   11289 // ```c++
   11290 //   m = max(abs(input_min), abs(input_max))
   11291 // ```
   11292 //
   11293 // Our input tensor range is then `[-m, m]`.
   11294 //
   11295 // Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
   11296 // If T is signed, this is
   11297 // ```
   11298 //   num_bits = sizeof(T) * 8
   11299 //   [min_fixed, max_fixed] =
//       [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
   11301 // ```
   11302 //
   11303 // Otherwise, if T is unsigned, the fixed-point range is
   11304 // ```
   11305 //   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
   11306 // ```
   11307 //
   11308 // From this we compute our scaling factor, s:
   11309 // ```c++
   11310 //   s = (max_fixed - min_fixed) / (2 * m)
   11311 // ```
   11312 //
   11313 // Now we can quantize the elements of our tensor:
   11314 // ```c++
   11315 // result = round(input * s)
   11316 // ```
   11317 //
   11318 // One thing to watch out for is that the operator may choose to adjust the
   11319 // requested minimum and maximum values slightly during the quantization process,
   11320 // so you should always use the output ports as the range for further calculations.
   11321 // For example, if the requested minimum and maximum values are close to equal,
   11322 // they will be separated by a small epsilon value to prevent ill-formed quantized
   11323 // buffers from being created. Otherwise, you can end up with buffers where all the
   11324 // quantized values map to the same float value, which causes problems for
   11325 // operations that have to perform further calculations on them.
   11326 //
   11327 // Arguments:
   11328 //
   11329 //	min_range: The minimum scalar value possibly produced for the input.
   11330 //	max_range: The maximum scalar value possibly produced for the input.
   11331 //
   11332 //
// Returns:
//	output: The quantized data produced from the float input.
//	output_min: The actual minimum scalar value used for the output.
//	output_max: The actual maximum scalar value used for the output.
   11334 func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
   11335 	if scope.Err() != nil {
   11336 		return
   11337 	}
   11338 	attrs := map[string]interface{}{"T": T}
   11339 	for _, a := range optional {
   11340 		a(attrs)
   11341 	}
   11342 	opspec := tf.OpSpec{
   11343 		Type: "QuantizeV2",
   11344 		Input: []tf.Input{
   11345 			input, min_range, max_range,
   11346 		},
   11347 		Attrs: attrs,
   11348 	}
   11349 	op := scope.AddOperation(opspec)
   11350 	return op.Output(0), op.Output(1), op.Output(2)
   11351 }
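
// The SCALED arithmetic above can be written out in a few lines of plain Go.
// This is an editor's sketch for signed 8-bit output (range [-127, 127]);
// quantizeScaled is a hypothetical helper, not part of the generated API, and
// the tie-breaking of round() is simplified:
//
// ```go
// func quantizeScaled(input []float32, inputMin, inputMax float32) []int8 {
// 	m := inputMax // m = max(abs(inputMin), abs(inputMax))
// 	if -inputMin > m {
// 		m = -inputMin
// 	}
// 	s := float32(254) / (2 * m) // (max_fixed - min_fixed) / (2 * m) = (127 - -127) / (2 * m)
// 	out := make([]int8, len(input))
// 	for i, v := range input {
// 		r := v * s // result = round(input * s)
// 		if r >= 0 {
// 			out[i] = int8(r + 0.5)
// 		} else {
// 			out[i] = int8(r - 0.5)
// 		}
// 	}
// 	return out
// }
// ```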
   11352 
   11353 // Component-wise divides a SparseTensor by a dense Tensor.
   11354 //
   11355 // *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
   11356 // the other direction.
   11357 //
   11358 // Arguments:
   11359 //	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   11360 // SparseTensor, possibly not in canonical ordering.
   11361 //	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
   11362 //	sp_shape: 1-D.  Shape of the input SparseTensor.
   11363 //	dense: `R`-D.  The dense Tensor operand.
   11364 //
   11365 // Returns 1-D.  The `N` values that are operated on.
   11366 func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
   11367 	if scope.Err() != nil {
   11368 		return
   11369 	}
   11370 	opspec := tf.OpSpec{
   11371 		Type: "SparseDenseCwiseDiv",
   11372 		Input: []tf.Input{
   11373 			sp_indices, sp_values, sp_shape, dense,
   11374 		},
   11375 	}
   11376 	op := scope.AddOperation(opspec)
   11377 	return op.Output(0)
   11378 }
   11379 
   11380 // ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
   11381 type ResourceApplyMomentumAttr func(optionalAttr)
   11382 
   11383 // ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
   11384 //
   11385 // value: If `True`, updating of the var and accum tensors will be protected
   11386 // by a lock; otherwise the behavior is undefined, but may exhibit less
   11387 // contention.
   11388 // If not specified, defaults to false
   11389 func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr {
   11390 	return func(m optionalAttr) {
   11391 		m["use_locking"] = value
   11392 	}
   11393 }
   11394 
   11395 // ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
   11396 //
// value: If `True`, Nesterov momentum is used: the gradient is effectively
// evaluated at the look-ahead point var - lr * momentum * accum, and the
// updated var reflects that look-ahead value.
   11400 // If not specified, defaults to false
   11401 func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
   11402 	return func(m optionalAttr) {
   11403 		m["use_nesterov"] = value
   11404 	}
   11405 }
   11406 
// Update '*var' according to the momentum scheme.
//
// Set use_nesterov = True if you want to use Nesterov momentum.
   11410 //
   11411 // accum = accum * momentum + grad
   11412 // var -= lr * accum
   11413 //
   11414 // Arguments:
   11415 //	var_: Should be from a Variable().
   11416 //	accum: Should be from a Variable().
   11417 //	lr: Scaling factor. Must be a scalar.
   11418 //	grad: The gradient.
   11419 //	momentum: Momentum. Must be a scalar.
   11420 //
   11421 // Returns the created operation.
   11422 func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
   11423 	if scope.Err() != nil {
   11424 		return
   11425 	}
   11426 	attrs := map[string]interface{}{}
   11427 	for _, a := range optional {
   11428 		a(attrs)
   11429 	}
   11430 	opspec := tf.OpSpec{
   11431 		Type: "ResourceApplyMomentum",
   11432 		Input: []tf.Input{
   11433 			var_, accum, lr, grad, momentum,
   11434 		},
   11435 		Attrs: attrs,
   11436 	}
   11437 	return scope.AddOperation(opspec)
   11438 }
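
// For reference, the update rule above amounts to the following plain-Go
// sketch (editor's illustration; applyMomentum is a hypothetical helper, and
// the use_nesterov branch shows one common formulation of the Nesterov
// variant):
//
// ```go
// func applyMomentum(v, accum, grad []float32, lr, momentum float32, useNesterov bool) {
// 	for i := range v {
// 		accum[i] = accum[i]*momentum + grad[i]
// 		if useNesterov {
// 			v[i] -= grad[i]*lr + accum[i]*momentum*lr
// 		} else {
// 			v[i] -= lr * accum[i]
// 		}
// 	}
// }
// ```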
   11439 
   11440 // Returns the truth value of (x >= y) element-wise.
   11441 //
   11442 // *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
   11443 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   11444 func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   11445 	if scope.Err() != nil {
   11446 		return
   11447 	}
   11448 	opspec := tf.OpSpec{
   11449 		Type: "GreaterEqual",
   11450 		Input: []tf.Input{
   11451 			x, y,
   11452 		},
   11453 	}
   11454 	op := scope.AddOperation(opspec)
   11455 	return op.Output(0)
   11456 }
   11457 
   11458 // Conv3DAttr is an optional argument to Conv3D.
   11459 type Conv3DAttr func(optionalAttr)
   11460 
   11461 // Conv3DDataFormat sets the optional data_format attribute to value.
   11462 //
   11463 // value: The data format of the input and output data. With the
   11464 // default format "NDHWC", the data is stored in the order of:
   11465 //     [batch, in_depth, in_height, in_width, in_channels].
   11466 // Alternatively, the format could be "NCDHW", the data storage order is:
   11467 //     [batch, in_channels, in_depth, in_height, in_width].
   11468 // If not specified, defaults to "NDHWC"
   11469 func Conv3DDataFormat(value string) Conv3DAttr {
   11470 	return func(m optionalAttr) {
   11471 		m["data_format"] = value
   11472 	}
   11473 }
   11474 
   11475 // Conv3DDilations sets the optional dilations attribute to value.
   11476 //
   11477 // value: 1-D tensor of length 5.  The dilation factor for each dimension of
   11478 // `input`. If set to k > 1, there will be k-1 skipped cells between each
   11479 // filter element on that dimension. The dimension order is determined by the
   11480 // value of `data_format`, see above for details. Dilations in the batch and
   11481 // depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1, 1]
   11483 func Conv3DDilations(value []int64) Conv3DAttr {
   11484 	return func(m optionalAttr) {
   11485 		m["dilations"] = value
   11486 	}
   11487 }
   11488 
   11489 // Computes a 3-D convolution given 5-D `input` and `filter` tensors.
   11490 //
   11491 // In signal processing, cross-correlation is a measure of similarity of
   11492 // two waveforms as a function of a time-lag applied to one of them. This
   11493 // is also known as a sliding dot product or sliding inner-product.
   11494 //
   11495 // Our Conv3D implements a form of cross-correlation.
   11496 //
   11497 // Arguments:
   11498 //	input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
   11499 //	filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
   11500 // out_channels]`. `in_channels` must match between `input` and `filter`.
   11501 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   11502 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   11503 //	padding: The type of padding algorithm to use.
   11504 func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output) {
   11505 	if scope.Err() != nil {
   11506 		return
   11507 	}
   11508 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   11509 	for _, a := range optional {
   11510 		a(attrs)
   11511 	}
   11512 	opspec := tf.OpSpec{
   11513 		Type: "Conv3D",
   11514 		Input: []tf.Input{
   11515 			input, filter,
   11516 		},
   11517 		Attrs: attrs,
   11518 	}
   11519 	op := scope.AddOperation(opspec)
   11520 	return op.Output(0)
   11521 }
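
// A note on shapes: for each spatial dimension, the output extent follows the
// usual TensorFlow SAME/VALID conventions. An editor's sketch (convOutputSize
// is a hypothetical helper; dilation is ignored for brevity):
//
// ```go
// func convOutputSize(in, filter, stride int64, padding string) int64 {
// 	if padding == "SAME" {
// 		return (in + stride - 1) / stride // ceil(in / stride)
// 	}
// 	return (in - filter + stride) / stride // VALID: ceil((in - filter + 1) / stride)
// }
// ```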
   11522 
   11523 // Adds up a SparseTensor and a dense Tensor, using these special rules:
   11524 //
   11525 // (1) Broadcasts the dense side to have the same shape as the sparse side, if
   11526 //     eligible;
   11527 // (2) Then, only the dense values pointed to by the indices of the SparseTensor
   11528 //     participate in the cwise addition.
   11529 //
   11530 // By these rules, the result is a logical SparseTensor with exactly the same
   11531 // indices and shape, but possibly with different non-zero values.  The output of
   11532 // this Op is the resultant non-zero values.
   11533 //
   11534 // Arguments:
   11535 //	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   11536 // SparseTensor, possibly not in canonical ordering.
   11537 //	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
   11538 //	sp_shape: 1-D.  Shape of the input SparseTensor.
   11539 //	dense: `R`-D.  The dense Tensor operand.
   11540 //
   11541 // Returns 1-D.  The `N` values that are operated on.
   11542 func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
   11543 	if scope.Err() != nil {
   11544 		return
   11545 	}
   11546 	opspec := tf.OpSpec{
   11547 		Type: "SparseDenseCwiseAdd",
   11548 		Input: []tf.Input{
   11549 			sp_indices, sp_values, sp_shape, dense,
   11550 		},
   11551 	}
   11552 	op := scope.AddOperation(opspec)
   11553 	return op.Output(0)
   11554 }
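
// Rule (2) above is easy to state in plain Go for the 1-D case: only the dense
// entries addressed by the sparse indices take part in the addition. An
// editor's sketch (sparseDenseAdd1D is a hypothetical helper):
//
// ```go
// func sparseDenseAdd1D(indices []int64, values, dense []float32) []float32 {
// 	out := make([]float32, len(values))
// 	for i, idx := range indices {
// 		out[i] = values[i] + dense[idx] // indices and shape are unchanged
// 	}
// 	return out
// }
// ```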
   11555 
   11556 // Read an element from the TensorArray into output `value`.
   11557 //
   11558 // Arguments:
//	handle: The handle to a TensorArray.
//	index: The position in the TensorArray from which to read.
//	flow_in: A float scalar that enforces proper chaining of operations.
//	dtype: The type of the element that is returned.
   11563 //
   11564 // Returns The tensor that is read from the TensorArray.
   11565 func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
   11566 	if scope.Err() != nil {
   11567 		return
   11568 	}
   11569 	attrs := map[string]interface{}{"dtype": dtype}
   11570 	opspec := tf.OpSpec{
   11571 		Type: "TensorArrayReadV3",
   11572 		Input: []tf.Input{
   11573 			handle, index, flow_in,
   11574 		},
   11575 		Attrs: attrs,
   11576 	}
   11577 	op := scope.AddOperation(opspec)
   11578 	return op.Output(0)
   11579 }
   11580 
   11581 // EncodePngAttr is an optional argument to EncodePng.
   11582 type EncodePngAttr func(optionalAttr)
   11583 
   11584 // EncodePngCompression sets the optional compression attribute to value.
   11585 //
   11586 // value: Compression level.
   11587 // If not specified, defaults to -1
   11588 func EncodePngCompression(value int64) EncodePngAttr {
   11589 	return func(m optionalAttr) {
   11590 		m["compression"] = value
   11591 	}
   11592 }
   11593 
   11594 // PNG-encode an image.
   11595 //
   11596 // `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
   11597 // where `channels` is:
   11598 //
   11599 // *   1: for grayscale.
   11600 // *   2: for grayscale + alpha.
   11601 // *   3: for RGB.
   11602 // *   4: for RGBA.
   11603 //
   11604 // The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
   11605 // default or a value from 0 to 9.  9 is the highest compression level, generating
   11606 // the smallest output, but is slower.
   11607 //
   11608 // Arguments:
   11609 //	image: 3-D with shape `[height, width, channels]`.
   11610 //
   11611 // Returns 0-D. PNG-encoded image.
   11612 func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output) {
   11613 	if scope.Err() != nil {
   11614 		return
   11615 	}
   11616 	attrs := map[string]interface{}{}
   11617 	for _, a := range optional {
   11618 		a(attrs)
   11619 	}
   11620 	opspec := tf.OpSpec{
   11621 		Type: "EncodePng",
   11622 		Input: []tf.Input{
   11623 			image,
   11624 		},
   11625 		Attrs: attrs,
   11626 	}
   11627 	op := scope.AddOperation(opspec)
   11628 	return op.Output(0)
   11629 }
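
// A minimal usage sketch within this package (editor's illustration; the
// placeholder type and the compression level are assumptions):
//
// ```go
// s := NewScope()
// image := Placeholder(s, tf.Uint8) // 3-D [height, width, channels]
// contents := EncodePng(s, image, EncodePngCompression(9))
// ```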
   11630 
   11631 // DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
   11632 type DataFormatVecPermuteAttr func(optionalAttr)
   11633 
   11634 // DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
   11635 //
   11636 // value: source data format.
   11637 // If not specified, defaults to "NHWC"
   11638 func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr {
   11639 	return func(m optionalAttr) {
   11640 		m["src_format"] = value
   11641 	}
   11642 }
   11643 
   11644 // DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
   11645 //
   11646 // value: destination data format.
   11647 // If not specified, defaults to "NCHW"
   11648 func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr {
   11649 	return func(m optionalAttr) {
   11650 		m["dst_format"] = value
   11651 	}
   11652 }
   11653 
// Returns the permuted vector/tensor in the destination data format given the
// one in the source data format.
   11657 //
   11658 // Arguments:
   11659 //	x: Vector of size 4 or Tensor of shape (4, 2) in source data format.
   11660 //
   11661 // Returns Vector of size 4 or Tensor of shape (4, 2) in destination data format.
   11662 func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output) {
   11663 	if scope.Err() != nil {
   11664 		return
   11665 	}
   11666 	attrs := map[string]interface{}{}
   11667 	for _, a := range optional {
   11668 		a(attrs)
   11669 	}
   11670 	opspec := tf.OpSpec{
   11671 		Type: "DataFormatVecPermute",
   11672 		Input: []tf.Input{
   11673 			x,
   11674 		},
   11675 		Attrs: attrs,
   11676 	}
   11677 	op := scope.AddOperation(opspec)
   11678 	return op.Output(0)
   11679 }
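
// The permutation itself just matches characters between the two format
// strings. An editor's sketch for the size-4 vector case (permuteVec is a
// hypothetical helper):
//
// ```go
// func permuteVec(x [4]int64, srcFormat, dstFormat string) [4]int64 {
// 	var y [4]int64
// 	for i := 0; i < 4; i++ {
// 		for j := 0; j < 4; j++ {
// 			if dstFormat[i] == srcFormat[j] {
// 				y[i] = x[j] // e.g. "NHWC" -> "NCHW" maps {n,h,w,c} to {n,c,h,w}
// 			}
// 		}
// 	}
// 	return y
// }
// ```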
   11680 
   11681 // Returns element-wise integer closest to x.
   11682 //
   11683 // If the result is midway between two representable values,
// the even representable value is chosen.
   11685 // For example:
   11686 //
   11687 // ```
   11688 // rint(-1.5) ==> -2.0
   11689 // rint(0.5000001) ==> 1.0
   11690 // rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
   11691 // ```
   11692 func Rint(scope *Scope, x tf.Output) (y tf.Output) {
   11693 	if scope.Err() != nil {
   11694 		return
   11695 	}
   11696 	opspec := tf.OpSpec{
   11697 		Type: "Rint",
   11698 		Input: []tf.Input{
   11699 			x,
   11700 		},
   11701 	}
   11702 	op := scope.AddOperation(opspec)
   11703 	return op.Output(0)
   11704 }
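
// The tie-breaking rule above (round half to even) is the same one implemented
// by Go's math.RoundToEven, so a host-side equivalent is simply (editor's
// sketch):
//
// ```go
// import "math"
//
// func rint(xs []float64) []float64 {
// 	ys := make([]float64, len(xs))
// 	for i, x := range xs {
// 		ys[i] = math.RoundToEven(x) // e.g. -1.5 -> -2, 0.5 -> 0
// 	}
// 	return ys
// }
// ```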
   11705 
   11706 // OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
   11707 type OrderedMapUnstageNoKeyAttr func(optionalAttr)
   11708 
   11709 // OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value.
   11710 // If not specified, defaults to 0
   11711 //
   11712 // REQUIRES: value >= 0
   11713 func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr {
   11714 	return func(m optionalAttr) {
   11715 		m["capacity"] = value
   11716 	}
   11717 }
   11718 
   11719 // OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
   11720 // If not specified, defaults to 0
   11721 //
   11722 // REQUIRES: value >= 0
   11723 func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr {
   11724 	return func(m optionalAttr) {
   11725 		m["memory_limit"] = value
   11726 	}
   11727 }
   11728 
   11729 // OrderedMapUnstageNoKeyContainer sets the optional container attribute to value.
   11730 // If not specified, defaults to ""
   11731 func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr {
   11732 	return func(m optionalAttr) {
   11733 		m["container"] = value
   11734 	}
   11735 }
   11736 
   11737 // OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value.
   11738 // If not specified, defaults to ""
   11739 func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr {
   11740 	return func(m optionalAttr) {
   11741 		m["shared_name"] = value
   11742 	}
   11743 }
   11744 
// Op removes and returns the (key, value) element with the smallest key.
//
// The element is taken from the underlying container. If the underlying
// container does not contain elements, the op blocks until it does.
   11749 func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
   11750 	if scope.Err() != nil {
   11751 		return
   11752 	}
   11753 	attrs := map[string]interface{}{"dtypes": dtypes}
   11754 	for _, a := range optional {
   11755 		a(attrs)
   11756 	}
   11757 	opspec := tf.OpSpec{
   11758 		Type: "OrderedMapUnstageNoKey",
   11759 		Input: []tf.Input{
   11760 			indices,
   11761 		},
   11762 		Attrs: attrs,
   11763 	}
   11764 	op := scope.AddOperation(opspec)
   11765 	if scope.Err() != nil {
   11766 		return
   11767 	}
   11768 	var idx int
   11769 	var err error
   11770 	key = op.Output(idx)
   11771 	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
   11772 		scope.UpdateErr("OrderedMapUnstageNoKey", err)
   11773 		return
   11774 	}
   11775 	return key, values
   11776 }
   11777 
   11778 // MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
   11779 type MaxPool3DGradGradAttr func(optionalAttr)
   11780 
   11781 // MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
   11782 //
   11783 // value: The data format of the input and output data. With the
   11784 // default format "NDHWC", the data is stored in the order of:
   11785 //     [batch, in_depth, in_height, in_width, in_channels].
   11786 // Alternatively, the format could be "NCDHW", the data storage order is:
   11787 //     [batch, in_channels, in_depth, in_height, in_width].
   11788 // If not specified, defaults to "NDHWC"
   11789 func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr {
   11790 	return func(m optionalAttr) {
   11791 		m["data_format"] = value
   11792 	}
   11793 }
   11794 
   11795 // Computes second-order gradients of the maxpooling function.
   11796 //
   11797 // Arguments:
   11798 //	orig_input: The original input tensor.
   11799 //	orig_output: The original output tensor.
   11800 //	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
   11801 //	ksize: 1-D tensor of length 5. The size of the window for each dimension of
   11802 // the input tensor. Must have `ksize[0] = ksize[4] = 1`.
   11803 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   11804 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   11805 //	padding: The type of padding algorithm to use.
   11806 //
   11807 // Returns Gradients of gradients w.r.t. the input to `max_pool`.
   11808 func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output) {
   11809 	if scope.Err() != nil {
   11810 		return
   11811 	}
   11812 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   11813 	for _, a := range optional {
   11814 		a(attrs)
   11815 	}
   11816 	opspec := tf.OpSpec{
   11817 		Type: "MaxPool3DGradGrad",
   11818 		Input: []tf.Input{
   11819 			orig_input, orig_output, grad,
   11820 		},
   11821 		Attrs: attrs,
   11822 	}
   11823 	op := scope.AddOperation(opspec)
   11824 	return op.Output(0)
   11825 }
   11826 
   11827 // Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
   11828 type Conv3DBackpropFilterV2Attr func(optionalAttr)
   11829 
   11830 // Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
   11831 //
   11832 // value: The data format of the input and output data. With the
   11833 // default format "NDHWC", the data is stored in the order of:
   11834 //     [batch, in_depth, in_height, in_width, in_channels].
   11835 // Alternatively, the format could be "NCDHW", the data storage order is:
   11836 //     [batch, in_channels, in_depth, in_height, in_width].
   11837 // If not specified, defaults to "NDHWC"
   11838 func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {
   11839 	return func(m optionalAttr) {
   11840 		m["data_format"] = value
   11841 	}
   11842 }
   11843 
   11844 // Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
   11845 //
   11846 // value: 1-D tensor of length 5.  The dilation factor for each dimension of
   11847 // `input`. If set to k > 1, there will be k-1 skipped cells between each
   11848 // filter element on that dimension. The dimension order is determined by the
   11849 // value of `data_format`, see above for details. Dilations in the batch and
   11850 // depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1, 1]
   11852 func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr {
   11853 	return func(m optionalAttr) {
   11854 		m["dilations"] = value
   11855 	}
   11856 }
   11857 
   11858 // Computes the gradients of 3-D convolution with respect to the filter.
   11859 //
   11860 // Arguments:
   11861 //	input: Shape `[batch, depth, rows, cols, in_channels]`.
   11862 //	filter_sizes: An integer vector representing the tensor shape of `filter`,
   11863 // where `filter` is a 5-D
   11864 // `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
   11865 // tensor.
   11866 //	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
   11867 // out_channels]`.
   11868 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   11869 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   11870 //	padding: The type of padding algorithm to use.
   11871 func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output) {
   11872 	if scope.Err() != nil {
   11873 		return
   11874 	}
   11875 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   11876 	for _, a := range optional {
   11877 		a(attrs)
   11878 	}
   11879 	opspec := tf.OpSpec{
   11880 		Type: "Conv3DBackpropFilterV2",
   11881 		Input: []tf.Input{
   11882 			input, filter_sizes, out_backprop,
   11883 		},
   11884 		Attrs: attrs,
   11885 	}
   11886 	op := scope.AddOperation(opspec)
   11887 	return op.Output(0)
   11888 }
   11889 
   11890 // Execute a sub graph on a remote processor.
   11891 //
// The graph specifications (such as the graph itself, input tensors, and output names)
   11893 // are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
   11894 // as serialized_remote_fused_graph_execute_info.
   11895 // The specifications will be passed to a dedicated registered
   11896 // remote fused graph executor.  The executor will send the graph specifications
   11897 // to a remote processor and execute that graph.  The execution results
   11898 // will be passed to consumer nodes as outputs of this node.
   11899 //
   11900 // Arguments:
   11901 //	inputs: Arbitrary number of tensors with arbitrary data types
   11902 //
   11903 //	serialized_remote_fused_graph_execute_info: Serialized protocol buffer
   11904 // of RemoteFusedGraphExecuteInfo which contains graph specifications.
   11905 //
   11906 // Returns Arbitrary number of tensors with arbitrary data types
   11907 func RemoteFusedGraphExecute(scope *Scope, inputs []tf.Output, Toutputs []tf.DataType, serialized_remote_fused_graph_execute_info string) (outputs []tf.Output) {
   11908 	if scope.Err() != nil {
   11909 		return
   11910 	}
   11911 	attrs := map[string]interface{}{"Toutputs": Toutputs, "serialized_remote_fused_graph_execute_info": serialized_remote_fused_graph_execute_info}
   11912 	opspec := tf.OpSpec{
   11913 		Type: "RemoteFusedGraphExecute",
   11914 		Input: []tf.Input{
   11915 			tf.OutputList(inputs),
   11916 		},
   11917 		Attrs: attrs,
   11918 	}
   11919 	op := scope.AddOperation(opspec)
   11920 	if scope.Err() != nil {
   11921 		return
   11922 	}
   11923 	var idx int
   11924 	var err error
   11925 	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
   11926 		scope.UpdateErr("RemoteFusedGraphExecute", err)
   11927 		return
   11928 	}
   11929 	return outputs
   11930 }
   11931 
   11932 // ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
   11933 type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)
   11934 
   11935 // ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
   11936 //
// value: If either seed or seed2 is set to be non-zero, the random number
   11938 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   11939 // random seed.
   11940 // If not specified, defaults to 0
   11941 func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
   11942 	return func(m optionalAttr) {
   11943 		m["seed"] = value
   11944 	}
   11945 }
   11946 
   11947 // ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
   11948 //
// value: A second seed to avoid seed collision.
   11950 // If not specified, defaults to 0
   11951 func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
   11952 	return func(m optionalAttr) {
   11953 		m["seed2"] = value
   11954 	}
   11955 }
   11956 
   11957 // Generates labels for candidate sampling with a learned unigram distribution.
   11958 //
   11959 // See explanations of candidate sampling and the data formats at
   11960 // go/candidate-sampling.
   11961 //
   11962 // For each batch, this op picks a single set of sampled candidate labels.
   11963 //
   11964 // The advantages of sampling candidates per-batch are simplicity and the
   11965 // possibility of efficient dense matrix multiplication. The disadvantage is that
   11966 // the sampled candidates must be chosen independently of the context and of the
   11967 // true labels.
   11968 //
   11969 // Arguments:
   11970 //	true_classes: A batch_size * num_true matrix, in which each row contains the
   11971 // IDs of the num_true target_classes in the corresponding original label.
   11972 //	num_true: Number of true labels per context.
   11973 //	num_sampled: Number of candidates to randomly sample.
   11974 //	unique: If unique is true, we sample with rejection, so that all sampled
   11975 // candidates in a batch are unique. This requires some approximation to
   11976 // estimate the post-rejection sampling probabilities.
   11977 //	range_max: The sampler will sample integers from the interval [0, range_max).
   11978 //
// Returns A vector of length num_sampled, in which each element is the ID of a
// sampled candidate; a batch_size * num_true matrix representing the number of
// times each candidate is expected to occur in a batch of sampled candidates
// (if unique=true, this is a probability); and a vector of length num_sampled,
// for each sampled candidate representing the number of times the candidate is
// expected to occur in a batch of sampled candidates (if unique=true, this is
// a probability).
   11986 func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
   11987 	if scope.Err() != nil {
   11988 		return
   11989 	}
   11990 	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
   11991 	for _, a := range optional {
   11992 		a(attrs)
   11993 	}
   11994 	opspec := tf.OpSpec{
   11995 		Type: "ThreadUnsafeUnigramCandidateSampler",
   11996 		Input: []tf.Input{
   11997 			true_classes,
   11998 		},
   11999 		Attrs: attrs,
   12000 	}
   12001 	op := scope.AddOperation(opspec)
   12002 	return op.Output(0), op.Output(1), op.Output(2)
   12003 }
   12004 
   12005 // MaxPoolV2Attr is an optional argument to MaxPoolV2.
   12006 type MaxPoolV2Attr func(optionalAttr)
   12007 
   12008 // MaxPoolV2DataFormat sets the optional data_format attribute to value.
   12009 //
   12010 // value: Specify the data format of the input and output data. With the
   12011 // default format "NHWC", the data is stored in the order of:
   12012 //     [batch, in_height, in_width, in_channels].
// Alternatively, the format could be "NCHW", the data storage order is:
   12014 //     [batch, in_channels, in_height, in_width].
   12015 // If not specified, defaults to "NHWC"
   12016 func MaxPoolV2DataFormat(value string) MaxPoolV2Attr {
   12017 	return func(m optionalAttr) {
   12018 		m["data_format"] = value
   12019 	}
   12020 }
   12021 
   12022 // Performs max pooling on the input.
   12023 //
   12024 // Arguments:
   12025 //	input: 4-D input to pool over.
   12026 //	ksize: The size of the window for each dimension of the input tensor.
   12027 //	strides: The stride of the sliding window for each dimension of the
   12028 // input tensor.
   12029 //	padding: The type of padding algorithm to use.
   12030 //
   12031 // Returns The max pooled output tensor.
   12032 func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output) {
   12033 	if scope.Err() != nil {
   12034 		return
   12035 	}
   12036 	attrs := map[string]interface{}{"padding": padding}
   12037 	for _, a := range optional {
   12038 		a(attrs)
   12039 	}
   12040 	opspec := tf.OpSpec{
   12041 		Type: "MaxPoolV2",
   12042 		Input: []tf.Input{
   12043 			input, ksize, strides,
   12044 		},
   12045 		Attrs: attrs,
   12046 	}
   12047 	op := scope.AddOperation(opspec)
   12048 	return op.Output(0)
   12049 }
   12050 
   12051 // Deprecated. Use TensorArrayReadV3
   12052 //
   12053 // DEPRECATED at GraphDef version 26: Use TensorArrayReadV3
   12054 func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
   12055 	if scope.Err() != nil {
   12056 		return
   12057 	}
   12058 	attrs := map[string]interface{}{"dtype": dtype}
   12059 	opspec := tf.OpSpec{
   12060 		Type: "TensorArrayReadV2",
   12061 		Input: []tf.Input{
   12062 			handle, index, flow_in,
   12063 		},
   12064 		Attrs: attrs,
   12065 	}
   12066 	op := scope.AddOperation(opspec)
   12067 	return op.Output(0)
   12068 }
   12069 
   12070 // Does nothing. Serves as a control trigger for scheduling.
   12071 //
   12072 // Only useful as a placeholder for control edges.
   12073 //
   12074 // Returns the created operation.
   12075 func ControlTrigger(scope *Scope) (o *tf.Operation) {
   12076 	if scope.Err() != nil {
   12077 		return
   12078 	}
   12079 	opspec := tf.OpSpec{
   12080 		Type: "ControlTrigger",
   12081 	}
   12082 	return scope.AddOperation(opspec)
   12083 }
   12084 
   12085 // Batch normalization.
   12086 //
   12087 // DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
   12088 //
   12089 // This op is deprecated. Prefer `tf.nn.batch_normalization`.
   12090 //
   12091 // Arguments:
   12092 //	t: A 4D input Tensor.
   12093 //	m: A 1D mean Tensor with size matching the last dimension of t.
   12094 // This is the first output from tf.nn.moments,
   12095 // or a saved moving average thereof.
   12096 //	v: A 1D variance Tensor with size matching the last dimension of t.
   12097 // This is the second output from tf.nn.moments,
   12098 // or a saved moving average thereof.
   12099 //	beta: A 1D beta Tensor with size matching the last dimension of t.
   12100 // An offset to be added to the normalized tensor.
   12101 //	gamma: A 1D gamma Tensor with size matching the last dimension of t.
   12102 // If "scale_after_normalization" is true, this tensor will be multiplied
   12103 // with the normalized tensor.
   12104 //	variance_epsilon: A small float number to avoid dividing by 0.
   12105 //	scale_after_normalization: A bool indicating whether the resulted tensor
   12106 // needs to be multiplied with gamma.
   12107 func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) {
   12108 	if scope.Err() != nil {
   12109 		return
   12110 	}
   12111 	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
   12112 	opspec := tf.OpSpec{
   12113 		Type: "BatchNormWithGlobalNormalization",
   12114 		Input: []tf.Input{
   12115 			t, m, v, beta, gamma,
   12116 		},
   12117 		Attrs: attrs,
   12118 	}
   12119 	op := scope.AddOperation(opspec)
   12120 	return op.Output(0)
   12121 }
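
// The computation is the classic normalization formula. An editor's sketch of
// the per-channel arithmetic (batchNorm is a hypothetical helper):
//
// ```go
// import "math"
//
// func batchNorm(t, m, v, beta, gamma []float64, eps float64, scaleAfter bool) []float64 {
// 	out := make([]float64, len(t))
// 	for c := range t { // one value per channel, for brevity
// 		y := (t[c] - m[c]) / math.Sqrt(v[c]+eps)
// 		if scaleAfter {
// 			y *= gamma[c]
// 		}
// 		out[c] = y + beta[c]
// 	}
// 	return out
// }
// ```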
   12122 
   12123 // MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
   12124 type MutableDenseHashTableV2Attr func(optionalAttr)
   12125 
   12126 // MutableDenseHashTableV2Container sets the optional container attribute to value.
   12127 //
   12128 // value: If non-empty, this table is placed in the given container.
   12129 // Otherwise, a default container is used.
   12130 // If not specified, defaults to ""
   12131 func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr {
   12132 	return func(m optionalAttr) {
   12133 		m["container"] = value
   12134 	}
   12135 }
   12136 
   12137 // MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
   12138 //
   12139 // value: If non-empty, this table is shared under the given name across
   12140 // multiple sessions.
   12141 // If not specified, defaults to ""
   12142 func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr {
   12143 	return func(m optionalAttr) {
   12144 		m["shared_name"] = value
   12145 	}
   12146 }
   12147 
   12148 // MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
   12149 // If not specified, defaults to false
   12150 func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr {
   12151 	return func(m optionalAttr) {
   12152 		m["use_node_name_sharing"] = value
   12153 	}
   12154 }
   12155 
   12156 // MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
   12157 //
   12158 // value: The shape of each value.
// If not specified, defaults to <> (an empty shape, i.e. scalar values)
   12160 func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr {
   12161 	return func(m optionalAttr) {
   12162 		m["value_shape"] = value
   12163 	}
   12164 }
   12165 
   12166 // MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
   12167 //
// value: The initial number of hash table buckets. Must be a power
// of 2.
   12170 // If not specified, defaults to 131072
   12171 func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr {
   12172 	return func(m optionalAttr) {
   12173 		m["initial_num_buckets"] = value
   12174 	}
   12175 }
   12176 
   12177 // MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
   12178 //
   12179 // value: The maximum ratio between number of entries and number of
   12180 // buckets before growing the table. Must be between 0 and 1.
   12181 // If not specified, defaults to 0.8
   12182 func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr {
   12183 	return func(m optionalAttr) {
   12184 		m["max_load_factor"] = value
   12185 	}
   12186 }
   12187 
   12188 // Creates an empty hash table that uses tensors as the backing store.
   12189 //
   12190 // It uses "open addressing" with quadratic reprobing to resolve
   12191 // collisions.
   12192 //
   12193 // This op creates a mutable hash table, specifying the type of its keys and
   12194 // values. Each value must be a scalar. Data can be inserted into the table using
   12195 // the insert operations. It does not support the initialization operation.
   12196 //
   12197 // Arguments:
   12198 //	empty_key: The key used to represent empty key buckets internally. Must not
   12199 // be used in insert or lookup operations.
   12200 //	value_dtype: Type of the table values.
   12201 //
   12202 // Returns Handle to a table.
   12203 func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output) {
   12204 	if scope.Err() != nil {
   12205 		return
   12206 	}
   12207 	attrs := map[string]interface{}{"value_dtype": value_dtype}
   12208 	for _, a := range optional {
   12209 		a(attrs)
   12210 	}
   12211 	opspec := tf.OpSpec{
   12212 		Type: "MutableDenseHashTableV2",
   12213 		Input: []tf.Input{
   12214 			empty_key,
   12215 		},
   12216 		Attrs: attrs,
   12217 	}
   12218 	op := scope.AddOperation(opspec)
   12219 	return op.Output(0)
   12220 }
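
// "Open addressing" with quadratic probing means collisions are resolved by
// visiting a deterministic sequence of buckets rather than chaining. One
// common variant, shown as an editor's sketch (probeSequence is a
// hypothetical helper; with triangular increments and a power-of-2 bucket
// count, every bucket is eventually visited):
//
// ```go
// func probeSequence(hash, numBuckets uint64) []uint64 {
// 	seq := make([]uint64, 0, numBuckets)
// 	for i := uint64(0); i < numBuckets; i++ {
// 		// The i-th probe lands at (hash + i*(i+1)/2) mod numBuckets.
// 		seq = append(seq, (hash+i*(i+1)/2)&(numBuckets-1))
// 	}
// 	return seq
// }
// ```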
   12221 
   12222 // Produces the max pool of the input tensor for quantized types.
   12223 //
   12224 // Arguments:
   12225 //	input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
   12226 //	min_input: The float value that the lowest quantized input value represents.
   12227 //	max_input: The float value that the highest quantized input value represents.
   12228 //	ksize: The size of the window for each dimension of the input tensor.
   12229 // The length must be 4 to match the number of dimensions of the input.
   12230 //	strides: The stride of the sliding window for each dimension of the input
   12231 // tensor. The length must be 4 to match the number of dimensions of the input.
   12232 //	padding: The type of padding algorithm to use.
   12233 //
// Returns The max-pooled output tensor, the float value that the lowest
// quantized output value represents, and the float value that the highest
// quantized output value represents.
   12235 func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
   12236 	if scope.Err() != nil {
   12237 		return
   12238 	}
   12239 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   12240 	opspec := tf.OpSpec{
   12241 		Type: "QuantizedMaxPool",
   12242 		Input: []tf.Input{
   12243 			input, min_input, max_input,
   12244 		},
   12245 		Attrs: attrs,
   12246 	}
   12247 	op := scope.AddOperation(opspec)
   12248 	return op.Output(0), op.Output(1), op.Output(2)
   12249 }
   12250 
   12251 // Computes softplus: `log(exp(features) + 1)`.
   12252 func Softplus(scope *Scope, features tf.Output) (activations tf.Output) {
   12253 	if scope.Err() != nil {
   12254 		return
   12255 	}
   12256 	opspec := tf.OpSpec{
   12257 		Type: "Softplus",
   12258 		Input: []tf.Input{
   12259 			features,
   12260 		},
   12261 	}
   12262 	op := scope.AddOperation(opspec)
   12263 	return op.Output(0)
   12264 }
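
// Evaluated naively, log(exp(x) + 1) overflows for large x. A numerically
// safer host-side formulation (editor's sketch):
//
// ```go
// import "math"
//
// func softplus(x float64) float64 {
// 	if x > 0 {
// 		return x + math.Log1p(math.Exp(-x)) // log(exp(x)+1) = x + log(1+exp(-x))
// 	}
// 	return math.Log1p(math.Exp(x))
// }
// ```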
   12265 
   12266 // Computes exponential of x - 1 element-wise.
   12267 //
   12268 // I.e., \\(y = (\exp x) - 1\\).
   12269 func Expm1(scope *Scope, x tf.Output) (y tf.Output) {
   12270 	if scope.Err() != nil {
   12271 		return
   12272 	}
   12273 	opspec := tf.OpSpec{
   12274 		Type: "Expm1",
   12275 		Input: []tf.Input{
   12276 			x,
   12277 		},
   12278 	}
   12279 	op := scope.AddOperation(opspec)
   12280 	return op.Output(0)
   12281 }
   12282 
   12283 // Returns the number of records this Reader has produced.
   12284 //
   12285 // This is the same as the number of ReaderRead executions that have
   12286 // succeeded.
   12287 //
   12288 // Arguments:
   12289 //	reader_handle: Handle to a Reader.
   12290 func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output) {
   12291 	if scope.Err() != nil {
   12292 		return
   12293 	}
   12294 	opspec := tf.OpSpec{
   12295 		Type: "ReaderNumRecordsProducedV2",
   12296 		Input: []tf.Input{
   12297 			reader_handle,
   12298 		},
   12299 	}
   12300 	op := scope.AddOperation(opspec)
   12301 	return op.Output(0)
   12302 }
   12303 
   12304 // Computes the sum along segments of a tensor.
   12305 //
// Read the section on segmentation in the math_ops documentation for an
// explanation of segments.
   12308 //
   12309 // Computes a tensor such that
   12310 // \\(output_i = \sum_j data_j\\) where sum is over `j` such
   12311 // that `segment_ids[j] == i`.
   12312 //
   12313 // If the sum is empty for a given segment ID `i`, `output[i] = 0`.
   12314 //
   12315 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   12316 // <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
   12317 // </div>
   12318 //
   12319 // Arguments:
   12320 //
   12321 //	segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
   12322 // first dimension.  Values should be sorted and can be repeated.
   12323 //
// Returns A tensor with the same shape as data, except for dimension 0 which
   12325 // has size `k`, the number of segments.
   12326 func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
   12327 	if scope.Err() != nil {
   12328 		return
   12329 	}
   12330 	opspec := tf.OpSpec{
   12331 		Type: "SegmentSum",
   12332 		Input: []tf.Input{
   12333 			data, segment_ids,
   12334 		},
   12335 	}
   12336 	op := scope.AddOperation(opspec)
   12337 	return op.Output(0)
   12338 }
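
// The formula above reduces to a single accumulation loop for 1-D data. An
// editor's sketch (segmentSum is a hypothetical helper; numSegments is assumed
// to be 1 + max(segmentIDs)):
//
// ```go
// func segmentSum(data []float32, segmentIDs []int64, numSegments int64) []float32 {
// 	out := make([]float32, numSegments) // empty segments keep output[i] = 0
// 	for j, id := range segmentIDs {
// 		out[id] += data[j] // output[i] = sum of data[j] with segmentIDs[j] == i
// 	}
// 	return out
// }
// ```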
   12339 
   12340 // Creates a dataset that emits the lines of one or more text files.
   12341 //
   12342 // Arguments:
   12343 //	filenames: A scalar or a vector containing the name(s) of the file(s) to be
   12344 // read.
   12345 //	compression_type: A scalar containing either (i) the empty string (no
   12346 // compression), (ii) "ZLIB", or (iii) "GZIP".
   12347 //	buffer_size: A scalar containing the number of bytes to buffer.
   12348 func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
   12349 	if scope.Err() != nil {
   12350 		return
   12351 	}
   12352 	opspec := tf.OpSpec{
   12353 		Type: "TextLineDataset",
   12354 		Input: []tf.Input{
   12355 			filenames, compression_type, buffer_size,
   12356 		},
   12357 	}
   12358 	op := scope.AddOperation(opspec)
   12359 	return op.Output(0)
   12360 }
   12361 
   12362 // Checks whether a resource handle-based variable has been initialized.
   12363 //
   12364 // Arguments:
   12365 //	resource: the input resource handle.
   12366 //
   12367 // Returns a scalar boolean which is true if the variable has been
   12368 // initialized.
   12369 func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output) {
   12370 	if scope.Err() != nil {
   12371 		return
   12372 	}
   12373 	opspec := tf.OpSpec{
   12374 		Type: "VarIsInitializedOp",
   12375 		Input: []tf.Input{
   12376 			resource,
   12377 		},
   12378 	}
   12379 	op := scope.AddOperation(opspec)
   12380 	return op.Output(0)
   12381 }
   12382 
   12383 // Pads a tensor with zeros.
   12384 //
// This operation pads `input` with zeros according to the `paddings` you
   12386 // specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
   12387 // rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
   12388 // how many zeros to add before the contents of `input` in that dimension, and
   12389 // `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
   12390 // in that dimension.
   12391 //
   12392 // The padded size of each dimension D of the output is:
   12393 //
   12394 // `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
   12395 //
   12396 // For example:
   12397 //
   12398 // ```
   12399 // # 't' is [[1, 1], [2, 2]]
   12400 // # 'paddings' is [[1, 1], [2, 2]]
   12401 // # rank of 't' is 2
   12402 // pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
   12403 //                       [0, 0, 1, 1, 0, 0]
   12404 //                       [0, 0, 2, 2, 0, 0]
   12405 //                       [0, 0, 0, 0, 0, 0]]
   12406 // ```
   12407 func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
   12408 	if scope.Err() != nil {
   12409 		return
   12410 	}
   12411 	opspec := tf.OpSpec{
   12412 		Type: "Pad",
   12413 		Input: []tf.Input{
   12414 			input, paddings,
   12415 		},
   12416 	}
   12417 	op := scope.AddOperation(opspec)
   12418 	return op.Output(0)
   12419 }
   12420 
   12421 // SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
   12422 type SparseTensorDenseMatMulAttr func(optionalAttr)
   12423 
   12424 // SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
   12425 //
   12426 // value: Use the adjoint of A in the matrix multiply.  If A is complex, this
   12427 // is transpose(conj(A)).  Otherwise it's transpose(A).
   12428 // If not specified, defaults to false
   12429 func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr {
   12430 	return func(m optionalAttr) {
   12431 		m["adjoint_a"] = value
   12432 	}
   12433 }
   12434 
   12435 // SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
   12436 //
   12437 // value: Use the adjoint of B in the matrix multiply.  If B is complex, this
   12438 // is transpose(conj(B)).  Otherwise it's transpose(B).
   12439 // If not specified, defaults to false
   12440 func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr {
   12441 	return func(m optionalAttr) {
   12442 		m["adjoint_b"] = value
   12443 	}
   12444 }
   12445 
   12446 // Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
   12447 //
   12448 // No validity checking is performed on the indices of A.  However, the following
   12449 // input format is recommended for optimal behavior:
   12450 //
   12451 // if adjoint_a == false:
   12452 //   A should be sorted in lexicographically increasing order.  Use SparseReorder
   12453 //   if you're not sure.
   12454 // if adjoint_a == true:
   12455 //   A should be sorted in order of increasing dimension 1 (i.e., "column major"
   12456 //   order instead of "row major" order).
   12457 //
   12458 // Arguments:
   12459 //	a_indices: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
   12460 //	a_values: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
   12461 //	a_shape: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
   12462 //	b: 2-D.  A dense Matrix.
   12463 func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) {
   12464 	if scope.Err() != nil {
   12465 		return
   12466 	}
   12467 	attrs := map[string]interface{}{}
   12468 	for _, a := range optional {
   12469 		a(attrs)
   12470 	}
   12471 	opspec := tf.OpSpec{
   12472 		Type: "SparseTensorDenseMatMul",
   12473 		Input: []tf.Input{
   12474 			a_indices, a_values, a_shape, b,
   12475 		},
   12476 		Attrs: attrs,
   12477 	}
   12478 	op := scope.AddOperation(opspec)
   12479 	return op.Output(0)
   12480 }
   12481 
   12482 // Deserialize and concatenate `SparseTensors` from a serialized minibatch.
   12483 //
   12484 // The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
   12485 // `N` is the minibatch size and the rows correspond to packed outputs of
   12486 // `SerializeSparse`.  The ranks of the original `SparseTensor` objects
   12487 // must all match.  When the final `SparseTensor` is created, it has rank one
   12488 // higher than the ranks of the incoming `SparseTensor` objects
   12489 // (they have been concatenated along a new row dimension).
   12490 //
   12491 // The output `SparseTensor` object's shape values for all dimensions but the
   12492 // first are the max across the input `SparseTensor` objects' shape values
   12493 // for the corresponding dimensions.  Its first shape value is `N`, the minibatch
   12494 // size.
   12495 //
   12496 // The input `SparseTensor` objects' indices are assumed ordered in
   12497 // standard lexicographic order.  If this is not the case, after this
   12498 // step run `SparseReorder` to restore index ordering.
   12499 //
   12500 // For example, if the serialized input is a `[2 x 3]` matrix representing two
   12501 // original `SparseTensor` objects:
   12502 //
   12503 //     index = [ 0]
   12504 //             [10]
   12505 //             [20]
   12506 //     values = [1, 2, 3]
   12507 //     shape = [50]
   12508 //
   12509 // and
   12510 //
   12511 //     index = [ 2]
   12512 //             [10]
   12513 //     values = [4, 5]
   12514 //     shape = [30]
   12515 //
   12516 // then the final deserialized `SparseTensor` will be:
   12517 //
   12518 //     index = [0  0]
   12519 //             [0 10]
   12520 //             [0 20]
   12521 //             [1  2]
   12522 //             [1 10]
   12523 //     values = [1, 2, 3, 4, 5]
   12524 //     shape = [2 50]
   12525 //
   12526 // Arguments:
   12527 //	serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects.
   12528 // Must have 3 columns.
   12529 //	dtype: The `dtype` of the serialized `SparseTensor` objects.
   12530 func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
   12531 	if scope.Err() != nil {
   12532 		return
   12533 	}
   12534 	attrs := map[string]interface{}{"dtype": dtype}
   12535 	opspec := tf.OpSpec{
   12536 		Type: "DeserializeManySparse",
   12537 		Input: []tf.Input{
   12538 			serialized_sparse,
   12539 		},
   12540 		Attrs: attrs,
   12541 	}
   12542 	op := scope.AddOperation(opspec)
   12543 	return op.Output(0), op.Output(1), op.Output(2)
   12544 }
   12545 
   12546 // StringJoinAttr is an optional argument to StringJoin.
   12547 type StringJoinAttr func(optionalAttr)
   12548 
   12549 // StringJoinSeparator sets the optional separator attribute to value.
   12550 //
   12551 // value: string, an optional join separator.
   12552 // If not specified, defaults to ""
   12553 func StringJoinSeparator(value string) StringJoinAttr {
   12554 	return func(m optionalAttr) {
   12555 		m["separator"] = value
   12556 	}
   12557 }
   12558 
// Joins the strings in the given list of string tensors into one tensor,
//
// using the given separator (the default is an empty separator).
   12562 //
   12563 // Arguments:
   12564 //	inputs: A list of string tensors.  The tensors must all have the same shape,
   12565 // or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
   12566 // of non-scalar inputs.
   12567 func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output) {
   12568 	if scope.Err() != nil {
   12569 		return
   12570 	}
   12571 	attrs := map[string]interface{}{}
   12572 	for _, a := range optional {
   12573 		a(attrs)
   12574 	}
   12575 	opspec := tf.OpSpec{
   12576 		Type: "StringJoin",
   12577 		Input: []tf.Input{
   12578 			tf.OutputList(inputs),
   12579 		},
   12580 		Attrs: attrs,
   12581 	}
   12582 	op := scope.AddOperation(opspec)
   12583 	return op.Output(0)
   12584 }
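
// A minimal usage sketch within this package (editor's illustration; the
// example constants are assumptions):
//
// ```go
// s := NewScope()
// a := Const(s, "hello")
// b := Const(s, "world")
// joined := StringJoin(s, []tf.Output{a, b}, StringJoinSeparator(" "))
// ```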
   12585 
   12586 // Returns immutable tensor from memory region.
   12587 //
   12588 // The current implementation memmaps the tensor from a file.
   12589 //
   12590 // Arguments:
   12591 //	dtype: Type of the returned tensor.
   12592 //	shape: Shape of the returned tensor.
   12593 //	memory_region_name: Name of readonly memory region used by the tensor, see
   12594 // NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
   12595 func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output) {
   12596 	if scope.Err() != nil {
   12597 		return
   12598 	}
   12599 	attrs := map[string]interface{}{"dtype": dtype, "shape": shape, "memory_region_name": memory_region_name}
   12600 	opspec := tf.OpSpec{
   12601 		Type: "ImmutableConst",
   12602 
   12603 		Attrs: attrs,
   12604 	}
   12605 	op := scope.AddOperation(opspec)
   12606 	return op.Output(0)
   12607 }
   12608 
   12609 // Inverse real-valued fast Fourier transform.
   12610 //
   12611 // Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
   12612 // signal over the inner-most dimension of `input`.
   12613 //
   12614 // The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
   12615 // `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
   12616 // `fft_length` is not provided, it is computed from the size of the inner-most
   12617 // dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
   12618 // compute `input` is odd, it should be provided since it cannot be inferred
   12619 // properly.
   12620 //
   12621 // Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
   12622 // than the corresponding dimension of `input`, the dimension is cropped. If it is
   12623 // larger, the dimension is padded with zeros.
   12624 //
   12625 // Arguments:
   12626 //	input: A complex64 tensor.
   12627 //	fft_length: An int32 tensor of shape [1]. The FFT length.
   12628 //
   12629 // Returns A float32 tensor of the same rank as `input`. The inner-most
   12630 //   dimension of `input` is replaced with the `fft_length` samples of its inverse
   12631 //   1D Fourier transform.
   12632 //
   12633 // @compatibility(numpy)
   12634 // Equivalent to np.fft.irfft
   12635 // @end_compatibility
   12636 func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
   12637 	if scope.Err() != nil {
   12638 		return
   12639 	}
   12640 	opspec := tf.OpSpec{
   12641 		Type: "IRFFT",
   12642 		Input: []tf.Input{
   12643 			input, fft_length,
   12644 		},
   12645 	}
   12646 	op := scope.AddOperation(opspec)
   12647 	return op.Output(0)
   12648 }
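
// The inference rule above is simply fft_length = 2 * (inner - 1), which is
// why an odd original length cannot be recovered: RFFT of a length-n signal
// has inner dimension n/2 + 1, and that mapping only inverts cleanly for even
// n. Editor's sketch:
//
// ```go
// func inferredFFTLength(inner int) int {
// 	return 2 * (inner - 1) // exact only when the original length was even
// }
// ```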
   12649 
   12650 // Concatenates a list of `SparseTensor` along the specified dimension.
   12651 //
   12652 // Concatenation is with respect to the dense versions of these sparse tensors.
   12653 // It is assumed that each input is a `SparseTensor` whose elements are ordered
   12654 // along increasing dimension number.
   12655 //
   12656 // All inputs' shapes must match, except for the concat dimension.  The
   12657 // `indices`, `values`, and `shapes` lists must have the same length.
   12658 //
   12659 // The output shape is identical to the inputs', except along the concat
   12660 // dimension, where it is the sum of the inputs' sizes along that dimension.
   12661 //
   12662 // The output elements will be resorted to preserve the sort order along
   12663 // increasing dimension number.
   12664 //
   12665 // This op runs in `O(M log M)` time, where `M` is the total number of non-empty
   12666 // values across all inputs. This is due to the need for an internal sort in
   12667 // order to concatenate efficiently across an arbitrary dimension.
   12668 //
   12669 // For example, if `concat_dim = 1` and the inputs are
   12670 //
   12671 //     sp_inputs[0]: shape = [2, 3]
   12672 //     [0, 2]: "a"
   12673 //     [1, 0]: "b"
   12674 //     [1, 1]: "c"
   12675 //
   12676 //     sp_inputs[1]: shape = [2, 4]
   12677 //     [0, 1]: "d"
   12678 //     [0, 2]: "e"
   12679 //
   12680 // then the output will be
   12681 //
   12682 //     shape = [2, 7]
   12683 //     [0, 2]: "a"
   12684 //     [0, 4]: "d"
   12685 //     [0, 5]: "e"
   12686 //     [1, 0]: "b"
   12687 //     [1, 1]: "c"
   12688 //
   12689 // Graphically this is equivalent to doing
   12690 //
   12691 //     [    a] concat [  d e  ] = [    a   d e  ]
   12692 //     [b c  ]        [       ]   [b c          ]
   12693 //
   12694 // Arguments:
   12695 //	indices: 2-D.  Indices of each input `SparseTensor`.
   12696 //	values: 1-D.  Non-empty values of each `SparseTensor`.
   12697 //	shapes: 1-D.  Shapes of each `SparseTensor`.
   12698 //	concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
   12699 // where rank is the number of dimensions in each input `SparseTensor`.
   12700 //
// Returns:
//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
//	output_values: 1-D.  Non-empty values of the concatenated `SparseTensor`.
//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
   12702 func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
   12703 	if scope.Err() != nil {
   12704 		return
   12705 	}
   12706 	attrs := map[string]interface{}{"concat_dim": concat_dim}
   12707 	opspec := tf.OpSpec{
   12708 		Type: "SparseConcat",
   12709 		Input: []tf.Input{
   12710 			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes),
   12711 		},
   12712 		Attrs: attrs,
   12713 	}
   12714 	op := scope.AddOperation(opspec)
   12715 	return op.Output(0), op.Output(1), op.Output(2)
   12716 }
   12717 
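// A usage sketch for SparseConcat, concatenating a [1, 3] and a [1, 4] sparse
// tensor along dimension 1 (illustrative; `NewScope` and `Const` are the
// hand-written helpers from this package):
//
// ```
// s := NewScope()
// indices := []tf.Output{Const(s, [][]int64{{0, 2}}), Const(s, [][]int64{{0, 0}})}
// values := []tf.Output{Const(s, []string{"a"}), Const(s, []string{"d"})}
// shapes := []tf.Output{Const(s, []int64{1, 3}), Const(s, []int64{1, 4})}
// oi, ov, os := SparseConcat(s, indices, values, shapes, 1)
// // The result has shape [1, 7], with "a" at [0, 2] and "d" at [0, 3].
// ```
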
   12718 // Generates sparse cross from a list of sparse and dense tensors.
   12719 //
   12720 // The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
   12721 // representing features of one feature column. It outputs a 2D `SparseTensor` with
   12722 // the batchwise crosses of these features.
   12723 //
   12724 // For example, if the inputs are
   12725 //
   12726 //     inputs[0]: SparseTensor with shape = [2, 2]
   12727 //     [0, 0]: "a"
   12728 //     [1, 0]: "b"
   12729 //     [1, 1]: "c"
   12730 //
   12731 //     inputs[1]: SparseTensor with shape = [2, 1]
   12732 //     [0, 0]: "d"
   12733 //     [1, 0]: "e"
   12734 //
   12735 //     inputs[2]: Tensor [["f"], ["g"]]
   12736 //
   12737 // then the output will be
   12738 //
   12739 //     shape = [2, 2]
   12740 //     [0, 0]: "a_X_d_X_f"
   12741 //     [1, 0]: "b_X_e_X_g"
   12742 //     [1, 1]: "c_X_e_X_g"
   12743 //
// If `hashed_output=true`, then the output will be
   12745 //
   12746 //     shape = [2, 2]
   12747 //     [0, 0]: FingerprintCat64(
   12748 //                 Fingerprint64("f"), FingerprintCat64(
   12749 //                     Fingerprint64("d"), Fingerprint64("a")))
   12750 //     [1, 0]: FingerprintCat64(
   12751 //                 Fingerprint64("g"), FingerprintCat64(
   12752 //                     Fingerprint64("e"), Fingerprint64("b")))
   12753 //     [1, 1]: FingerprintCat64(
   12754 //                 Fingerprint64("g"), FingerprintCat64(
   12755 //                     Fingerprint64("e"), Fingerprint64("c")))
   12756 //
   12757 // Arguments:
   12758 //	indices: 2-D.  Indices of each input `SparseTensor`.
//	values: 1-D.  Values of each `SparseTensor`.
//	shapes: 1-D.  Shapes of each `SparseTensor`.
//	dense_inputs: 2-D.  Columns represented by dense `Tensor`.
//	hashed_output: If true, returns the hash of the cross instead of the string.
// This avoids string manipulations.
   12764 //	num_buckets: It is used if hashed_output is true.
   12765 // output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
   12766 //	hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
   12767 // function to combine the crosses fingerprints.
   12768 //
   12769 //
   12770 //
// Returns:
//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
//	output_values: 1-D.  Non-empty values of the concatenated or hashed
// `SparseTensor`.
//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
   12773 func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
   12774 	if scope.Err() != nil {
   12775 		return
   12776 	}
   12777 	attrs := map[string]interface{}{"hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_type": out_type, "internal_type": internal_type}
   12778 	opspec := tf.OpSpec{
   12779 		Type: "SparseCross",
   12780 		Input: []tf.Input{
   12781 			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs),
   12782 		},
   12783 		Attrs: attrs,
   12784 	}
   12785 	op := scope.AddOperation(opspec)
   12786 	return op.Output(0), op.Output(1), op.Output(2)
   12787 }
   12788 
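// A minimal usage sketch crossing two dense string columns (illustrative; it
// assumes empty sparse input lists are accepted by the op, and uses the
// hand-written `NewScope` and `Const` helpers from this package):
//
// ```
// s := NewScope()
// dense := []tf.Output{
// 	Const(s, [][]string{{"a"}, {"b"}}),
// 	Const(s, [][]string{{"d"}, {"e"}}),
// }
// oi, ov, os := SparseCross(s, nil, nil, nil, dense,
// 	true /* hashed_output */, 1000 /* num_buckets */, 0 /* hash_key */,
// 	tf.Int64 /* out_type */, tf.String /* internal_type */)
// // Row 0 crosses "a" with "d" and row 1 crosses "b" with "e"; with
// // hashed_output=true the values are bucketed fingerprints in [0, 1000).
// ```
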
   12789 // ListDiffAttr is an optional argument to ListDiff.
   12790 type ListDiffAttr func(optionalAttr)
   12791 
   12792 // ListDiffOutIdx sets the optional out_idx attribute to value.
   12793 // If not specified, defaults to DT_INT32
   12794 func ListDiffOutIdx(value tf.DataType) ListDiffAttr {
   12795 	return func(m optionalAttr) {
   12796 		m["out_idx"] = value
   12797 	}
   12798 }
   12799 
   12800 // Computes the difference between two lists of numbers or strings.
   12801 //
   12802 // Given a list `x` and a list `y`, this operation returns a list `out` that
   12803 // represents all values that are in `x` but not in `y`. The returned list `out`
   12804 // is sorted in the same order that the numbers appear in `x` (duplicates are
   12805 // preserved). This operation also returns a list `idx` that represents the
   12806 // position of each `out` element in `x`. In other words:
   12807 //
   12808 // `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
   12809 //
   12810 // For example, given this input:
   12811 //
   12812 // ```
   12813 // x = [1, 2, 3, 4, 5, 6]
   12814 // y = [1, 3, 5]
   12815 // ```
   12816 //
   12817 // This operation would return:
   12818 //
   12819 // ```
   12820 // out ==> [2, 4, 6]
   12821 // idx ==> [1, 3, 5]
   12822 // ```
   12823 //
   12824 // Arguments:
   12825 //	x: 1-D. Values to keep.
   12826 //	y: 1-D. Values to remove.
   12827 //
// Returns:
//	out: 1-D. Values present in `x` but not in `y`.
//	idx: 1-D. Positions of `x` values preserved in `out`.
   12829 func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output) {
   12830 	if scope.Err() != nil {
   12831 		return
   12832 	}
   12833 	attrs := map[string]interface{}{}
   12834 	for _, a := range optional {
   12835 		a(attrs)
   12836 	}
   12837 	opspec := tf.OpSpec{
   12838 		Type: "ListDiff",
   12839 		Input: []tf.Input{
   12840 			x, y,
   12841 		},
   12842 		Attrs: attrs,
   12843 	}
   12844 	op := scope.AddOperation(opspec)
   12845 	return op.Output(0), op.Output(1)
   12846 }
   12847 
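// A usage sketch exercising the optional attribute (illustrative; `NewScope`
// and `Const` are the hand-written helpers from this package):
//
// ```
// s := NewScope()
// x := Const(s, []int32{1, 2, 3, 4, 5, 6})
// y := Const(s, []int32{1, 3, 5})
// out, idx := ListDiff(s, x, y, ListDiffOutIdx(tf.Int64))
// // out evaluates to [2 4 6]; idx to [1 3 5], as int64 because of the attr.
// ```
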
   12848 // Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
   12849 //
   12850 // This Op does not require `a_indices` be sorted in standard lexicographic order.
   12851 //
   12852 // Arguments:
   12853 //	a_indices: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
   12854 //	a_values: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
   12855 //	a_shape: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
   12856 //	b: `ndims`-D Tensor.  With shape `a_shape`.
   12857 func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output) {
   12858 	if scope.Err() != nil {
   12859 		return
   12860 	}
   12861 	opspec := tf.OpSpec{
   12862 		Type: "SparseTensorDenseAdd",
   12863 		Input: []tf.Input{
   12864 			a_indices, a_values, a_shape, b,
   12865 		},
   12866 	}
   12867 	op := scope.AddOperation(opspec)
   12868 	return op.Output(0)
   12869 }
   12870 
   12871 // SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
   12872 type SparseToSparseSetOperationAttr func(optionalAttr)
   12873 
   12874 // SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
   12875 // If not specified, defaults to true
   12876 func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr {
   12877 	return func(m optionalAttr) {
   12878 		m["validate_indices"] = value
   12879 	}
   12880 }
   12881 
   12882 // Applies set operation along last dimension of 2 `SparseTensor` inputs.
   12883 //
   12884 // See SetOperationOp::SetOperationFromContext for values of `set_operation`.
   12885 //
   12886 // If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
   12887 // order and range of `set1` and `set2` indices.
   12888 //
   12889 // Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
   12890 // and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
   12891 // as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
   12892 // ignored.
   12893 //
   12894 // Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
   12895 // and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
   12896 // as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
   12897 // ignored.
   12901 //
   12902 // Output `result` is a `SparseTensor` represented by `result_indices`,
   12903 // `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
   12904 // has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
   12905 // dimension contains the result of `set_operation` applied to the corresponding
   12906 // `[0...n-1]` dimension of `set`.
   12907 //
   12908 // Arguments:
   12909 //	set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
   12910 // order.
   12911 //	set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
   12912 // order.
   12913 //	set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
   12914 // be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
   12915 // max set size across `0...n-1` dimensions.
   12916 //	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
   12917 // order.
   12918 //	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
   12919 // order.
   12920 //	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
   12921 // be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
   12922 // max set size across `0...n-1` dimensions.
   12923 //
   12924 //
// Returns:
//	result_indices: 2D indices of a `SparseTensor`.
//	result_values: 1D values of a `SparseTensor`.
//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
// is the max result set size across all `0...n-1` dimensions.
   12928 func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
   12929 	if scope.Err() != nil {
   12930 		return
   12931 	}
   12932 	attrs := map[string]interface{}{"set_operation": set_operation}
   12933 	for _, a := range optional {
   12934 		a(attrs)
   12935 	}
   12936 	opspec := tf.OpSpec{
   12937 		Type: "SparseToSparseSetOperation",
   12938 		Input: []tf.Input{
   12939 			set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape,
   12940 		},
   12941 		Attrs: attrs,
   12942 	}
   12943 	op := scope.AddOperation(opspec)
   12944 	return op.Output(0), op.Output(1), op.Output(2)
   12945 }
   12946 
   12947 // Computes numerical negative value element-wise.
   12948 //
   12949 // I.e., \\(y = -x\\).
   12950 func Neg(scope *Scope, x tf.Output) (y tf.Output) {
   12951 	if scope.Err() != nil {
   12952 		return
   12953 	}
   12954 	opspec := tf.OpSpec{
   12955 		Type: "Neg",
   12956 		Input: []tf.Input{
   12957 			x,
   12958 		},
   12959 	}
   12960 	op := scope.AddOperation(opspec)
   12961 	return op.Output(0)
   12962 }
   12963 
   12964 // Writes a `Summary` protocol buffer with a histogram.
   12965 //
   12966 // The generated
   12967 // [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
   12968 // has one summary value containing a histogram for `values`.
   12969 //
   12970 // This op reports an `InvalidArgument` error if any value is not finite.
   12971 //
   12972 // Arguments:
   12973 //	writer: A handle to a summary writer.
   12974 //	step: The step to write the summary for.
   12975 //	tag: Scalar.  Tag to use for the `Summary.Value`.
   12976 //	values: Any shape. Values to use to build the histogram.
   12977 //
   12978 // Returns the created operation.
   12979 func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation) {
   12980 	if scope.Err() != nil {
   12981 		return
   12982 	}
   12983 	opspec := tf.OpSpec{
   12984 		Type: "WriteHistogramSummary",
   12985 		Input: []tf.Input{
   12986 			writer, step, tag, values,
   12987 		},
   12988 	}
   12989 	return scope.AddOperation(opspec)
   12990 }
   12991 
   12992 // Adds two `SparseTensor` objects to produce another `SparseTensor`.
   12993 //
   12994 // The input `SparseTensor` objects' indices are assumed ordered in standard
   12995 // lexicographic order.  If this is not the case, before this step run
   12996 // `SparseReorder` to restore index ordering.
   12997 //
   12998 // By default, if two values sum to zero at some index, the output `SparseTensor`
   12999 // would still include that particular location in its index, storing a zero in the
   13000 // corresponding value slot.  To override this, callers can specify `thresh`,
   13001 // indicating that if the sum has a magnitude strictly smaller than `thresh`, its
   13002 // corresponding value and index would then not be included.  In particular,
   13003 // `thresh == 0` (default) means everything is kept and actual thresholding happens
   13004 // only for a positive value.
   13005 //
   13006 // In the following shapes, `nnz` is the count after taking `thresh` into account.
   13007 //
   13008 // Arguments:
   13009 //	a_indices: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
   13010 //	a_values: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
   13011 //	a_shape: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
   13012 //	b_indices: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
   13013 //	b_values: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
   13014 //	b_shape: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
   13015 //	thresh: 0-D.  The magnitude threshold that determines if an output value/index
   13016 // pair takes space.
   13017 func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output) {
   13018 	if scope.Err() != nil {
   13019 		return
   13020 	}
   13021 	opspec := tf.OpSpec{
   13022 		Type: "SparseAdd",
   13023 		Input: []tf.Input{
   13024 			a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
   13025 		},
   13026 	}
   13027 	op := scope.AddOperation(opspec)
   13028 	return op.Output(0), op.Output(1), op.Output(2)
   13029 }
   13030 
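// A usage sketch for SparseAdd with a zero threshold (illustrative; `NewScope`
// and `Const` are the hand-written helpers from this package):
//
// ```
// s := NewScope()
// aInd := Const(s, [][]int64{{0, 0}})
// aVal := Const(s, []float32{1})
// aShp := Const(s, []int64{2, 2})
// bInd := Const(s, [][]int64{{0, 0}, {1, 1}})
// bVal := Const(s, []float32{2, 3})
// bShp := Const(s, []int64{2, 2})
// si, sv, ss := SparseAdd(s, aInd, aVal, aShp, bInd, bVal, bShp, Const(s, float32(0)))
// // The sum has values [3 3] at indices [0 0] and [1 1], and shape [2 2].
// ```
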
   13031 // OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
   13032 type OrderedMapPeekAttr func(optionalAttr)
   13033 
   13034 // OrderedMapPeekCapacity sets the optional capacity attribute to value.
   13035 // If not specified, defaults to 0
   13036 //
   13037 // REQUIRES: value >= 0
   13038 func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr {
   13039 	return func(m optionalAttr) {
   13040 		m["capacity"] = value
   13041 	}
   13042 }
   13043 
   13044 // OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value.
   13045 // If not specified, defaults to 0
   13046 //
   13047 // REQUIRES: value >= 0
   13048 func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr {
   13049 	return func(m optionalAttr) {
   13050 		m["memory_limit"] = value
   13051 	}
   13052 }
   13053 
   13054 // OrderedMapPeekContainer sets the optional container attribute to value.
   13055 // If not specified, defaults to ""
   13056 func OrderedMapPeekContainer(value string) OrderedMapPeekAttr {
   13057 	return func(m optionalAttr) {
   13058 		m["container"] = value
   13059 	}
   13060 }
   13061 
   13062 // OrderedMapPeekSharedName sets the optional shared_name attribute to value.
   13063 // If not specified, defaults to ""
   13064 func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr {
   13065 	return func(m optionalAttr) {
   13066 		m["shared_name"] = value
   13067 	}
   13068 }
   13069 
// Op peeks at the values at the specified key.
//
// If the underlying container does not contain this key, this op will block
// until it does.  This Op is optimized for performance.
   13075 func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output) {
   13076 	if scope.Err() != nil {
   13077 		return
   13078 	}
   13079 	attrs := map[string]interface{}{"dtypes": dtypes}
   13080 	for _, a := range optional {
   13081 		a(attrs)
   13082 	}
   13083 	opspec := tf.OpSpec{
   13084 		Type: "OrderedMapPeek",
   13085 		Input: []tf.Input{
   13086 			key, indices,
   13087 		},
   13088 		Attrs: attrs,
   13089 	}
   13090 	op := scope.AddOperation(opspec)
   13091 	if scope.Err() != nil {
   13092 		return
   13093 	}
   13094 	var idx int
   13095 	var err error
   13096 	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
   13097 		scope.UpdateErr("OrderedMapPeek", err)
   13098 		return
   13099 	}
   13100 	return values
   13101 }
   13102 
   13103 // DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg.
   13104 type DecodeAndCropJpegAttr func(optionalAttr)
   13105 
   13106 // DecodeAndCropJpegChannels sets the optional channels attribute to value.
   13107 //
   13108 // value: Number of color channels for the decoded image.
   13109 // If not specified, defaults to 0
   13110 func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr {
   13111 	return func(m optionalAttr) {
   13112 		m["channels"] = value
   13113 	}
   13114 }
   13115 
   13116 // DecodeAndCropJpegRatio sets the optional ratio attribute to value.
   13117 //
   13118 // value: Downscaling ratio.
   13119 // If not specified, defaults to 1
   13120 func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr {
   13121 	return func(m optionalAttr) {
   13122 		m["ratio"] = value
   13123 	}
   13124 }
   13125 
   13126 // DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
   13127 //
// value: If true, use a slower but nicer upscaling of the
// chroma planes (yuv420/422 only).
   13130 // If not specified, defaults to true
   13131 func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr {
   13132 	return func(m optionalAttr) {
   13133 		m["fancy_upscaling"] = value
   13134 	}
   13135 }
   13136 
   13137 // DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
   13138 //
   13139 // value: If true try to recover an image from truncated input.
   13140 // If not specified, defaults to false
   13141 func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr {
   13142 	return func(m optionalAttr) {
   13143 		m["try_recover_truncated"] = value
   13144 	}
   13145 }
   13146 
   13147 // DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
   13148 //
   13149 // value: The minimum required fraction of lines before a truncated
   13150 // input is accepted.
   13151 // If not specified, defaults to 1
   13152 func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr {
   13153 	return func(m optionalAttr) {
   13154 		m["acceptable_fraction"] = value
   13155 	}
   13156 }
   13157 
   13158 // DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value.
   13159 //
// value: string specifying a hint about the algorithm used for
// decompression.  Defaults to "" which maps to a system-specific
// default.  Currently valid values are ["INTEGER_FAST",
// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., the internal
// jpeg library changes to a version that does not have that specific
// option).
   13166 // If not specified, defaults to ""
   13167 func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr {
   13168 	return func(m optionalAttr) {
   13169 		m["dct_method"] = value
   13170 	}
   13171 }
   13172 
   13173 // Decode and Crop a JPEG-encoded image to a uint8 tensor.
   13174 //
   13175 // The attr `channels` indicates the desired number of color channels for the
   13176 // decoded image.
   13177 //
   13178 // Accepted values are:
   13179 //
   13180 // *   0: Use the number of channels in the JPEG-encoded image.
   13181 // *   1: output a grayscale image.
   13182 // *   3: output an RGB image.
   13183 //
   13184 // If needed, the JPEG-encoded image is transformed to match the requested number
   13185 // of color channels.
   13186 //
   13187 // The attr `ratio` allows downscaling the image by an integer factor during
   13188 // decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
   13189 // downscaling the image later.
//
// It is equivalent to a combination of decode and crop, but much faster because
// it only decodes the part of the JPEG image within the crop window.
   13194 //
   13195 // Arguments:
   13196 //	contents: 0-D.  The JPEG-encoded image.
   13197 //	crop_window: 1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].
   13198 //
// Returns 3-D with shape `[height, width, channels]`.
   13200 func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output) {
   13201 	if scope.Err() != nil {
   13202 		return
   13203 	}
   13204 	attrs := map[string]interface{}{}
   13205 	for _, a := range optional {
   13206 		a(attrs)
   13207 	}
   13208 	opspec := tf.OpSpec{
   13209 		Type: "DecodeAndCropJpeg",
   13210 		Input: []tf.Input{
   13211 			contents, crop_window,
   13212 		},
   13213 		Attrs: attrs,
   13214 	}
   13215 	op := scope.AddOperation(opspec)
   13216 	return op.Output(0)
   13217 }
   13218 
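// A usage sketch (illustrative; `jpegBytes` is a hypothetical placeholder for
// real JPEG data read elsewhere, and `NewScope` and `Const` are the
// hand-written helpers from this package):
//
// ```
// s := NewScope()
// contents := Const(s, string(jpegBytes))     // 0-D string scalar
// crop := Const(s, []int32{10, 20, 100, 100}) // [crop_y, crop_x, crop_height, crop_width]
// img := DecodeAndCropJpeg(s, contents, crop, DecodeAndCropJpegChannels(3))
// // img is a uint8 tensor of shape [100, 100, 3].
// ```
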
   13219 // AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
   13220 type AllCandidateSamplerAttr func(optionalAttr)
   13221 
   13222 // AllCandidateSamplerSeed sets the optional seed attribute to value.
   13223 //
   13224 // value: If either seed or seed2 are set to be non-zero, the random number
   13225 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   13226 // random seed.
   13227 // If not specified, defaults to 0
   13228 func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr {
   13229 	return func(m optionalAttr) {
   13230 		m["seed"] = value
   13231 	}
   13232 }
   13233 
   13234 // AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
   13235 //
// value: A second seed to avoid seed collision.
   13237 // If not specified, defaults to 0
   13238 func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr {
   13239 	return func(m optionalAttr) {
   13240 		m["seed2"] = value
   13241 	}
   13242 }
   13243 
   13244 // Generates labels for candidate sampling with a learned unigram distribution.
   13245 //
   13246 // See explanations of candidate sampling and the data formats at
   13247 // go/candidate-sampling.
   13248 //
   13249 // For each batch, this op picks a single set of sampled candidate labels.
   13250 //
   13251 // The advantages of sampling candidates per-batch are simplicity and the
   13252 // possibility of efficient dense matrix multiplication. The disadvantage is that
   13253 // the sampled candidates must be chosen independently of the context and of the
   13254 // true labels.
   13255 //
   13256 // Arguments:
   13257 //	true_classes: A batch_size * num_true matrix, in which each row contains the
   13258 // IDs of the num_true target_classes in the corresponding original label.
   13259 //	num_true: Number of true labels per context.
   13260 //	num_sampled: Number of candidates to produce.
   13261 //	unique: If unique is true, we sample with rejection, so that all sampled
   13262 // candidates in a batch are unique. This requires some approximation to
   13263 // estimate the post-rejection sampling probabilities.
   13264 //
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
   13272 func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
   13273 	if scope.Err() != nil {
   13274 		return
   13275 	}
   13276 	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique}
   13277 	for _, a := range optional {
   13278 		a(attrs)
   13279 	}
   13280 	opspec := tf.OpSpec{
   13281 		Type: "AllCandidateSampler",
   13282 		Input: []tf.Input{
   13283 			true_classes,
   13284 		},
   13285 		Attrs: attrs,
   13286 	}
   13287 	op := scope.AddOperation(opspec)
   13288 	return op.Output(0), op.Output(1), op.Output(2)
   13289 }
   13290 
   13291 // Returns the element-wise min of two SparseTensors.
   13292 //
   13293 // Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
   13294 //
   13295 // Arguments:
   13296 //	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   13297 // SparseTensor, in the canonical lexicographic ordering.
   13298 //	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
   13299 //	a_shape: 1-D.  Shape of the input SparseTensor.
   13300 //	b_indices: counterpart to `a_indices` for the other operand.
   13301 //	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
   13302 //	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
   13303 //
// Returns:
//	output_indices: 2-D.  The indices of the output SparseTensor.
//	output_values: 1-D.  The values of the output SparseTensor.
   13305 func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
   13306 	if scope.Err() != nil {
   13307 		return
   13308 	}
   13309 	opspec := tf.OpSpec{
   13310 		Type: "SparseSparseMinimum",
   13311 		Input: []tf.Input{
   13312 			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
   13313 		},
   13314 	}
   13315 	op := scope.AddOperation(opspec)
   13316 	return op.Output(0), op.Output(1)
   13317 }
   13318 
   13319 // Constructs a tensor by tiling a given tensor.
   13320 //
   13321 // This operation creates a new tensor by replicating `input` `multiples` times.
   13322 // The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
   13323 // and the values of `input` are replicated `multiples[i]` times along the 'i'th
   13324 // dimension. For example, tiling `[a b c d]` by `[2]` produces
   13325 // `[a b c d a b c d]`.
   13326 //
   13327 // Arguments:
   13328 //	input: 1-D or higher.
   13329 //	multiples: 1-D. Length must be the same as the number of dimensions in `input`
   13330 func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
   13331 	if scope.Err() != nil {
   13332 		return
   13333 	}
   13334 	opspec := tf.OpSpec{
   13335 		Type: "Tile",
   13336 		Input: []tf.Input{
   13337 			input, multiples,
   13338 		},
   13339 	}
   13340 	op := scope.AddOperation(opspec)
   13341 	return op.Output(0)
   13342 }
   13343 
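// A usage sketch for Tile (illustrative; `NewScope` and `Const` are the
// hand-written helpers from this package):
//
// ```
// s := NewScope()
// t := Tile(s, Const(s, []int32{1, 2}), Const(s, []int32{3}))
// // When the graph is run, t evaluates to [1 2 1 2 1 2].
// ```
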
   13344 // Saves the input tensors to disk.
   13345 //
   13346 // The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
   13347 // is written to `filename` with name `tensor_names[i]`.
   13348 //
   13349 // See also `SaveSlices`.
   13350 //
   13351 // Arguments:
   13352 //	filename: Must have a single element. The name of the file to which we write
   13353 // the tensor.
   13354 //	tensor_names: Shape `[N]`. The names of the tensors to be saved.
   13355 //	data: `N` tensors to save.
   13356 //
   13357 // Returns the created operation.
   13358 func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
   13359 	if scope.Err() != nil {
   13360 		return
   13361 	}
   13362 	opspec := tf.OpSpec{
   13363 		Type: "Save",
   13364 		Input: []tf.Input{
   13365 			filename, tensor_names, tf.OutputList(data),
   13366 		},
   13367 	}
   13368 	return scope.AddOperation(opspec)
   13369 }
   13370 
// Returns element-wise remainder of division.
//
// When `x < 0` xor `y < 0` is true, this follows Python semantics in that the
// result here is consistent with a flooring divide. E.g.
// `floor(x / y) * y + mod(x, y) = x`.
   13375 //
   13376 // *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
   13377 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   13378 func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   13379 	if scope.Err() != nil {
   13380 		return
   13381 	}
   13382 	opspec := tf.OpSpec{
   13383 		Type: "FloorMod",
   13384 		Input: []tf.Input{
   13385 			x, y,
   13386 		},
   13387 	}
   13388 	op := scope.AddOperation(opspec)
   13389 	return op.Output(0)
   13390 }
   13391 
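// A worked sketch of the flooring semantics (illustrative; `NewScope` and
// `Const` are the hand-written helpers from this package):
//
// ```
// s := NewScope()
// z := FloorMod(s, Const(s, []int32{-7, 7}), Const(s, []int32{3, -3}))
// // floor(-7/3) = -3, so -7 - (-3*3) = 2; floor(7/-3) = -3, so 7 - 9 = -2.
// // z evaluates to [2 -2].
// ```
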
   13392 // TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
   13393 type TakeManySparseFromTensorsMapAttr func(optionalAttr)
   13394 
   13395 // TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
   13396 //
   13397 // value: The container name for the `SparseTensorsMap` read by this op.
   13398 // If not specified, defaults to ""
   13399 func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr {
   13400 	return func(m optionalAttr) {
   13401 		m["container"] = value
   13402 	}
   13403 }
   13404 
   13405 // TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
   13406 //
   13407 // value: The shared name for the `SparseTensorsMap` read by this op.
   13408 // It should not be blank; rather the `shared_name` or unique Operation name
   13409 // of the Op that created the original `SparseTensorsMap` should be used.
   13410 // If not specified, defaults to ""
   13411 func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr {
   13412 	return func(m optionalAttr) {
   13413 		m["shared_name"] = value
   13414 	}
   13415 }
   13416 
   13417 // Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
   13418 //
   13419 // The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
   13420 // `N` is the minibatch size and the rows correspond to the output handles of
   13421 // `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
   13422 // original `SparseTensor` objects that went into the given input ops must all
   13423 // match.  When the final `SparseTensor` is created, it has rank one
   13424 // higher than the ranks of the incoming `SparseTensor` objects
   13425 // (they have been concatenated along a new row dimension on the left).
   13426 //
   13427 // The output `SparseTensor` object's shape values for all dimensions but the
   13428 // first are the max across the input `SparseTensor` objects' shape values
   13429 // for the corresponding dimensions.  Its first shape value is `N`, the minibatch
   13430 // size.
   13431 //
   13432 // The input `SparseTensor` objects' indices are assumed ordered in
   13433 // standard lexicographic order.  If this is not the case, after this
   13434 // step run `SparseReorder` to restore index ordering.
   13435 //
   13436 // For example, if the handles represent an input, which is a `[2, 3]` matrix
   13437 // representing two original `SparseTensor` objects:
   13438 //
   13439 // ```
   13440 //     index = [ 0]
   13441 //             [10]
   13442 //             [20]
   13443 //     values = [1, 2, 3]
   13444 //     shape = [50]
   13445 // ```
   13446 //
   13447 // and
   13448 //
   13449 // ```
   13450 //     index = [ 2]
   13451 //             [10]
   13452 //     values = [4, 5]
   13453 //     shape = [30]
   13454 // ```
   13455 //
   13456 // then the final `SparseTensor` will be:
   13457 //
   13458 // ```
   13459 //     index = [0  0]
   13460 //             [0 10]
   13461 //             [0 20]
   13462 //             [1  2]
   13463 //             [1 10]
   13464 //     values = [1, 2, 3, 4, 5]
   13465 //     shape = [2 50]
   13466 // ```
   13467 //
   13468 // Arguments:
//	sparse_handles: 1-D.  The `N` serialized `SparseTensor` objects.
// Shape: `[N]`.
   13471 //	dtype: The `dtype` of the `SparseTensor` objects stored in the
   13472 // `SparseTensorsMap`.
   13473 //
// Returns:
//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
   13475 func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
   13476 	if scope.Err() != nil {
   13477 		return
   13478 	}
   13479 	attrs := map[string]interface{}{"dtype": dtype}
   13480 	for _, a := range optional {
   13481 		a(attrs)
   13482 	}
   13483 	opspec := tf.OpSpec{
   13484 		Type: "TakeManySparseFromTensorsMap",
   13485 		Input: []tf.Input{
   13486 			sparse_handles,
   13487 		},
   13488 		Attrs: attrs,
   13489 	}
   13490 	op := scope.AddOperation(opspec)
   13491 	return op.Output(0), op.Output(1), op.Output(2)
   13492 }
   13493 
   13494 // Says whether the targets are in the top `K` predictions.
   13495 //
   13496 // This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
   13497 // prediction for the target class is among the top `k` predictions among
   13498 // all predictions for example `i`. Note that the behavior of `InTopK` differs
   13499 // from the `TopK` op in its handling of ties; if multiple classes have the
   13500 // same prediction value and straddle the top-`k` boundary, all of those
   13501 // classes are considered to be in the top `k`.
   13502 //
   13503 // More formally, let
   13504 //
   13505 //   \\(predictions_i\\) be the predictions for all classes for example `i`,
   13506 //   \\(targets_i\\) be the target class for example `i`,
   13507 //   \\(out_i\\) be the output for example `i`,
   13508 //
   13509 // $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
   13510 //
   13511 // Arguments:
   13512 //	predictions: A `batch_size` x `classes` tensor.
   13513 //	targets: A `batch_size` vector of class ids.
   13514 //	k: Number of top elements to look at for computing precision.
   13515 //
   13516 // Returns Computed precision at `k` as a `bool Tensor`.
   13517 func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output) {
   13518 	if scope.Err() != nil {
   13519 		return
   13520 	}
   13521 	opspec := tf.OpSpec{
   13522 		Type: "InTopKV2",
   13523 		Input: []tf.Input{
   13524 			predictions, targets, k,
   13525 		},
   13526 	}
   13527 	op := scope.AddOperation(opspec)
   13528 	return op.Output(0)
   13529 }
   13530 
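// A usage sketch (illustrative; `NewScope` and `Const` are the hand-written
// helpers from this package):
//
// ```
// s := NewScope()
// preds := Const(s, [][]float32{{0.1, 0.8, 0.1}})
// targets := Const(s, []int32{1})
// ok := InTopKV2(s, preds, targets, Const(s, int32(1)))
// // Class 1 has the highest prediction for example 0, so ok evaluates to [true].
// ```
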
   13531 // Assigns a new value to a variable.
   13532 //
   13533 // Any ReadVariableOp with a control dependency on this op is guaranteed to return
   13534 // this value or a subsequent newer value of the variable.
   13535 //
   13536 // Arguments:
   13537 //	resource: handle to the resource in which to store the variable.
//	value: the value to which the variable will be set.
   13539 //
   13540 // Returns the created operation.
   13541 func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
   13542 	if scope.Err() != nil {
   13543 		return
   13544 	}
   13545 	opspec := tf.OpSpec{
   13546 		Type: "AssignVariableOp",
   13547 		Input: []tf.Input{
   13548 			resource, value,
   13549 		},
   13550 	}
   13551 	return scope.AddOperation(opspec)
   13552 }
   13553 
   13554 // Returns a tensor of ones with the same shape and type as x.
   13555 //
   13556 // Arguments:
   13557 //	x: a tensor of type T.
   13558 //
   13559 // Returns a tensor of the same shape and type as x but filled with ones.
   13560 func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
   13561 	if scope.Err() != nil {
   13562 		return
   13563 	}
   13564 	opspec := tf.OpSpec{
   13565 		Type: "OnesLike",
   13566 		Input: []tf.Input{
   13567 			x,
   13568 		},
   13569 	}
   13570 	op := scope.AddOperation(opspec)
   13571 	return op.Output(0)
   13572 }
   13573 
   13574 // The gradient of SparseFillEmptyRows.
   13575 //
   13576 // Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
   13577 // shaped `[N_full]`, where `N_full >= N` and copies data into either
   13578 // `d_values` or `d_default_value`.  Here `d_values` is shaped `[N]` and
   13579 // `d_default_value` is a scalar.
   13580 //
   13581 //   d_values[j] = grad_values[reverse_index_map[j]]
   13582 //   d_default_value = sum_{k : 0 .. N_full - 1} (
   13583 //      grad_values[k] * 1{k not in reverse_index_map})
   13584 //
   13585 // Arguments:
   13586 //	reverse_index_map: 1-D.  The reverse index map from SparseFillEmptyRows.
   13587 //	grad_values: 1-D.  The gradients from backprop.
   13588 //
// Returns:
//	d_values: 1-D.  The backprop into values.
//	d_default_value: 0-D.  The backprop into default_value.
   13590 func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output) {
   13591 	if scope.Err() != nil {
   13592 		return
   13593 	}
   13594 	opspec := tf.OpSpec{
   13595 		Type: "SparseFillEmptyRowsGrad",
   13596 		Input: []tf.Input{
   13597 			reverse_index_map, grad_values,
   13598 		},
   13599 	}
   13600 	op := scope.AddOperation(opspec)
   13601 	return op.Output(0), op.Output(1)
   13602 }
   13603 
// Computes the scaled exponential linear unit.
//
// Computes `scale * alpha * (exp(features) - 1)` if `features < 0`,
// and `scale * features` otherwise.
   13607 //
   13608 // See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
   13609 func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
   13610 	if scope.Err() != nil {
   13611 		return
   13612 	}
   13613 	opspec := tf.OpSpec{
   13614 		Type: "Selu",
   13615 		Input: []tf.Input{
   13616 			features,
   13617 		},
   13618 	}
   13619 	op := scope.AddOperation(opspec)
   13620 	return op.Output(0)
   13621 }
   13622 
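// A usage sketch (illustrative; per the paper the SELU constants are
// approximately scale = 1.0507 and alpha = 1.6733, and `NewScope` and `Const`
// are the hand-written helpers from this package):
//
// ```
// s := NewScope()
// a := Selu(s, Const(s, []float32{-1, 0, 1}))
// // a ≈ [scale*alpha*(exp(-1)-1), 0, scale*1] ≈ [-1.1113 0 1.0507]
// ```
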
   13623 // SetSizeAttr is an optional argument to SetSize.
   13624 type SetSizeAttr func(optionalAttr)
   13625 
   13626 // SetSizeValidateIndices sets the optional validate_indices attribute to value.
   13627 // If not specified, defaults to true
   13628 func SetSizeValidateIndices(value bool) SetSizeAttr {
   13629 	return func(m optionalAttr) {
   13630 		m["validate_indices"] = value
   13631 	}
   13632 }
   13633 
   13634 // Number of unique elements along last dimension of input `set`.
   13635 //
   13636 // Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
   13637 // and `set_shape`. The last dimension contains values in a set, duplicates are
   13638 // allowed but ignored.
   13639 //
   13640 // If `validate_indices` is `True`, this op validates the order and range of `set`
   13641 // indices.
   13642 //
   13643 // Arguments:
   13644 //	set_indices: 2D `Tensor`, indices of a `SparseTensor`.
   13645 //	set_values: 1D `Tensor`, values of a `SparseTensor`.
   13646 //	set_shape: 1D `Tensor`, shape of a `SparseTensor`.
   13647 //
   13648 // Returns For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
   13649 // `n-1` dimensions as `set`. Each value is the number of unique elements in
   13650 // the corresponding `[0...n-1]` dimension of `set`.
   13651 func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {
   13652 	if scope.Err() != nil {
   13653 		return
   13654 	}
   13655 	attrs := map[string]interface{}{}
   13656 	for _, a := range optional {
   13657 		a(attrs)
   13658 	}
   13659 	opspec := tf.OpSpec{
   13660 		Type: "SetSize",
   13661 		Input: []tf.Input{
   13662 			set_indices, set_values, set_shape,
   13663 		},
   13664 		Attrs: attrs,
   13665 	}
   13666 	op := scope.AddOperation(opspec)
   13667 	return op.Output(0)
   13668 }
   13669 
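// A usage sketch counting unique values per row of a [2, 2] sparse set
// (illustrative; `NewScope` and `Const` are the hand-written helpers from
// this package):
//
// ```
// s := NewScope()
// ind := Const(s, [][]int64{{0, 0}, {0, 1}, {1, 0}})
// val := Const(s, []int32{7, 7, 9})
// shp := Const(s, []int64{2, 2})
// sz := SetSize(s, ind, val, shp)
// // Row 0 holds {7, 7} -> 1 unique value; row 1 holds {9} -> 1. sz = [1 1].
// ```
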
// Computes the sign and the log of the absolute value of the determinant of
// one or more square matrices.
   13673 //
   13674 // The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
   13675 // form square matrices. The outputs are two tensors containing the signs and
   13676 // absolute values of the log determinants for all N input submatrices
   13677 // `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).
   13678 // The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU
   13679 // is the LU decomposition of the input and P is the corresponding
   13680 // permutation matrix.
   13681 //
   13682 // Arguments:
   13683 //	input: Shape is `[N, M, M]`.
   13684 //
// Returns:
//	sign: The signs of the log determinants of the inputs. Shape is `[N]`.
//	log_abs_determinant: The logs of the absolute values of the determinants
// of the N input matrices.  Shape is `[N]`.
   13687 func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output) {
   13688 	if scope.Err() != nil {
   13689 		return
   13690 	}
   13691 	opspec := tf.OpSpec{
   13692 		Type: "LogMatrixDeterminant",
   13693 		Input: []tf.Input{
   13694 			input,
   13695 		},
   13696 	}
   13697 	op := scope.AddOperation(opspec)
   13698 	return op.Output(0), op.Output(1)
   13699 }
   13700 
   13701 // SumAttr is an optional argument to Sum.
   13702 type SumAttr func(optionalAttr)
   13703 
   13704 // SumKeepDims sets the optional keep_dims attribute to value.
   13705 //
   13706 // value: If true, retain reduced dimensions with length 1.
   13707 // If not specified, defaults to false
   13708 func SumKeepDims(value bool) SumAttr {
   13709 	return func(m optionalAttr) {
   13710 		m["keep_dims"] = value
   13711 	}
   13712 }
   13713 
   13714 // Computes the sum of elements across dimensions of a tensor.
   13715 //
   13716 // Reduces `input` along the dimensions given in `axis`. Unless
   13717 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   13718 // `axis`. If `keep_dims` is true, the reduced dimensions are
   13719 // retained with length 1.
   13720 //
   13721 // Arguments:
   13722 //	input: The tensor to reduce.
   13723 //	axis: The dimensions to reduce. Must be in the range
   13724 // `[-rank(input), rank(input))`.
   13725 //
   13726 // Returns The reduced tensor.
   13727 func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output) {
   13728 	if scope.Err() != nil {
   13729 		return
   13730 	}
   13731 	attrs := map[string]interface{}{}
   13732 	for _, a := range optional {
   13733 		a(attrs)
   13734 	}
   13735 	opspec := tf.OpSpec{
   13736 		Type: "Sum",
   13737 		Input: []tf.Input{
   13738 			input, axis,
   13739 		},
   13740 		Attrs: attrs,
   13741 	}
   13742 	op := scope.AddOperation(opspec)
   13743 	return op.Output(0)
   13744 }
   13745 
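// A usage sketch showing reduction with and without kept dimensions
// (illustrative; `NewScope` and `Const` are the hand-written helpers from
// this package):
//
// ```
// s := NewScope()
// x := Const(s, [][]float32{{1, 2}, {3, 4}})
// total := Sum(s, x, Const(s, []int32{0, 1}))                // scalar 10
// rows := Sum(s, x, Const(s, []int32{1}), SumKeepDims(true)) // [[3] [7]]
// ```
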
   13746 // Delete the tensor specified by its handle in the session.
   13747 //
   13748 // Arguments:
   13749 //	handle: The handle for a tensor stored in the session state.
   13750 //
   13751 // Returns the created operation.
   13752 func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
   13753 	if scope.Err() != nil {
   13754 		return
   13755 	}
   13756 	opspec := tf.OpSpec{
   13757 		Type: "DeleteSessionTensor",
   13758 		Input: []tf.Input{
   13759 			handle,
   13760 		},
   13761 	}
   13762 	return scope.AddOperation(opspec)
   13763 }
   13764 
   13765 // L2 Loss.
   13766 //
   13767 // Computes half the L2 norm of a tensor without the `sqrt`:
   13768 //
   13769 //     output = sum(t ** 2) / 2
   13770 //
   13771 // Arguments:
   13772 //	t: Typically 2-D, but may have any dimensions.
   13773 //
   13774 // Returns 0-D.
   13775 func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
   13776 	if scope.Err() != nil {
   13777 		return
   13778 	}
   13779 	opspec := tf.OpSpec{
   13780 		Type: "L2Loss",
   13781 		Input: []tf.Input{
   13782 			t,
   13783 		},
   13784 	}
   13785 	op := scope.AddOperation(opspec)
   13786 	return op.Output(0)
   13787 }
   13788 
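// A worked sketch (illustrative; `NewScope` and `Const` are the hand-written
// helpers from this package):
//
// ```
// s := NewScope()
// l := L2Loss(s, Const(s, []float32{3, 4}))
// // l evaluates to (3*3 + 4*4) / 2 = 12.5.
// ```
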
   13789 // DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
   13790 type DenseToSparseSetOperationAttr func(optionalAttr)
   13791 
   13792 // DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
   13793 // If not specified, defaults to true
   13794 func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr {
   13795 	return func(m optionalAttr) {
   13796 		m["validate_indices"] = value
   13797 	}
   13798 }
   13799 
   13800 // Applies set operation along last dimension of `Tensor` and `SparseTensor`.
   13801 //
   13802 // See SetOperationOp::SetOperationFromContext for values of `set_operation`.
   13803 //
   13804 // Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
   13805 // and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
   13806 // as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
   13807 // ignored.
   13808 //
   13809 // If `validate_indices` is `True`, this op validates the order and range of `set2`
   13810 // indices.
   13811 //
   13812 // Output `result` is a `SparseTensor` represented by `result_indices`,
   13813 // `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
   13814 // has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
   13815 // dimension contains the result of `set_operation` applied to the corresponding
   13816 // `[0...n-1]` dimension of `set`.
   13817 //
   13818 // Arguments:
   13819 //	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
   13820 // Dimension `n` contains values in a set, duplicates are allowed but ignored.
   13821 //	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
   13822 // order.
   13823 //	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
   13824 // order.
   13825 //	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
   13826 // be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
   13827 // max set size across `n-1` dimensions.
   13828 //
   13829 //
// Returns:
//	result_indices: 2D indices of a `SparseTensor`.
//	result_values: 1D values of a `SparseTensor`.
//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
// is the max result set size across all `0...n-1` dimensions.
   13833 func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
   13834 	if scope.Err() != nil {
   13835 		return
   13836 	}
   13837 	attrs := map[string]interface{}{"set_operation": set_operation}
   13838 	for _, a := range optional {
   13839 		a(attrs)
   13840 	}
   13841 	opspec := tf.OpSpec{
   13842 		Type: "DenseToSparseSetOperation",
   13843 		Input: []tf.Input{
   13844 			set1, set2_indices, set2_values, set2_shape,
   13845 		},
   13846 		Attrs: attrs,
   13847 	}
   13848 	op := scope.AddOperation(opspec)
   13849 	return op.Output(0), op.Output(1), op.Output(2)
   13850 }
   13851 
   13852 // FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
   13853 type FusedResizeAndPadConv2DAttr func(optionalAttr)
   13854 
   13855 // FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
   13856 //
   13857 // value: If true, rescale input by (new_height - 1) / (height - 1),
   13858 // which exactly aligns the 4 corners of images and resized images. If false, rescale
   13859 // by new_height / height. Treat similarly the width dimension.
   13860 // If not specified, defaults to false
   13861 func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr {
   13862 	return func(m optionalAttr) {
   13863 		m["resize_align_corners"] = value
   13864 	}
   13865 }
   13866 
   13867 // Performs a resize and padding as a preprocess during a convolution.
   13868 //
   13869 // It's often possible to do spatial transformations more efficiently as part of
   13870 // the packing stage of a convolution, so this op allows for an optimized
   13871 // implementation where these stages are fused together. This prevents the need to
   13872 // write out the intermediate results as whole tensors, reducing memory pressure,
   13873 // and we can get some latency gains by merging the transformation calculations.
   13874 // The data_format attribute for Conv2D isn't supported by this op, and defaults to
   13875 // 'NHWC' order.
   13876 // Internally this op uses a single per-graph scratch buffer, which means that it
   13877 // will block if multiple versions are being run in parallel. This is because this
   13878 // operator is primarily an optimization to minimize memory usage.
   13879 //
   13880 // Arguments:
   13881 //	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
   13882 //	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
   13883 // new size for the images.
   13884 //	paddings: A two-column matrix specifying the padding sizes. The number of
   13885 // rows must be the same as the rank of `input`.
   13886 //	filter: 4-D with shape
   13887 // `[filter_height, filter_width, in_channels, out_channels]`.
   13888 //
   13889 //	strides: 1-D of length 4.  The stride of the sliding window for each dimension
   13890 // of `input`. Must be in the same order as the dimension specified with format.
   13891 //	padding: The type of padding algorithm to use.
   13892 func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output) {
   13893 	if scope.Err() != nil {
   13894 		return
   13895 	}
   13896 	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
   13897 	for _, a := range optional {
   13898 		a(attrs)
   13899 	}
   13900 	opspec := tf.OpSpec{
   13901 		Type: "FusedResizeAndPadConv2D",
   13902 		Input: []tf.Input{
   13903 			input, size, paddings, filter,
   13904 		},
   13905 		Attrs: attrs,
   13906 	}
   13907 	op := scope.AddOperation(opspec)
   13908 	return op.Output(0)
   13909 }
   13910 
   13911 // Subtracts a value from the current value of a variable.
   13912 //
// Any ReadVariableOp which depends directly or indirectly on this assign is
// guaranteed to see the decremented value or a subsequent newer one.
//
// Outputs the decremented value, which can be used to totally order the
// decrements to this variable.
   13918 //
   13919 // Arguments:
   13920 //	resource: handle to the resource in which to store the variable.
//	value: the value that will be subtracted from the variable.
   13922 //
   13923 // Returns the created operation.
   13924 func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
   13925 	if scope.Err() != nil {
   13926 		return
   13927 	}
   13928 	opspec := tf.OpSpec{
   13929 		Type: "AssignSubVariableOp",
   13930 		Input: []tf.Input{
   13931 			resource, value,
   13932 		},
   13933 	}
   13934 	return scope.AddOperation(opspec)
   13935 }
   13936 
   13937 // RestoreAttr is an optional argument to Restore.
   13938 type RestoreAttr func(optionalAttr)
   13939 
   13940 // RestorePreferredShard sets the optional preferred_shard attribute to value.
   13941 //
   13942 // value: Index of file to open first if multiple files match
   13943 // `file_pattern`.
   13944 // If not specified, defaults to -1
   13945 func RestorePreferredShard(value int64) RestoreAttr {
   13946 	return func(m optionalAttr) {
   13947 		m["preferred_shard"] = value
   13948 	}
   13949 }
   13950 
   13951 // Restores a tensor from checkpoint files.
   13952 //
   13953 // Reads a tensor stored in one or several files. If there are several files (for
   13954 // instance because a tensor was saved as slices), `file_pattern` may contain
   13955 // wildcard symbols (`*` and `?`) in the filename portion only, not in the
   13956 // directory portion.
   13957 //
   13958 // If a `file_pattern` matches several files, `preferred_shard` can be used to hint
   13959 // in which file the requested tensor is likely to be found. This op will first
   13960 // open the file at index `preferred_shard` in the list of matching files and try
// to restore tensors from that file.  Only if some tensors or tensor slices are
// not found in that first file does the Op open all the files. Setting
   13963 // `preferred_shard` to match the value passed as the `shard` input
   13964 // of a matching `Save` Op may speed up Restore.  This attribute only affects
   13965 // performance, not correctness.  The default value -1 means files are processed in
   13966 // order.
   13967 //
   13968 // See also `RestoreSlice`.
   13969 //
   13970 // Arguments:
   13971 //	file_pattern: Must have a single element. The pattern of the files from
   13972 // which we read the tensor.
   13973 //	tensor_name: Must have a single element. The name of the tensor to be
   13974 // restored.
   13975 //	dt: The type of the tensor to be restored.
   13976 //
   13977 // Returns The restored tensor.
   13978 func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output) {
   13979 	if scope.Err() != nil {
   13980 		return
   13981 	}
   13982 	attrs := map[string]interface{}{"dt": dt}
   13983 	for _, a := range optional {
   13984 		a(attrs)
   13985 	}
   13986 	opspec := tf.OpSpec{
   13987 		Type: "Restore",
   13988 		Input: []tf.Input{
   13989 			file_pattern, tensor_name,
   13990 		},
   13991 		Attrs: attrs,
   13992 	}
   13993 	op := scope.AddOperation(opspec)
   13994 	return op.Output(0)
   13995 }
   13996 
   13997 // QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
   13998 type QuantizedResizeBilinearAttr func(optionalAttr)
   13999 
   14000 // QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
   14001 //
   14002 // value: If true, rescale input by (new_height - 1) / (height - 1), which
   14003 // exactly aligns the 4 corners of images and resized images. If false, rescale
   14004 // by new_height / height. Treat similarly the width dimension.
   14005 // If not specified, defaults to false
   14006 func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr {
   14007 	return func(m optionalAttr) {
   14008 		m["align_corners"] = value
   14009 	}
   14010 }
   14011 
   14012 // Resize quantized `images` to `size` using quantized bilinear interpolation.
   14013 //
   14014 // Input images and output images must be quantized types.
   14015 //
   14016 // Arguments:
   14017 //	images: 4-D with shape `[batch, height, width, channels]`.
    14018 //	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
   14019 // new size for the images.
    14020 //	min: The float value that the lowest quantized input value represents.
    14021 //	max: The float value that the highest quantized input value represents.
    14022 //
   14023 // Returns 4-D with shape
   14024 // `[batch, new_height, new_width, channels]`.
   14025 func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output) {
   14026 	if scope.Err() != nil {
   14027 		return
   14028 	}
   14029 	attrs := map[string]interface{}{}
   14030 	for _, a := range optional {
   14031 		a(attrs)
   14032 	}
   14033 	opspec := tf.OpSpec{
   14034 		Type: "QuantizedResizeBilinear",
   14035 		Input: []tf.Input{
   14036 			images, size, min, max,
   14037 		},
   14038 		Attrs: attrs,
   14039 	}
   14040 	op := scope.AddOperation(opspec)
   14041 	return op.Output(0), op.Output(1), op.Output(2)
   14042 }
   14043 
   14044 // Computes the minimum along segments of a tensor.
   14045 //
    14046 // Read the section on segmentation in the TensorFlow math ops documentation for an explanation of
   14047 // segments.
   14048 //
   14049 // Computes a tensor such that
   14050 // \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
   14051 // that `segment_ids[j] == i`.
   14052 //
   14053 // If the min is empty for a given segment ID `i`, `output[i] = 0`.
   14054 //
   14055 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   14056 // <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
   14057 // </div>
   14058 //
   14059 // Arguments:
   14060 //
   14061 //	segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
   14062 // first dimension.  Values should be sorted and can be repeated.
   14063 //
    14064 // Returns a tensor with the same shape as `data`, except for dimension 0 which
   14065 // has size `k`, the number of segments.
   14066 func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
   14067 	if scope.Err() != nil {
   14068 		return
   14069 	}
   14070 	opspec := tf.OpSpec{
   14071 		Type: "SegmentMin",
   14072 		Input: []tf.Input{
   14073 			data, segment_ids,
   14074 		},
   14075 	}
   14076 	op := scope.AddOperation(opspec)
   14077 	return op.Output(0)
   14078 }
   14079 
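// A worked example of the definition above, as a sketch using this package's
// NewScope and Const helpers; the output values are computed by hand from
// the formula.
//
// ```
// s := op.NewScope()
// data := op.Const(s, [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
// ids := op.Const(s, []int32{0, 0, 1}) // rows 0 and 1 form segment 0, row 2 forms segment 1
// min := op.SegmentMin(s, data, ids)
// // The element-wise min over segment 0 is [1, 2, 2, 1], so the result
// // evaluates to [[1, 2, 2, 1], [5, 6, 7, 8]].
// _ = min
// ```
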
   14080 // SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
   14081 type SdcaOptimizerAttr func(optionalAttr)
   14082 
   14083 // SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
   14084 //
    14085 // value: Whether to use adaptive SDCA for the inner loop.
   14086 // If not specified, defaults to false
   14087 func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr {
   14088 	return func(m optionalAttr) {
   14089 		m["adaptative"] = value
   14090 	}
   14091 }
   14092 
   14093 // Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
   14094 //
    14095 // linear models with L1 + L2 regularization. As the global optimization objective is
    14096 // strongly convex, the optimizer optimizes the dual objective at each step. The
    14097 // optimizer applies each update one example at a time. Examples are sampled
    14098 // uniformly, the optimizer is learning-rate free, and it enjoys a linear convergence
    14099 // rate.
   14100 //
   14101 // [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
   14102 // Shai Shalev-Shwartz, Tong Zhang. 2012
   14103 //
    14104 // $$\text{Loss Objective} = \sum_{i} f_{i}(w x_{i}) + \frac{l_2}{2} \lVert w \rVert_2^2 + l_1 \lVert w \rVert_1$$
   14105 //
   14106 // [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
   14107 // Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
   14108 // Peter Richtarik, Martin Takac. 2015
   14109 //
   14110 // [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
   14111 // Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
   14112 //
   14113 // Arguments:
   14114 //	sparse_example_indices: a list of vectors which contain example indices.
   14115 //	sparse_feature_indices: a list of vectors which contain feature indices.
   14116 //	sparse_feature_values: a list of vectors which contains feature value
   14117 // associated with each feature group.
   14118 //	dense_features: a list of matrices which contains the dense feature values.
   14119 //	example_weights: a vector which contains the weight associated with each
   14120 // example.
   14121 //	example_labels: a vector which contains the label/target associated with each
   14122 // example.
    14123 //	sparse_indices: a list of vectors where each value is the index that has a
    14124 // corresponding weight in sparse_weights. This field may be omitted for the
   14125 // dense approach.
   14126 //	sparse_weights: a list of vectors where each value is the weight associated with
   14127 // a sparse feature group.
   14128 //	dense_weights: a list of vectors where the values are the weights associated
   14129 // with a dense feature group.
   14130 //	example_state_data: a list of vectors containing the example state data.
   14131 //	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
   14132 // squared and hinge losses.
   14133 //	l1: Symmetric l1 regularization strength.
   14134 //	l2: Symmetric l2 regularization strength.
   14135 //	num_loss_partitions: Number of partitions of the global loss function.
   14136 //	num_inner_iterations: Number of iterations per mini-batch.
   14137 //
    14138 // Returns a list of vectors containing the updated example state data,
    14139 // a list of vectors where each value is the delta weights associated
    14140 // with a sparse feature group, and a list of vectors where the values
    14141 // are the delta weights associated with a dense feature group.
   14142 func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
   14143 	if scope.Err() != nil {
   14144 		return
   14145 	}
   14146 	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
   14147 	for _, a := range optional {
   14148 		a(attrs)
   14149 	}
   14150 	opspec := tf.OpSpec{
   14151 		Type: "SdcaOptimizer",
   14152 		Input: []tf.Input{
   14153 			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
   14154 		},
   14155 		Attrs: attrs,
   14156 	}
   14157 	op := scope.AddOperation(opspec)
   14158 	if scope.Err() != nil {
   14159 		return
   14160 	}
   14161 	var idx int
   14162 	var err error
   14163 	out_example_state_data = op.Output(idx)
   14164 	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
   14165 		scope.UpdateErr("SdcaOptimizer", err)
   14166 		return
   14167 	}
   14168 	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
   14169 		scope.UpdateErr("SdcaOptimizer", err)
   14170 		return
   14171 	}
   14172 	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
   14173 }
   14174 
   14175 // SparseMatMulAttr is an optional argument to SparseMatMul.
   14176 type SparseMatMulAttr func(optionalAttr)
   14177 
   14178 // SparseMatMulTransposeA sets the optional transpose_a attribute to value.
   14179 // If not specified, defaults to false
   14180 func SparseMatMulTransposeA(value bool) SparseMatMulAttr {
   14181 	return func(m optionalAttr) {
   14182 		m["transpose_a"] = value
   14183 	}
   14184 }
   14185 
   14186 // SparseMatMulTransposeB sets the optional transpose_b attribute to value.
   14187 // If not specified, defaults to false
   14188 func SparseMatMulTransposeB(value bool) SparseMatMulAttr {
   14189 	return func(m optionalAttr) {
   14190 		m["transpose_b"] = value
   14191 	}
   14192 }
   14193 
   14194 // SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value.
   14195 // If not specified, defaults to false
   14196 func SparseMatMulAIsSparse(value bool) SparseMatMulAttr {
   14197 	return func(m optionalAttr) {
   14198 		m["a_is_sparse"] = value
   14199 	}
   14200 }
   14201 
   14202 // SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value.
   14203 // If not specified, defaults to false
   14204 func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
   14205 	return func(m optionalAttr) {
   14206 		m["b_is_sparse"] = value
   14207 	}
   14208 }
   14209 
   14210 // Multiply matrix "a" by matrix "b".
   14211 //
   14212 // The inputs must be two-dimensional matrices and the inner dimension of "a" must
   14213 // match the outer dimension of "b". This op is optimized for the case where at
   14214 // least one of "a" or "b" is sparse. The breakeven for using this versus a dense
   14215 // matrix multiply on one platform was 30% zero values in the sparse matrix.
   14216 //
   14217 // The gradient computation of this operation will only take advantage of sparsity
   14218 // in the input gradient when that gradient comes from a Relu.
   14219 func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output) {
   14220 	if scope.Err() != nil {
   14221 		return
   14222 	}
   14223 	attrs := map[string]interface{}{}
   14224 	for _, a := range optional {
   14225 		a(attrs)
   14226 	}
   14227 	opspec := tf.OpSpec{
   14228 		Type: "SparseMatMul",
   14229 		Input: []tf.Input{
   14230 			a, b,
   14231 		},
   14232 		Attrs: attrs,
   14233 	}
   14234 	op := scope.AddOperation(opspec)
   14235 	return op.Output(0)
   14236 }
   14237 
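// A minimal sketch of opting in to the sparse code path when one operand is
// known to be mostly zeros (for example, post-Relu activations); Placeholder
// and NewScope come from this package, and the matrix shapes are left to the
// caller.
//
// ```
// s := op.NewScope()
// a := op.Placeholder(s, tf.Float) // mostly zeros, e.g. a Relu output
// b := op.Placeholder(s, tf.Float)
// product := op.SparseMatMul(s, a, b, op.SparseMatMulAIsSparse(true))
// _ = product
// ```
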
   14238 // Computes the power of one value to another.
   14239 //
   14240 // Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
   14241 // corresponding elements in `x` and `y`. For example:
   14242 //
   14243 // ```
    14244 // # tensor 'x' is [[2, 2], [3, 3]]
   14245 // # tensor 'y' is [[8, 16], [2, 3]]
   14246 // tf.pow(x, y) ==> [[256, 65536], [9, 27]]
   14247 // ```
   14248 func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   14249 	if scope.Err() != nil {
   14250 		return
   14251 	}
   14252 	opspec := tf.OpSpec{
   14253 		Type: "Pow",
   14254 		Input: []tf.Input{
   14255 			x, y,
   14256 		},
   14257 	}
   14258 	op := scope.AddOperation(opspec)
   14259 	return op.Output(0)
   14260 }
   14261 
   14262 // ShapeAttr is an optional argument to Shape.
   14263 type ShapeAttr func(optionalAttr)
   14264 
   14265 // ShapeOutType sets the optional out_type attribute to value.
   14266 // If not specified, defaults to DT_INT32
   14267 func ShapeOutType(value tf.DataType) ShapeAttr {
   14268 	return func(m optionalAttr) {
   14269 		m["out_type"] = value
   14270 	}
   14271 }
   14272 
   14273 // Returns the shape of a tensor.
   14274 //
   14275 // This operation returns a 1-D integer tensor representing the shape of `input`.
   14276 //
   14277 // For example:
   14278 //
   14279 // ```
   14280 // # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
   14281 // shape(t) ==> [2, 2, 3]
   14282 // ```
   14283 func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
   14284 	if scope.Err() != nil {
   14285 		return
   14286 	}
   14287 	attrs := map[string]interface{}{}
   14288 	for _, a := range optional {
   14289 		a(attrs)
   14290 	}
   14291 	opspec := tf.OpSpec{
   14292 		Type: "Shape",
   14293 		Input: []tf.Input{
   14294 			input,
   14295 		},
   14296 		Attrs: attrs,
   14297 	}
   14298 	op := scope.AddOperation(opspec)
   14299 	return op.Output(0)
   14300 }
   14301 
   14302 // Computes fingerprints of the input strings.
   14303 //
   14304 // Arguments:
   14305 //	input: vector of strings to compute fingerprints on.
   14306 //
    14307 // Returns an `(N, 2)`-shaped matrix where `N` is the number of elements in the input
   14308 // vector. Each row contains the low and high parts of the fingerprint.
   14309 func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output) {
   14310 	if scope.Err() != nil {
   14311 		return
   14312 	}
   14313 	opspec := tf.OpSpec{
   14314 		Type: "SdcaFprint",
   14315 		Input: []tf.Input{
   14316 			input,
   14317 		},
   14318 	}
   14319 	op := scope.AddOperation(opspec)
   14320 	return op.Output(0)
   14321 }
   14322 
   14323 // RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
   14324 type RandomPoissonV2Attr func(optionalAttr)
   14325 
   14326 // RandomPoissonV2Seed sets the optional seed attribute to value.
   14327 //
   14328 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   14329 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   14330 // random seed.
   14331 // If not specified, defaults to 0
   14332 func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr {
   14333 	return func(m optionalAttr) {
   14334 		m["seed"] = value
   14335 	}
   14336 }
   14337 
   14338 // RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
   14339 //
   14340 // value: A second seed to avoid seed collision.
   14341 // If not specified, defaults to 0
   14342 func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr {
   14343 	return func(m optionalAttr) {
   14344 		m["seed2"] = value
   14345 	}
   14346 }
   14347 
   14348 // RandomPoissonV2Dtype sets the optional dtype attribute to value.
   14349 // If not specified, defaults to DT_INT64
   14350 func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr {
   14351 	return func(m optionalAttr) {
   14352 		m["dtype"] = value
   14353 	}
   14354 }
   14355 
   14356 // Outputs random values from the Poisson distribution(s) described by rate.
   14357 //
   14358 // This op uses two algorithms, depending on rate. If rate >= 10, then
   14359 // the algorithm by Hormann is used to acquire samples via
   14360 // transformation-rejection.
   14361 // See http://www.sciencedirect.com/science/article/pii/0167668793909974.
   14362 //
   14363 // Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
   14364 // random variables.
   14365 // See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
    14366 // Programming, Volume 2. Addison-Wesley.
   14367 //
   14368 // Arguments:
   14369 //	shape: 1-D integer tensor. Shape of independent samples to draw from each
   14370 // distribution described by the shape parameters given in rate.
   14371 //	rate: A tensor in which each scalar is a "rate" parameter describing the
   14372 // associated poisson distribution.
   14373 //
   14374 // Returns A tensor with shape `shape + shape(rate)`. Each slice
   14375 // `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
   14376 // `rate[i0, i1, ...iN]`.
   14377 func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output) {
   14378 	if scope.Err() != nil {
   14379 		return
   14380 	}
   14381 	attrs := map[string]interface{}{}
   14382 	for _, a := range optional {
   14383 		a(attrs)
   14384 	}
   14385 	opspec := tf.OpSpec{
   14386 		Type: "RandomPoissonV2",
   14387 		Input: []tf.Input{
   14388 			shape, rate,
   14389 		},
   14390 		Attrs: attrs,
   14391 	}
   14392 	op := scope.AddOperation(opspec)
   14393 	return op.Output(0)
   14394 }
   14395 
   14396 // MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
   14397 type MatrixTriangularSolveAttr func(optionalAttr)
   14398 
   14399 // MatrixTriangularSolveLower sets the optional lower attribute to value.
   14400 //
   14401 // value: Boolean indicating whether the innermost matrices in `matrix` are
   14402 // lower or upper triangular.
   14403 // If not specified, defaults to true
   14404 func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
   14405 	return func(m optionalAttr) {
   14406 		m["lower"] = value
   14407 	}
   14408 }
   14409 
   14410 // MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
   14411 //
   14412 // value: Boolean indicating whether to solve with `matrix` or its (block-wise)
    14413 // adjoint.
   14414 //
   14415 // @compatibility(numpy)
    14416 // Equivalent to scipy.linalg.solve_triangular (NumPy itself has no triangular solve)
   14417 // @end_compatibility
   14418 // If not specified, defaults to false
   14419 func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
   14420 	return func(m optionalAttr) {
   14421 		m["adjoint"] = value
   14422 	}
   14423 }
   14424 
   14425 // Solves systems of linear equations with upper or lower triangular matrices by
   14426 //
   14427 // backsubstitution.
   14428 //
   14429 // `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
   14430 // square matrices. If `lower` is `True` then the strictly upper triangular part
   14431 // of each inner-most matrix is assumed to be zero and not accessed.
   14432 // If `lower` is False then the strictly lower triangular part of each inner-most
   14433 // matrix is assumed to be zero and not accessed.
   14434 // `rhs` is a tensor of shape `[..., M, K]`.
   14435 //
   14436 // The output is a tensor of shape `[..., M, K]`. If `adjoint` is
    14437 // `False` then the innermost matrices in `output` satisfy the matrix equations
    14438 // `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
    14439 // If `adjoint` is `True` then the innermost matrices in
    14440 // `output` satisfy the matrix equations
   14441 // `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
   14442 //
   14443 // Arguments:
   14444 //	matrix: Shape is `[..., M, M]`.
   14445 //	rhs: Shape is `[..., M, K]`.
   14446 //
   14447 // Returns Shape is `[..., M, K]`.
   14448 func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output) {
   14449 	if scope.Err() != nil {
   14450 		return
   14451 	}
   14452 	attrs := map[string]interface{}{}
   14453 	for _, a := range optional {
   14454 		a(attrs)
   14455 	}
   14456 	opspec := tf.OpSpec{
   14457 		Type: "MatrixTriangularSolve",
   14458 		Input: []tf.Input{
   14459 			matrix, rhs,
   14460 		},
   14461 		Attrs: attrs,
   14462 	}
   14463 	op := scope.AddOperation(opspec)
   14464 	return op.Output(0)
   14465 }
   14466 
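// A worked example with a single 2x2 lower-triangular system, as a sketch
// using this package's NewScope and Const helpers; the solution is checked
// by hand.
//
// ```
// s := op.NewScope()
// m := op.Const(s, [][]float32{{2, 0}, {3, 4}})
// rhs := op.Const(s, [][]float32{{2}, {11}})
// x := op.MatrixTriangularSolve(s, m, rhs) // `lower` defaults to true
// // Forward substitution: 2*x0 = 2 gives x0 = 1, then 3*1 + 4*x1 = 11
// // gives x1 = 2, so x evaluates to [[1], [2]].
// _ = x
// ```
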
   14467 // Computes inverse hyperbolic sine of x element-wise.
   14468 func Asinh(scope *Scope, x tf.Output) (y tf.Output) {
   14469 	if scope.Err() != nil {
   14470 		return
   14471 	}
   14472 	opspec := tf.OpSpec{
   14473 		Type: "Asinh",
   14474 		Input: []tf.Input{
   14475 			x,
   14476 		},
   14477 	}
   14478 	op := scope.AddOperation(opspec)
   14479 	return op.Output(0)
   14480 }
   14481 
   14482 // Creates a dataset with a range of values. Corresponds to python's xrange.
   14483 //
   14484 // Arguments:
   14485 //	start: corresponds to start in python's xrange().
   14486 //	stop: corresponds to stop in python's xrange().
   14487 //	step: corresponds to step in python's xrange().
   14488 //
   14489 //
   14490 func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   14491 	if scope.Err() != nil {
   14492 		return
   14493 	}
   14494 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   14495 	opspec := tf.OpSpec{
   14496 		Type: "RangeDataset",
   14497 		Input: []tf.Input{
   14498 			start, stop, step,
   14499 		},
   14500 		Attrs: attrs,
   14501 	}
   14502 	op := scope.AddOperation(opspec)
   14503 	return op.Output(0)
   14504 }
   14505 
   14506 // DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
   14507 type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
   14508 
   14509 // DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
   14510 //
   14511 // value: Specify the data format of the input and output data. With the
   14512 // default format "NHWC", the data is stored in the order of:
   14513 //     [batch, height, width, channels].
   14514 // Alternatively, the format could be "NCHW", the data storage order of:
   14515 //     [batch, channels, height, width].
   14516 // If not specified, defaults to "NHWC"
   14517 func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr {
   14518 	return func(m optionalAttr) {
   14519 		m["data_format"] = value
   14520 	}
   14521 }
   14522 
   14523 // DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
   14524 //
   14525 // value: 1-D tensor of length 4.  The dilation factor for each dimension of
   14526 // `input`. If set to k > 1, there will be k-1 skipped cells between each filter
   14527 // element on that dimension. The dimension order is determined by the value of
   14528 // `data_format`, see above for details. Dilations in the batch and depth
   14529 // dimensions must be 1.
    14530 // If not specified, defaults to [1, 1, 1, 1]
   14531 func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
   14532 	return func(m optionalAttr) {
   14533 		m["dilations"] = value
   14534 	}
   14535 }
   14536 
   14537 // Computes the gradients of depthwise convolution with respect to the input.
   14538 //
   14539 // Arguments:
   14540 //	input_sizes: An integer vector representing the shape of `input`, based
   14541 // on `data_format`.  For example, if `data_format` is 'NHWC' then
   14542 //  `input` is a 4-D `[batch, height, width, channels]` tensor.
   14543 //	filter: 4-D with shape
   14544 // `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
   14545 //	out_backprop: 4-D with shape  based on `data_format`.
   14546 // For example, if `data_format` is 'NHWC' then
   14547 // out_backprop shape is `[batch, out_height, out_width, out_channels]`.
   14548 // Gradients w.r.t. the output of the convolution.
   14549 //	strides: The stride of the sliding window for each dimension of the input
   14550 // of the convolution.
   14551 //	padding: The type of padding algorithm to use.
   14552 //
   14553 // Returns 4-D with shape according to `data_format`.  For example, if
   14554 // `data_format` is 'NHWC', output shape is `[batch, in_height,
   14555 // in_width, in_channels]`.  Gradient w.r.t. the input of the
   14556 // convolution.
   14557 func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output) {
   14558 	if scope.Err() != nil {
   14559 		return
   14560 	}
   14561 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   14562 	for _, a := range optional {
   14563 		a(attrs)
   14564 	}
   14565 	opspec := tf.OpSpec{
   14566 		Type: "DepthwiseConv2dNativeBackpropInput",
   14567 		Input: []tf.Input{
   14568 			input_sizes, filter, out_backprop,
   14569 		},
   14570 		Attrs: attrs,
   14571 	}
   14572 	op := scope.AddOperation(opspec)
   14573 	return op.Output(0)
   14574 }
   14575 
   14576 // Adds sparse updates to the variable referenced by `resource`.
   14577 //
   14578 // This operation computes
   14579 //
   14580 //     # Scalar indices
   14581 //     ref[indices, ...] += updates[...]
   14582 //
   14583 //     # Vector indices (for each i)
   14584 //     ref[indices[i], ...] += updates[i, ...]
   14585 //
   14586 //     # High rank indices (for each i, ..., j)
   14587 //     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
   14588 //
   14589 // Duplicate entries are handled correctly: if multiple `indices` reference
   14590 // the same location, their contributions add.
   14591 //
   14592 // Requires `updates.shape = indices.shape + ref.shape[1:]`.
   14593 //
   14594 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   14595 // <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
   14596 // </div>
   14597 //
   14598 // Arguments:
   14599 //	resource: Should be from a `Variable` node.
   14600 //	indices: A tensor of indices into the first dimension of `ref`.
   14601 //	updates: A tensor of updated values to add to `ref`.
   14602 //
   14603 // Returns the created operation.
   14604 func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
   14605 	if scope.Err() != nil {
   14606 		return
   14607 	}
   14608 	opspec := tf.OpSpec{
   14609 		Type: "ResourceScatterAdd",
   14610 		Input: []tf.Input{
   14611 			resource, indices, updates,
   14612 		},
   14613 	}
   14614 	return scope.AddOperation(opspec)
   14615 }
   14616 
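// A sketch showing how duplicate indices accumulate, assuming a resource
// variable created with this package's VarHandleOp and initialized to zeros
// elsewhere (initialization and session plumbing are elided).
//
// ```
// s := op.NewScope()
// v := op.VarHandleOp(s, tf.Float, tf.MakeShape(3))
// indices := op.Const(s, []int32{0, 0, 2})
// updates := op.Const(s, []float32{1, 2, 5})
// add := op.ResourceScatterAdd(s, v, indices, updates)
// // Both updates at index 0 add, so a zero-initialized variable
// // becomes [3, 0, 5] after the op runs.
// _ = add
// ```
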
   14617 // Computes the gradient for the inverse of `x` wrt its input.
   14618 //
   14619 // Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
   14620 // is the corresponding input gradient.
   14621 func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
   14622 	if scope.Err() != nil {
   14623 		return
   14624 	}
   14625 	opspec := tf.OpSpec{
   14626 		Type: "ReciprocalGrad",
   14627 		Input: []tf.Input{
   14628 			y, dy,
   14629 		},
   14630 	}
   14631 	op := scope.AddOperation(opspec)
   14632 	return op.Output(0)
   14633 }
   14634 
   14635 // Returns the min of x and y (i.e. x < y ? x : y) element-wise.
   14636 //
   14637 // *NOTE*: `Minimum` supports broadcasting. More about broadcasting
   14638 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   14639 func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   14640 	if scope.Err() != nil {
   14641 		return
   14642 	}
   14643 	opspec := tf.OpSpec{
   14644 		Type: "Minimum",
   14645 		Input: []tf.Input{
   14646 			x, y,
   14647 		},
   14648 	}
   14649 	op := scope.AddOperation(opspec)
   14650 	return op.Output(0)
   14651 }
   14652 
   14653 // MfccAttr is an optional argument to Mfcc.
   14654 type MfccAttr func(optionalAttr)
   14655 
   14656 // MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
   14657 //
   14658 // value: The highest frequency to use when calculating the
   14659 // ceptstrum.
   14660 // If not specified, defaults to 4000
   14661 func MfccUpperFrequencyLimit(value float32) MfccAttr {
   14662 	return func(m optionalAttr) {
   14663 		m["upper_frequency_limit"] = value
   14664 	}
   14665 }
   14666 
   14667 // MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
   14668 //
   14669 // value: The lowest frequency to use when calculating the
   14670 // ceptstrum.
   14671 // If not specified, defaults to 20
   14672 func MfccLowerFrequencyLimit(value float32) MfccAttr {
   14673 	return func(m optionalAttr) {
   14674 		m["lower_frequency_limit"] = value
   14675 	}
   14676 }
   14677 
   14678 // MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
   14679 //
   14680 // value: Resolution of the Mel bank used internally.
   14681 // If not specified, defaults to 40
   14682 func MfccFilterbankChannelCount(value int64) MfccAttr {
   14683 	return func(m optionalAttr) {
   14684 		m["filterbank_channel_count"] = value
   14685 	}
   14686 }
   14687 
   14688 // MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
   14689 //
   14690 // value: How many output channels to produce per time slice.
   14691 // If not specified, defaults to 13
   14692 func MfccDctCoefficientCount(value int64) MfccAttr {
   14693 	return func(m optionalAttr) {
   14694 		m["dct_coefficient_count"] = value
   14695 	}
   14696 }
   14697 
   14698 // Transforms a spectrogram into a form that's useful for speech recognition.
   14699 //
   14700 // Mel Frequency Cepstral Coefficients are a way of representing audio data that's
   14701 // been effective as an input feature for machine learning. They are created by
   14702 // taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
   14703 // higher frequencies that are less significant to the human ear. They have a long
   14704 // history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
   14705 // is a good resource to learn more.
   14706 //
   14707 // Arguments:
   14708 //	spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
   14709 // set to true.
   14710 //	sample_rate: How many samples per second the source audio used.
   14711 func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
   14712 	if scope.Err() != nil {
   14713 		return
   14714 	}
   14715 	attrs := map[string]interface{}{}
   14716 	for _, a := range optional {
   14717 		a(attrs)
   14718 	}
   14719 	opspec := tf.OpSpec{
   14720 		Type: "Mfcc",
   14721 		Input: []tf.Input{
   14722 			spectrogram, sample_rate,
   14723 		},
   14724 		Attrs: attrs,
   14725 	}
   14726 	op := scope.AddOperation(opspec)
   14727 	return op.Output(0)
   14728 }
   14729 
   14730 // Returns the element-wise sum of a list of tensors.
   14731 //
   14732 // `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
   14733 // wait for all of its inputs to be ready before beginning to sum. This can
   14734 // save memory if inputs are ready at different times, since minimum temporary
   14735 // storage is proportional to the output size rather than the inputs size.
   14736 //
   14737 // Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
   14738 //
   14739 // Returns a `Tensor` of same shape and type as the elements of `inputs`.
   14740 //
   14741 // Arguments:
   14742 //	inputs: A list of `Tensor` objects, each with same shape and type.
   14743 //	shape: Shape of elements of `inputs`.
   14744 func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output) {
   14745 	if scope.Err() != nil {
   14746 		return
   14747 	}
   14748 	attrs := map[string]interface{}{"shape": shape}
   14749 	opspec := tf.OpSpec{
   14750 		Type: "AccumulateNV2",
   14751 		Input: []tf.Input{
   14752 			tf.OutputList(inputs),
   14753 		},
   14754 		Attrs: attrs,
   14755 	}
   14756 	op := scope.AddOperation(opspec)
   14757 	return op.Output(0)
   14758 }
   14759 
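// A minimal sketch summing three equally shaped tensors; the `shape`
// argument describes the common element shape (here a length-2 vector), and
// tf.MakeShape comes from the parent tensorflow/go package.
//
// ```
// s := op.NewScope()
// a := op.Const(s, []float32{1, 2})
// b := op.Const(s, []float32{10, 20})
// c := op.Const(s, []float32{100, 200})
// sum := op.AccumulateNV2(s, []tf.Output{a, b, c}, tf.MakeShape(2))
// // sum evaluates to [111, 222].
// _ = sum
// ```
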
   14760 // Convert the quantized 'input' tensor into a lower-precision 'output', using the
   14761 //
   14762 // actual distribution of the values to maximize the usage of the lower bit depth
   14763 // and adjusting the output min and max ranges accordingly.
   14764 //
   14765 // [input_min, input_max] are scalar floats that specify the range for the float
   14766 // interpretation of the 'input' data. For example, if input_min is -1.0f and
   14767 // input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
   14768 // value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
   14769 //
   14770 // This operator tries to squeeze as much precision as possible into an output with
   14771 // a lower bit depth by calculating the actual min and max values found in the
   14772 // data. For example, maybe that quint16 input has no values lower than 16,384 and
   14773 // none higher than 49,152. That means only half the range is actually needed, all
   14774 // the float interpretations are between -0.5f and 0.5f, so if we want to compress
   14775 // the data into a quint8 output, we can use that range rather than the theoretical
   14776 // -1.0f to 1.0f that is suggested by the input min and max.
   14777 //
   14778 // In practice, this is most useful for taking output from operations like
   14779 // QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
   14780 // may have large potential output ranges, but in practice have a distribution of
   14781 // input values that only uses a small fraction of the possible range. By feeding
   14782 // that output into this operator, we can reduce it from 32 bits down to 8 with
   14783 // minimal loss of accuracy.
   14784 //
   14785 // Arguments:
   14786 //
   14787 //	input_min: The float value that the minimum quantized input value represents.
   14788 //	input_max: The float value that the maximum quantized input value represents.
   14789 //	out_type: The type of the output. Should be a lower bit depth than Tinput.
   14790 //
    14791 // Returns the float values that the minimum and the maximum quantized output values represent.
   14792 func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
   14793 	if scope.Err() != nil {
   14794 		return
   14795 	}
   14796 	attrs := map[string]interface{}{"out_type": out_type}
   14797 	opspec := tf.OpSpec{
   14798 		Type: "QuantizeDownAndShrinkRange",
   14799 		Input: []tf.Input{
   14800 			input, input_min, input_max,
   14801 		},
   14802 		Attrs: attrs,
   14803 	}
   14804 	op := scope.AddOperation(opspec)
   14805 	return op.Output(0), op.Output(1), op.Output(2)
   14806 }
   14807 
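// To make the quint16 example above concrete: with input_min = -1.0 and
// input_max = 1.0, a 16-bit value v maps to the float -1.0 + 2.0*v/65535.
// Observed values in [16384, 49152] therefore cover roughly [-0.5, 0.5],
// so the op can emit output_min ~= -0.5 and output_max ~= 0.5 and spend all
// 8 output bits on that narrower range.
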
   14808 // RandomGammaAttr is an optional argument to RandomGamma.
   14809 type RandomGammaAttr func(optionalAttr)
   14810 
   14811 // RandomGammaSeed sets the optional seed attribute to value.
   14812 //
   14813 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   14814 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   14815 // random seed.
   14816 // If not specified, defaults to 0
   14817 func RandomGammaSeed(value int64) RandomGammaAttr {
   14818 	return func(m optionalAttr) {
   14819 		m["seed"] = value
   14820 	}
   14821 }
   14822 
   14823 // RandomGammaSeed2 sets the optional seed2 attribute to value.
   14824 //
   14825 // value: A second seed to avoid seed collision.
   14826 // If not specified, defaults to 0
   14827 func RandomGammaSeed2(value int64) RandomGammaAttr {
   14828 	return func(m optionalAttr) {
   14829 		m["seed2"] = value
   14830 	}
   14831 }
   14832 
   14833 // Outputs random values from the Gamma distribution(s) described by alpha.
   14834 //
   14835 // This op uses the algorithm by Marsaglia et al. to acquire samples via
   14836 // transformation-rejection from pairs of uniform and normal random variables.
   14837 // See http://dl.acm.org/citation.cfm?id=358414
   14838 //
   14839 // Arguments:
   14840 //	shape: 1-D integer tensor. Shape of independent samples to draw from each
   14841 // distribution described by the shape parameters given in alpha.
   14842 //	alpha: A tensor in which each scalar is a "shape" parameter describing the
   14843 // associated gamma distribution.
   14844 //
   14845 // Returns A tensor with shape `shape + shape(alpha)`. Each slice
   14846 // `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
   14847 // `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
   14848 func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output) {
   14849 	if scope.Err() != nil {
   14850 		return
   14851 	}
   14852 	attrs := map[string]interface{}{}
   14853 	for _, a := range optional {
   14854 		a(attrs)
   14855 	}
   14856 	opspec := tf.OpSpec{
   14857 		Type: "RandomGamma",
   14858 		Input: []tf.Input{
   14859 			shape, alpha,
   14860 		},
   14861 		Attrs: attrs,
   14862 	}
   14863 	op := scope.AddOperation(opspec)
   14864 	return op.Output(0)
   14865 }
   14866 
   14867 // QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
   14868 type QuantizedConv2DAttr func(optionalAttr)
   14869 
   14870 // QuantizedConv2DOutType sets the optional out_type attribute to value.
   14871 // If not specified, defaults to DT_QINT32
   14872 func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr {
   14873 	return func(m optionalAttr) {
   14874 		m["out_type"] = value
   14875 	}
   14876 }
   14877 
   14878 // QuantizedConv2DDilations sets the optional dilations attribute to value.
   14879 //
   14880 // value: 1-D tensor of length 4.  The dilation factor for each dimension of
   14881 // `input`. If set to k > 1, there will be k-1 skipped cells between each
   14882 // filter element on that dimension. The dimension order is determined by the
   14883 // value of `data_format`, see above for details. Dilations in the batch and
   14884 // depth dimensions must be 1.
    14885 // If not specified, defaults to [1, 1, 1, 1]
   14886 func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr {
   14887 	return func(m optionalAttr) {
   14888 		m["dilations"] = value
   14889 	}
   14890 }
   14891 
   14892 // Computes a 2D convolution given quantized 4D input and filter tensors.
   14893 //
   14894 // The inputs are quantized tensors where the lowest value represents the real
   14895 // number of the associated minimum, and the highest represents the maximum.
   14896 // This means that you can only interpret the quantized output in the same way, by
   14897 // taking the returned minimum and maximum values into account.
   14898 //
   14899 // Arguments:
   14900 //
    14901 //	filter: filter's input_depth dimension must match input's depth dimension.
   14902 //	min_input: The float value that the lowest quantized input value represents.
   14903 //	max_input: The float value that the highest quantized input value represents.
   14904 //	min_filter: The float value that the lowest quantized filter value represents.
   14905 //	max_filter: The float value that the highest quantized filter value represents.
   14906 //	strides: The stride of the sliding window for each dimension of the input
   14907 // tensor.
   14908 //	padding: The type of padding algorithm to use.
   14909 //
    14910 // Returns the float values that the lowest and the highest quantized output values represent.
   14911 func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
   14912 	if scope.Err() != nil {
   14913 		return
   14914 	}
   14915 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   14916 	for _, a := range optional {
   14917 		a(attrs)
   14918 	}
   14919 	opspec := tf.OpSpec{
   14920 		Type: "QuantizedConv2D",
   14921 		Input: []tf.Input{
   14922 			input, filter, min_input, max_input, min_filter, max_filter,
   14923 		},
   14924 		Attrs: attrs,
   14925 	}
   14926 	op := scope.AddOperation(opspec)
   14927 	return op.Output(0), op.Output(1), op.Output(2)
   14928 }
   14929 
   14930 // ResourceGatherAttr is an optional argument to ResourceGather.
   14931 type ResourceGatherAttr func(optionalAttr)
   14932 
   14933 // ResourceGatherValidateIndices sets the optional validate_indices attribute to value.
   14934 // If not specified, defaults to true
   14935 func ResourceGatherValidateIndices(value bool) ResourceGatherAttr {
   14936 	return func(m optionalAttr) {
   14937 		m["validate_indices"] = value
   14938 	}
   14939 }
   14940 
   14941 // Gather slices from the variable pointed to by `resource` according to `indices`.
   14942 //
   14943 // `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
   14944 // Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
   14945 //
   14946 // ```python
   14947 //     # Scalar indices
   14948 //     output[:, ..., :] = params[indices, :, ... :]
   14949 //
   14950 //     # Vector indices
   14951 //     output[i, :, ..., :] = params[indices[i], :, ... :]
   14952 //
   14953 //     # Higher rank indices
   14954 //     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
   14955 // ```
   14956 func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output) {
   14957 	if scope.Err() != nil {
   14958 		return
   14959 	}
   14960 	attrs := map[string]interface{}{"dtype": dtype}
   14961 	for _, a := range optional {
   14962 		a(attrs)
   14963 	}
   14964 	opspec := tf.OpSpec{
   14965 		Type: "ResourceGather",
   14966 		Input: []tf.Input{
   14967 			resource, indices,
   14968 		},
   14969 		Attrs: attrs,
   14970 	}
   14971 	op := scope.AddOperation(opspec)
   14972 	return op.Output(0)
   14973 }
   14974 
   14975 // Delete the TensorArray from its resource container.
   14976 //
   14977 // This enables the user to close and release the resource in the middle
   14978 // of a step/run.
   14979 //
   14980 // Arguments:
   14981 //	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
   14982 //
   14983 // Returns the created operation.
   14984 func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
   14985 	if scope.Err() != nil {
   14986 		return
   14987 	}
   14988 	opspec := tf.OpSpec{
   14989 		Type: "TensorArrayCloseV3",
   14990 		Input: []tf.Input{
   14991 			handle,
   14992 		},
   14993 	}
   14994 	return scope.AddOperation(opspec)
   14995 }
   14996 
   14997 // MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
   14998 type MaxPoolGradGradAttr func(optionalAttr)
   14999 
   15000 // MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
   15001 //
   15002 // value: Specify the data format of the input and output data. With the
   15003 // default format "NHWC", the data is stored in the order of:
   15004 //     [batch, in_height, in_width, in_channels].
   15005 // Alternatively, the format could be "NCHW", the data storage order of:
   15006 //     [batch, in_channels, in_height, in_width].
   15007 // If not specified, defaults to "NHWC"
   15008 func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr {
   15009 	return func(m optionalAttr) {
   15010 		m["data_format"] = value
   15011 	}
   15012 }
   15013 
   15014 // Computes second-order gradients of the maxpooling function.
   15015 //
   15016 // Arguments:
   15017 //	orig_input: The original input tensor.
   15018 //	orig_output: The original output tensor.
   15019 //	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
   15020 //	ksize: The size of the window for each dimension of the input tensor.
   15021 //	strides: The stride of the sliding window for each dimension of the
   15022 // input tensor.
   15023 //	padding: The type of padding algorithm to use.
   15024 //
   15025 // Returns Gradients of gradients w.r.t. the input to `max_pool`.
   15026 func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output) {
   15027 	if scope.Err() != nil {
   15028 		return
   15029 	}
   15030 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   15031 	for _, a := range optional {
   15032 		a(attrs)
   15033 	}
   15034 	opspec := tf.OpSpec{
   15035 		Type: "MaxPoolGradGrad",
   15036 		Input: []tf.Input{
   15037 			orig_input, orig_output, grad,
   15038 		},
   15039 		Attrs: attrs,
   15040 	}
   15041 	op := scope.AddOperation(opspec)
   15042 	return op.Output(0)
   15043 }
   15044 
   15045 // RandomUniformIntAttr is an optional argument to RandomUniformInt.
   15046 type RandomUniformIntAttr func(optionalAttr)
   15047 
   15048 // RandomUniformIntSeed sets the optional seed attribute to value.
   15049 //
   15050 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   15051 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   15052 // random seed.
   15053 // If not specified, defaults to 0
   15054 func RandomUniformIntSeed(value int64) RandomUniformIntAttr {
   15055 	return func(m optionalAttr) {
   15056 		m["seed"] = value
   15057 	}
   15058 }
   15059 
   15060 // RandomUniformIntSeed2 sets the optional seed2 attribute to value.
   15061 //
   15062 // value: A second seed to avoid seed collision.
   15063 // If not specified, defaults to 0
   15064 func RandomUniformIntSeed2(value int64) RandomUniformIntAttr {
   15065 	return func(m optionalAttr) {
   15066 		m["seed2"] = value
   15067 	}
   15068 }
   15069 
   15070 // Outputs random integers from a uniform distribution.
   15071 //
   15072 // The generated values are uniform integers in the range `[minval, maxval)`.
   15073 // The lower bound `minval` is included in the range, while the upper bound
   15074 // `maxval` is excluded.
   15075 //
   15076 // The random integers are slightly biased unless `maxval - minval` is an exact
   15077 // power of two.  The bias is small for values of `maxval - minval` significantly
   15078 // smaller than the range of the output (either `2^32` or `2^64`).
   15079 //
   15080 // Arguments:
   15081 //	shape: The shape of the output tensor.
   15082 //	minval: 0-D.  Inclusive lower bound on the generated integers.
   15083 //	maxval: 0-D.  Exclusive upper bound on the generated integers.
   15084 //
   15085 // Returns A tensor of the specified shape filled with uniform random integers.
   15086 func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output) {
   15087 	if scope.Err() != nil {
   15088 		return
   15089 	}
   15090 	attrs := map[string]interface{}{}
   15091 	for _, a := range optional {
   15092 		a(attrs)
   15093 	}
   15094 	opspec := tf.OpSpec{
   15095 		Type: "RandomUniformInt",
   15096 		Input: []tf.Input{
   15097 			shape, minval, maxval,
   15098 		},
   15099 		Attrs: attrs,
   15100 	}
   15101 	op := scope.AddOperation(opspec)
   15102 	return op.Output(0)
   15103 }
   15104 
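// A minimal sketch drawing three integers uniformly from [0, 10), with a
// fixed seed for reproducibility; `minval` and `maxval` must be scalars of
// the same integer type.
//
// ```
// s := op.NewScope()
// shape := op.Const(s, []int32{3})
// lo := op.Const(s, int64(0))
// hi := op.Const(s, int64(10))
// r := op.RandomUniformInt(s, shape, lo, hi, op.RandomUniformIntSeed(42))
// _ = r
// ```
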
   15105 // SkipgramAttr is an optional argument to Skipgram.
   15106 type SkipgramAttr func(optionalAttr)
   15107 
   15108 // SkipgramWindowSize sets the optional window_size attribute to value.
   15109 //
   15110 // value: The number of words to predict to the left and right of the target.
   15111 // If not specified, defaults to 5
   15112 func SkipgramWindowSize(value int64) SkipgramAttr {
   15113 	return func(m optionalAttr) {
   15114 		m["window_size"] = value
   15115 	}
   15116 }
   15117 
   15118 // SkipgramMinCount sets the optional min_count attribute to value.
   15119 //
   15120 // value: The minimum number of word occurrences for it to be included in the
   15121 // vocabulary.
   15122 // If not specified, defaults to 5
   15123 func SkipgramMinCount(value int64) SkipgramAttr {
   15124 	return func(m optionalAttr) {
   15125 		m["min_count"] = value
   15126 	}
   15127 }
   15128 
   15129 // SkipgramSubsample sets the optional subsample attribute to value.
   15130 //
   15131 // value: Threshold for word occurrence. Words that appear with higher
   15132 // frequency will be randomly down-sampled. Set to 0 to disable.
   15133 // If not specified, defaults to 0.001
   15134 func SkipgramSubsample(value float32) SkipgramAttr {
   15135 	return func(m optionalAttr) {
   15136 		m["subsample"] = value
   15137 	}
   15138 }
   15139 
   15140 // Parses a text file and creates a batch of examples.
   15141 //
   15142 // DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
   15143 //
   15144 // Arguments:
   15145 //	filename: The corpus's text file name.
   15146 //	batch_size: The size of produced batch.
   15147 //
    15148 // Returns a vector of words in the corpus, the frequencies of words (sorted in non-ascending order), the number of words per epoch in the data file, the current epoch number, the total number of words processed so far, a vector of example word ids, and a vector of label word ids.
   15149 func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output) {
   15150 	if scope.Err() != nil {
   15151 		return
   15152 	}
   15153 	attrs := map[string]interface{}{"filename": filename, "batch_size": batch_size}
   15154 	for _, a := range optional {
   15155 		a(attrs)
   15156 	}
   15157 	opspec := tf.OpSpec{
   15158 		Type: "Skipgram",
   15159 
   15160 		Attrs: attrs,
   15161 	}
   15162 	op := scope.AddOperation(opspec)
   15163 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
   15164 }
   15165 
   15166 // StringToNumberAttr is an optional argument to StringToNumber.
   15167 type StringToNumberAttr func(optionalAttr)
   15168 
   15169 // StringToNumberOutType sets the optional out_type attribute to value.
   15170 //
   15171 // value: The numeric type to interpret each string in `string_tensor` as.
   15172 // If not specified, defaults to DT_FLOAT
   15173 func StringToNumberOutType(value tf.DataType) StringToNumberAttr {
   15174 	return func(m optionalAttr) {
   15175 		m["out_type"] = value
   15176 	}
   15177 }
   15178 
   15179 // Converts each string in the input Tensor to the specified numeric type.
   15180 //
   15181 // (Note that int32 overflow results in an error while float overflow
   15182 // results in a rounded value.)
   15183 //
   15184 // Returns A Tensor of the same shape as the input `string_tensor`.
   15185 func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output) {
   15186 	if scope.Err() != nil {
   15187 		return
   15188 	}
   15189 	attrs := map[string]interface{}{}
   15190 	for _, a := range optional {
   15191 		a(attrs)
   15192 	}
   15193 	opspec := tf.OpSpec{
   15194 		Type: "StringToNumber",
   15195 		Input: []tf.Input{
   15196 			string_tensor,
   15197 		},
   15198 		Attrs: attrs,
   15199 	}
   15200 	op := scope.AddOperation(opspec)
   15201 	return op.Output(0)
   15202 }
   15203 
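// A minimal sketch using the default DT_FLOAT out_type; each string is
// parsed independently and the output keeps the input's shape.
//
// ```
// s := op.NewScope()
// strs := op.Const(s, []string{"3.14", "-42", "1e3"})
// nums := op.StringToNumber(s, strs)
// // nums evaluates to [3.14, -42, 1000] as float32.
// _ = nums
// ```
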
   15204 // ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
   15205 type ResourceApplyFtrlV2Attr func(optionalAttr)
   15206 
   15207 // ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
   15208 //
   15209 // value: If `True`, updating of the var and accum tensors will be protected
   15210 // by a lock; otherwise the behavior is undefined, but may exhibit less
   15211 // contention.
   15212 // If not specified, defaults to false
   15213 func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr {
   15214 	return func(m optionalAttr) {
   15215 		m["use_locking"] = value
   15216 	}
   15217 }
   15218 
   15219 // Update '*var' according to the Ftrl-proximal scheme.
   15220 //
   15221 // grad_with_shrinkage = grad + 2 * l2_shrinkage * var
   15222 // accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
   15223 // linear += grad_with_shrinkage +
   15224 //     (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
   15225 // quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
   15226 // var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
   15227 // accum = accum_new
   15228 //
   15229 // Arguments:
   15230 //	var_: Should be from a Variable().
   15231 //	accum: Should be from a Variable().
   15232 //	linear: Should be from a Variable().
   15233 //	grad: The gradient.
   15234 //	lr: Scaling factor. Must be a scalar.
    15235 //	l1: L1 regularization. Must be a scalar.
    15236 //	l2: L2 regularization. Must be a scalar.
    15237 //	l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
   15238 //	lr_power: Scaling factor. Must be a scalar.
   15239 //
   15240 // Returns the created operation.
   15241 func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation) {
   15242 	if scope.Err() != nil {
   15243 		return
   15244 	}
   15245 	attrs := map[string]interface{}{}
   15246 	for _, a := range optional {
   15247 		a(attrs)
   15248 	}
   15249 	opspec := tf.OpSpec{
   15250 		Type: "ResourceApplyFtrlV2",
   15251 		Input: []tf.Input{
   15252 			var_, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
   15253 		},
   15254 		Attrs: attrs,
   15255 	}
   15256 	return scope.AddOperation(opspec)
   15257 }
   15258 
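// A scalar paraphrase of the update rule above in plain Go, to make the
// pseudocode concrete; this is a sketch of the arithmetic only, not of how
// the op executes (the op applies it element-wise to the resource variables).
//
// ```
// import "math"
//
// func ftrlV2Step(v, accum, linear, grad, lr, l1, l2, l2Shrinkage, lrPower float64) (newV, newAccum, newLinear float64) {
// 	gws := grad + 2*l2Shrinkage*v // grad_with_shrinkage
// 	newAccum = accum + gws*gws
// 	newLinear = linear + gws + (math.Pow(newAccum, -lrPower)-math.Pow(accum, -lrPower))/lr*v
// 	quadratic := 1/(math.Pow(newAccum, lrPower)*lr) + 2*l2
// 	if math.Abs(newLinear) > l1 {
// 		newV = (math.Copysign(l1, newLinear) - newLinear) / quadratic
// 	} // else newV stays 0.0, matching the pseudocode
// 	return newV, newAccum, newLinear
// }
// ```
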
   15259 // TruncatedNormalAttr is an optional argument to TruncatedNormal.
   15260 type TruncatedNormalAttr func(optionalAttr)
   15261 
   15262 // TruncatedNormalSeed sets the optional seed attribute to value.
   15263 //
   15264 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   15265 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   15266 // random seed.
   15267 // If not specified, defaults to 0
   15268 func TruncatedNormalSeed(value int64) TruncatedNormalAttr {
   15269 	return func(m optionalAttr) {
   15270 		m["seed"] = value
   15271 	}
   15272 }
   15273 
   15274 // TruncatedNormalSeed2 sets the optional seed2 attribute to value.
   15275 //
   15276 // value: A second seed to avoid seed collision.
   15277 // If not specified, defaults to 0
   15278 func TruncatedNormalSeed2(value int64) TruncatedNormalAttr {
   15279 	return func(m optionalAttr) {
   15280 		m["seed2"] = value
   15281 	}
   15282 }
   15283 
   15284 // Outputs random values from a truncated normal distribution.
   15285 //
   15286 // The generated values follow a normal distribution with mean 0 and standard
   15287 // deviation 1, except that values whose magnitude is more than 2 standard
   15288 // deviations from the mean are dropped and re-picked.
   15289 //
   15290 // Arguments:
   15291 //	shape: The shape of the output tensor.
   15292 //	dtype: The type of the output.
   15293 //
   15294 // Returns A tensor of the specified shape filled with random truncated normal
   15295 // values.
   15296 func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output) {
   15297 	if scope.Err() != nil {
   15298 		return
   15299 	}
   15300 	attrs := map[string]interface{}{"dtype": dtype}
   15301 	for _, a := range optional {
   15302 		a(attrs)
   15303 	}
   15304 	opspec := tf.OpSpec{
   15305 		Type: "TruncatedNormal",
   15306 		Input: []tf.Input{
   15307 			shape,
   15308 		},
   15309 		Attrs: attrs,
   15310 	}
   15311 	op := scope.AddOperation(opspec)
   15312 	return op.Output(0)
   15313 }
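
// A minimal graph-construction sketch (assuming the standard tensorflow/go
// client workflow; names are illustrative):
//
//	s := op.NewScope()
//	shape := op.Const(s, []int32{2, 3})
//	samples := op.TruncatedNormal(s, shape, tf.Float,
//		op.TruncatedNormalSeed(7), op.TruncatedNormalSeed2(11))
//	graph, err := s.Finalize()
//	// Build a tf.Session from graph and fetch `samples` to draw a fresh
//	// 2x3 sample on each run.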
   15314 
   15315 // FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
   15316 type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)
   15317 
   15318 // FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value.
   15319 // If not specified, defaults to 8
   15320 func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr {
   15321 	return func(m optionalAttr) {
   15322 		m["num_bits"] = value
   15323 	}
   15324 }
   15325 
   15326 // FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
   15327 // If not specified, defaults to false
   15328 func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
   15329 	return func(m optionalAttr) {
   15330 		m["narrow_range"] = value
   15331 	}
   15332 }
   15333 
// Fake-quantize the 'inputs' tensor of type float via per-channel floats.
//
// `inputs` has one of the shapes `[d]`, `[b, d]`, or `[b, h, w, d]`, and is
// fake-quantized via per-channel floats `min` and `max` of shape `[d]` into an
// 'outputs' tensor of the same shape as `inputs`.
   15338 //
   15339 // `[min; max]` define the clamping range for the `inputs` data.
   15340 // `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
   15341 // when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
   15342 // then de-quantized and output as floats in `[min; max]` interval.
   15343 // `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
   15344 //
   15345 // This operation has a gradient and thus allows for training `min` and `max`
   15346 // values.
   15347 func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
   15348 	if scope.Err() != nil {
   15349 		return
   15350 	}
   15351 	attrs := map[string]interface{}{}
   15352 	for _, a := range optional {
   15353 		a(attrs)
   15354 	}
   15355 	opspec := tf.OpSpec{
   15356 		Type: "FakeQuantWithMinMaxVarsPerChannel",
   15357 		Input: []tf.Input{
   15358 			inputs, min, max,
   15359 		},
   15360 		Attrs: attrs,
   15361 	}
   15362 	op := scope.AddOperation(opspec)
   15363 	return op.Output(0)
   15364 }
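
// The quantize/de-quantize round trip for one channel can be written out
// directly. A rough scalar sketch (plain Go with the "math" package; it
// ignores the op's nudging of min/max, and with narrow_range the lowest
// level is 1 rather than 0; illustrative only):
//
//	quantMax := float64(int64(1)<<uint(numBits)) - 1 // 255 when num_bits == 8
//	scale := (max - min) / quantMax
//	q := math.Round((v - min) / scale) // quantized level in [0, quantMax]
//	dq := q*scale + min                // de-quantized float back in [min, max]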
   15365 
   15366 // RandomShuffleAttr is an optional argument to RandomShuffle.
   15367 type RandomShuffleAttr func(optionalAttr)
   15368 
   15369 // RandomShuffleSeed sets the optional seed attribute to value.
   15370 //
   15371 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   15372 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   15373 // random seed.
   15374 // If not specified, defaults to 0
   15375 func RandomShuffleSeed(value int64) RandomShuffleAttr {
   15376 	return func(m optionalAttr) {
   15377 		m["seed"] = value
   15378 	}
   15379 }
   15380 
   15381 // RandomShuffleSeed2 sets the optional seed2 attribute to value.
   15382 //
   15383 // value: A second seed to avoid seed collision.
   15384 // If not specified, defaults to 0
   15385 func RandomShuffleSeed2(value int64) RandomShuffleAttr {
   15386 	return func(m optionalAttr) {
   15387 		m["seed2"] = value
   15388 	}
   15389 }
   15390 
   15391 // Randomly shuffles a tensor along its first dimension.
   15392 //
   15393 //   The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
   15394 //   to one and only one `output[i]`. For example, a mapping that might occur for a
   15395 //   3x2 tensor is:
   15396 //
   15397 // ```
   15398 // [[1, 2],       [[5, 6],
   15399 //  [3, 4],  ==>   [1, 2],
   15400 //  [5, 6]]        [3, 4]]
   15401 // ```
   15402 //
   15403 // Arguments:
   15404 //	value: The tensor to be shuffled.
   15405 //
   15406 // Returns A tensor of same shape and type as `value`, shuffled along its first
   15407 // dimension.
   15408 func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
   15409 	if scope.Err() != nil {
   15410 		return
   15411 	}
   15412 	attrs := map[string]interface{}{}
   15413 	for _, a := range optional {
   15414 		a(attrs)
   15415 	}
   15416 	opspec := tf.OpSpec{
   15417 		Type: "RandomShuffle",
   15418 		Input: []tf.Input{
   15419 			value,
   15420 		},
   15421 		Attrs: attrs,
   15422 	}
   15423 	op := scope.AddOperation(opspec)
   15424 	return op.Output(0)
   15425 }
   15426 
   15427 // OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
   15428 type OrderedMapIncompleteSizeAttr func(optionalAttr)
   15429 
   15430 // OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
   15431 // If not specified, defaults to 0
   15432 //
   15433 // REQUIRES: value >= 0
   15434 func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
   15435 	return func(m optionalAttr) {
   15436 		m["capacity"] = value
   15437 	}
   15438 }
   15439 
   15440 // OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
   15441 // If not specified, defaults to 0
   15442 //
   15443 // REQUIRES: value >= 0
   15444 func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
   15445 	return func(m optionalAttr) {
   15446 		m["memory_limit"] = value
   15447 	}
   15448 }
   15449 
   15450 // OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
   15451 // If not specified, defaults to ""
   15452 func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
   15453 	return func(m optionalAttr) {
   15454 		m["container"] = value
   15455 	}
   15456 }
   15457 
   15458 // OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
   15459 // If not specified, defaults to ""
   15460 func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
   15461 	return func(m optionalAttr) {
   15462 		m["shared_name"] = value
   15463 	}
   15464 }
   15465 
   15466 // Op returns the number of incomplete elements in the underlying container.
   15467 func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
   15468 	if scope.Err() != nil {
   15469 		return
   15470 	}
   15471 	attrs := map[string]interface{}{"dtypes": dtypes}
   15472 	for _, a := range optional {
   15473 		a(attrs)
   15474 	}
   15475 	opspec := tf.OpSpec{
   15476 		Type: "OrderedMapIncompleteSize",
   15477 
   15478 		Attrs: attrs,
   15479 	}
   15480 	op := scope.AddOperation(opspec)
   15481 	return op.Output(0)
   15482 }
   15483 
   15484 // DecodeRawAttr is an optional argument to DecodeRaw.
   15485 type DecodeRawAttr func(optionalAttr)
   15486 
   15487 // DecodeRawLittleEndian sets the optional little_endian attribute to value.
   15488 //
   15489 // value: Whether the input `bytes` are in little-endian order.
   15490 // Ignored for `out_type` values that are stored in a single byte like
   15491 // `uint8`.
   15492 // If not specified, defaults to true
   15493 func DecodeRawLittleEndian(value bool) DecodeRawAttr {
   15494 	return func(m optionalAttr) {
   15495 		m["little_endian"] = value
   15496 	}
   15497 }
   15498 
   15499 // Reinterpret the bytes of a string as a vector of numbers.
   15500 //
   15501 // Arguments:
   15502 //	bytes: All the elements must have the same length.
   15503 //
   15504 //
   15505 // Returns A Tensor with one more dimension than the input `bytes`.  The
   15506 // added dimension will have size equal to the length of the elements
   15507 // of `bytes` divided by the number of bytes to represent `out_type`.
   15508 func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {
   15509 	if scope.Err() != nil {
   15510 		return
   15511 	}
   15512 	attrs := map[string]interface{}{"out_type": out_type}
   15513 	for _, a := range optional {
   15514 		a(attrs)
   15515 	}
   15516 	opspec := tf.OpSpec{
   15517 		Type: "DecodeRaw",
   15518 		Input: []tf.Input{
   15519 			bytes,
   15520 		},
   15521 		Attrs: attrs,
   15522 	}
   15523 	op := scope.AddOperation(opspec)
   15524 	return op.Output(0)
   15525 }
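
// For example, eight little-endian bytes decode to two int32 values; the
// added dimension has size len(element)/sizeof(out_type). A usage sketch
// (assuming the standard tensorflow/go client workflow):
//
//	s := op.NewScope()
//	raw := op.Const(s, []string{"\x01\x00\x00\x00\x02\x00\x00\x00"})
//	nums := op.DecodeRaw(s, raw, tf.Int32) // shape [1, 2] holding {1, 2}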
   15526 
// Copy a tensor setting everything outside a central band in each innermost
// matrix to zero.
   15530 //
   15531 // The `band` part is computed as follows:
   15532 // Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
   15533 // tensor with the same shape where
   15534 //
   15535 // `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
   15536 //
   15537 // The indicator function
   15538 //
// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
//                  (num_upper < 0 || (n-m) <= num_upper)`.
   15541 //
   15542 // For example:
   15543 //
   15544 // ```
   15545 // # if 'input' is [[ 0,  1,  2, 3]
   15546 //                  [-1,  0,  1, 2]
   15547 //                  [-2, -1,  0, 1]
   15548 //                  [-3, -2, -1, 0]],
   15549 //
   15550 // tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
   15551 //                                        [-1,  0,  1, 2]
   15552 //                                        [ 0, -1,  0, 1]
   15553 //                                        [ 0,  0, -1, 0]],
   15554 //
   15555 // tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
   15556 //                                       [-1,  0,  1, 0]
   15557 //                                       [-2, -1,  0, 1]
   15558 //                                       [ 0, -2, -1, 0]]
   15559 // ```
   15560 //
   15561 // Useful special cases:
   15562 //
   15563 // ```
   15564 //  tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
   15565 //  tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
   15566 //  tf.matrix_band_part(input, 0, 0) ==> Diagonal.
   15567 // ```
   15568 //
   15569 // Arguments:
   15570 //	input: Rank `k` tensor.
   15571 //	num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
   15572 // lower triangle.
   15573 //	num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
   15574 // entire upper triangle.
   15575 //
   15576 // Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
   15577 func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output) {
   15578 	if scope.Err() != nil {
   15579 		return
   15580 	}
   15581 	opspec := tf.OpSpec{
   15582 		Type: "MatrixBandPart",
   15583 		Input: []tf.Input{
   15584 			input, num_lower, num_upper,
   15585 		},
   15586 	}
   15587 	op := scope.AddOperation(opspec)
   15588 	return op.Output(0)
   15589 }
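
// The indicator function is easy to check in plain Go (illustrative helper):
//
//	inBand := func(m, n, numLower, numUpper int) bool {
//		return (numLower < 0 || m-n <= numLower) &&
//			(numUpper < 0 || n-m <= numUpper)
//	}
//	// For num_lower=1, num_upper=-1: inBand(2, 0, 1, -1) == false, which is
//	// why element [2][0] is zeroed in the first example above.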
   15590 
   15591 // DecodeCompressedAttr is an optional argument to DecodeCompressed.
   15592 type DecodeCompressedAttr func(optionalAttr)
   15593 
   15594 // DecodeCompressedCompressionType sets the optional compression_type attribute to value.
   15595 //
   15596 // value: A scalar containing either (i) the empty string (no
   15597 // compression), (ii) "ZLIB", or (iii) "GZIP".
   15598 // If not specified, defaults to ""
   15599 func DecodeCompressedCompressionType(value string) DecodeCompressedAttr {
   15600 	return func(m optionalAttr) {
   15601 		m["compression_type"] = value
   15602 	}
   15603 }
   15604 
   15605 // Decompress strings.
   15606 //
   15607 // This op decompresses each element of the `bytes` input `Tensor`, which
   15608 // is assumed to be compressed using the given `compression_type`.
   15609 //
   15610 // The `output` is a string `Tensor` of the same shape as `bytes`,
   15611 // each element containing the decompressed data from the corresponding
   15612 // element in `bytes`.
   15613 //
   15614 // Arguments:
//	bytes: A Tensor of compressed strings.
   15616 //
   15617 // Returns A Tensor with the same shape as input `bytes`, uncompressed
   15618 // from bytes.
   15619 func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output) {
   15620 	if scope.Err() != nil {
   15621 		return
   15622 	}
   15623 	attrs := map[string]interface{}{}
   15624 	for _, a := range optional {
   15625 		a(attrs)
   15626 	}
   15627 	opspec := tf.OpSpec{
   15628 		Type: "DecodeCompressed",
   15629 		Input: []tf.Input{
   15630 			bytes,
   15631 		},
   15632 		Attrs: attrs,
   15633 	}
   15634 	op := scope.AddOperation(opspec)
   15635 	return op.Output(0)
   15636 }
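
// A usage sketch for GZIP data (assuming the standard tensorflow/go client
// workflow; names are illustrative):
//
//	s := op.NewScope()
//	compressed := op.Placeholder(s, tf.String)
//	text := op.DecodeCompressed(s, compressed,
//		op.DecodeCompressedCompressionType("GZIP"))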
   15637 
   15638 // WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
   15639 type WholeFileReaderV2Attr func(optionalAttr)
   15640 
   15641 // WholeFileReaderV2Container sets the optional container attribute to value.
   15642 //
   15643 // value: If non-empty, this reader is placed in the given container.
   15644 // Otherwise, a default container is used.
   15645 // If not specified, defaults to ""
   15646 func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr {
   15647 	return func(m optionalAttr) {
   15648 		m["container"] = value
   15649 	}
   15650 }
   15651 
   15652 // WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
   15653 //
   15654 // value: If non-empty, this reader is named in the given bucket
   15655 // with this shared_name. Otherwise, the node name is used instead.
   15656 // If not specified, defaults to ""
   15657 func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr {
   15658 	return func(m optionalAttr) {
   15659 		m["shared_name"] = value
   15660 	}
   15661 }
   15662 
   15663 // A Reader that outputs the entire contents of a file as a value.
   15664 //
   15665 // To use, enqueue filenames in a Queue.  The output of ReaderRead will
   15666 // be a filename (key) and the contents of that file (value).
   15667 //
   15668 // Returns The handle to reference the Reader.
   15669 func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output) {
   15670 	if scope.Err() != nil {
   15671 		return
   15672 	}
   15673 	attrs := map[string]interface{}{}
   15674 	for _, a := range optional {
   15675 		a(attrs)
   15676 	}
   15677 	opspec := tf.OpSpec{
   15678 		Type: "WholeFileReaderV2",
   15679 
   15680 		Attrs: attrs,
   15681 	}
   15682 	op := scope.AddOperation(opspec)
   15683 	return op.Output(0)
   15684 }
   15685 
   15686 // Transforms a tf.Example proto (as a string) into typed tensors.
   15687 //
   15688 // Arguments:
//	serialized: A scalar containing a binary serialized Example proto.
   15690 //	dense_defaults: A list of Tensors (some may be empty), whose length matches
   15691 // the length of `dense_keys`. dense_defaults[j] provides default values
   15692 // when the example's feature_map lacks dense_key[j].  If an empty Tensor is
   15693 // provided for dense_defaults[j], then the Feature dense_keys[j] is required.
   15694 // The input type is inferred from dense_defaults[j], even when it's empty.
   15695 // If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
   15696 // then the shape of dense_defaults[j] must match that of dense_shapes[j].
   15697 // If dense_shapes[j] has an undefined major dimension (variable strides dense
   15698 // feature), dense_defaults[j] must contain a single element:
   15699 // the padding element.
   15700 //	num_sparse: The number of sparse features to be parsed from the example. This
   15701 // must match the lengths of `sparse_keys` and `sparse_types`.
   15702 //	sparse_keys: A list of `num_sparse` strings.
   15703 // The keys expected in the Examples' features associated with sparse values.
   15704 //	dense_keys: The keys expected in the Examples' features associated with dense
   15705 // values.
   15706 //	sparse_types: A list of `num_sparse` types; the data types of data in each
   15707 // Feature given in sparse_keys.
   15708 // Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
   15709 // DT_INT64 (Int64List), and DT_STRING (BytesList).
   15710 //	dense_shapes: The shapes of data in each Feature given in dense_keys.
   15711 // The length of this list must match the length of `dense_keys`.  The
   15712 // number of elements in the Feature corresponding to dense_key[j] must
   15713 // always equal dense_shapes[j].NumEntries().  If dense_shapes[j] ==
   15714 // (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
   15715 // will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,
   15716 // ..., DN), the shape of the output Tensor dense_values[j] will be (M,
   15717 // D1, .., DN), where M is the number of blocks of elements of length
   15718 // D1 * .... * DN, in the input.
   15719 func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
   15720 	if scope.Err() != nil {
   15721 		return
   15722 	}
   15723 	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes}
   15724 	opspec := tf.OpSpec{
   15725 		Type: "ParseSingleExample",
   15726 		Input: []tf.Input{
   15727 			serialized, tf.OutputList(dense_defaults),
   15728 		},
   15729 		Attrs: attrs,
   15730 	}
   15731 	op := scope.AddOperation(opspec)
   15732 	if scope.Err() != nil {
   15733 		return
   15734 	}
   15735 	var idx int
   15736 	var err error
   15737 	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
   15738 		scope.UpdateErr("ParseSingleExample", err)
   15739 		return
   15740 	}
   15741 	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
   15742 		scope.UpdateErr("ParseSingleExample", err)
   15743 		return
   15744 	}
   15745 	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
   15746 		scope.UpdateErr("ParseSingleExample", err)
   15747 		return
   15748 	}
   15749 	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
   15750 		scope.UpdateErr("ParseSingleExample", err)
   15751 		return
   15752 	}
   15753 	return sparse_indices, sparse_values, sparse_shapes, dense_values
   15754 }
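
// As a worked example of the dense_shapes rule above: with dense_shapes[j] =
// (2, 3), the Feature for dense_keys[j] must carry exactly 2*3 = 6 values and
// dense_values[j] has shape (2, 3); with dense_shapes[j] = (-1, 3) and 12
// values in the input, the output shape is (4, 3).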
   15755 
   15756 // Computes acos of x element-wise.
   15757 func Acos(scope *Scope, x tf.Output) (y tf.Output) {
   15758 	if scope.Err() != nil {
   15759 		return
   15760 	}
   15761 	opspec := tf.OpSpec{
   15762 		Type: "Acos",
   15763 		Input: []tf.Input{
   15764 			x,
   15765 		},
   15766 	}
   15767 	op := scope.AddOperation(opspec)
   15768 	return op.Output(0)
   15769 }
   15770 
   15771 // MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
   15772 type MaxPoolWithArgmaxAttr func(optionalAttr)
   15773 
   15774 // MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value.
   15775 // If not specified, defaults to DT_INT64
   15776 func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr {
   15777 	return func(m optionalAttr) {
   15778 		m["Targmax"] = value
   15779 	}
   15780 }
   15781 
   15782 // Performs max pooling on the input and outputs both max values and indices.
   15783 //
   15784 // The indices in `argmax` are flattened, so that a maximum value at position
   15785 // `[b, y, x, c]` becomes flattened index
   15786 // `((b * height + y) * width + x) * channels + c`.
   15787 //
   15788 // The indices returned are always in `[0, height) x [0, width)` before flattening,
   15789 // even if padding is involved and the mathematically correct answer is outside
   15790 // (either negative or too large).  This is a bug, but fixing it is difficult to do
   15791 // in a safe backwards compatible way, especially due to flattening.
   15792 //
   15793 // Arguments:
   15794 //	input: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
   15795 //	ksize: The size of the window for each dimension of the input tensor.
   15796 //	strides: The stride of the sliding window for each dimension of the
   15797 // input tensor.
   15798 //	padding: The type of padding algorithm to use.
   15799 //
// Returns The max pooled output tensor, and a 4-D tensor holding the flattened
// indices of the max values chosen for each output.
   15801 func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output) {
   15802 	if scope.Err() != nil {
   15803 		return
   15804 	}
   15805 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   15806 	for _, a := range optional {
   15807 		a(attrs)
   15808 	}
   15809 	opspec := tf.OpSpec{
   15810 		Type: "MaxPoolWithArgmax",
   15811 		Input: []tf.Input{
   15812 			input,
   15813 		},
   15814 		Attrs: attrs,
   15815 	}
   15816 	op := scope.AddOperation(opspec)
   15817 	return op.Output(0), op.Output(1)
   15818 }
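
// A worked example of the flattening: for an NHWC input with height=4,
// width=4, channels=3, a maximum at [b=1, y=2, x=3, c=0] gets argmax index
//
//	((1*4 + 2)*4 + 3)*3 + 0 == 81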
   15819 
   15820 // Transforms a serialized tensorflow.TensorProto proto into a Tensor.
   15821 //
   15822 // Arguments:
   15823 //	serialized: A scalar string containing a serialized TensorProto proto.
   15824 //	out_type: The type of the serialized tensor.  The provided type must match the
   15825 // type of the serialized tensor and no implicit conversion will take place.
   15826 //
   15827 // Returns A Tensor of type `out_type`.
   15828 func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output) {
   15829 	if scope.Err() != nil {
   15830 		return
   15831 	}
   15832 	attrs := map[string]interface{}{"out_type": out_type}
   15833 	opspec := tf.OpSpec{
   15834 		Type: "ParseTensor",
   15835 		Input: []tf.Input{
   15836 			serialized,
   15837 		},
   15838 		Attrs: attrs,
   15839 	}
   15840 	op := scope.AddOperation(opspec)
   15841 	return op.Output(0)
   15842 }
   15843 
   15844 // MapClearAttr is an optional argument to MapClear.
   15845 type MapClearAttr func(optionalAttr)
   15846 
   15847 // MapClearCapacity sets the optional capacity attribute to value.
   15848 // If not specified, defaults to 0
   15849 //
   15850 // REQUIRES: value >= 0
   15851 func MapClearCapacity(value int64) MapClearAttr {
   15852 	return func(m optionalAttr) {
   15853 		m["capacity"] = value
   15854 	}
   15855 }
   15856 
   15857 // MapClearMemoryLimit sets the optional memory_limit attribute to value.
   15858 // If not specified, defaults to 0
   15859 //
   15860 // REQUIRES: value >= 0
   15861 func MapClearMemoryLimit(value int64) MapClearAttr {
   15862 	return func(m optionalAttr) {
   15863 		m["memory_limit"] = value
   15864 	}
   15865 }
   15866 
   15867 // MapClearContainer sets the optional container attribute to value.
   15868 // If not specified, defaults to ""
   15869 func MapClearContainer(value string) MapClearAttr {
   15870 	return func(m optionalAttr) {
   15871 		m["container"] = value
   15872 	}
   15873 }
   15874 
   15875 // MapClearSharedName sets the optional shared_name attribute to value.
   15876 // If not specified, defaults to ""
   15877 func MapClearSharedName(value string) MapClearAttr {
   15878 	return func(m optionalAttr) {
   15879 		m["shared_name"] = value
   15880 	}
   15881 }
   15882 
   15883 // Op removes all elements in the underlying container.
   15884 //
   15885 // Returns the created operation.
   15886 func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation) {
   15887 	if scope.Err() != nil {
   15888 		return
   15889 	}
   15890 	attrs := map[string]interface{}{"dtypes": dtypes}
   15891 	for _, a := range optional {
   15892 		a(attrs)
   15893 	}
   15894 	opspec := tf.OpSpec{
   15895 		Type: "MapClear",
   15896 
   15897 		Attrs: attrs,
   15898 	}
   15899 	return scope.AddOperation(opspec)
   15900 }
   15901 
   15902 // DecodeCSVAttr is an optional argument to DecodeCSV.
   15903 type DecodeCSVAttr func(optionalAttr)
   15904 
   15905 // DecodeCSVFieldDelim sets the optional field_delim attribute to value.
   15906 //
   15907 // value: char delimiter to separate fields in a record.
   15908 // If not specified, defaults to ","
   15909 func DecodeCSVFieldDelim(value string) DecodeCSVAttr {
   15910 	return func(m optionalAttr) {
   15911 		m["field_delim"] = value
   15912 	}
   15913 }
   15914 
   15915 // DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
   15916 //
   15917 // value: If false, treats double quotation marks as regular
   15918 // characters inside of the string fields (ignoring RFC 4180, Section 2,
   15919 // Bullet 5).
   15920 // If not specified, defaults to true
   15921 func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr {
   15922 	return func(m optionalAttr) {
   15923 		m["use_quote_delim"] = value
   15924 	}
   15925 }
   15926 
   15927 // DecodeCSVNaValue sets the optional na_value attribute to value.
   15928 //
   15929 // value: Additional string to recognize as NA/NaN.
   15930 // If not specified, defaults to ""
   15931 func DecodeCSVNaValue(value string) DecodeCSVAttr {
   15932 	return func(m optionalAttr) {
   15933 		m["na_value"] = value
   15934 	}
   15935 }
   15936 
   15937 // Convert CSV records to tensors. Each column maps to one tensor.
   15938 //
   15939 // RFC 4180 format is expected for the CSV records.
   15940 // (https://tools.ietf.org/html/rfc4180)
// Note that leading and trailing spaces are allowed in int and float fields.
   15942 //
   15943 // Arguments:
//	records: Each string is a record/row in the CSV and all records should have
   15945 // the same format.
   15946 //	record_defaults: One tensor per column of the input record, with either a
   15947 // scalar default value for that column or empty if the column is required.
   15948 //
   15949 // Returns Each tensor will have the same shape as records.
   15950 func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output) {
   15951 	if scope.Err() != nil {
   15952 		return
   15953 	}
   15954 	attrs := map[string]interface{}{}
   15955 	for _, a := range optional {
   15956 		a(attrs)
   15957 	}
   15958 	opspec := tf.OpSpec{
   15959 		Type: "DecodeCSV",
   15960 		Input: []tf.Input{
   15961 			records, tf.OutputList(record_defaults),
   15962 		},
   15963 		Attrs: attrs,
   15964 	}
   15965 	op := scope.AddOperation(opspec)
   15966 	if scope.Err() != nil {
   15967 		return
   15968 	}
   15969 	var idx int
   15970 	var err error
   15971 	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
   15972 		scope.UpdateErr("DecodeCSV", err)
   15973 		return
   15974 	}
   15975 	return output
   15976 }
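
// A usage sketch for three columns of int, string, and float fields (assuming
// the standard tensorflow/go client workflow; names are illustrative):
//
//	s := op.NewScope()
//	records := op.Const(s, []string{"1,foo,3.5", "2,bar,1.0"})
//	defaults := []tf.Output{
//		op.Const(s, int32(0)),   // column 0: int32, default 0
//		op.Const(s, ""),         // column 1: string, default ""
//		op.Const(s, float32(0)), // column 2: float32, default 0
//	}
//	cols := op.DecodeCSV(s, records, defaults) // three tensors of shape [2]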
   15977 
   15978 // Returns the rank of a tensor.
   15979 //
   15980 // This operation returns an integer representing the rank of `input`.
   15981 //
   15982 // For example:
   15983 //
   15984 // ```
   15985 // # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
   15986 // # shape of tensor 't' is [2, 2, 3]
   15987 // rank(t) ==> 3
   15988 // ```
   15989 //
   15990 // **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
   15991 // of a tensor is the number of indices required to uniquely select each element
   15992 // of the tensor. Rank is also known as "order", "degree", or "ndims."
   15993 func Rank(scope *Scope, input tf.Output) (output tf.Output) {
   15994 	if scope.Err() != nil {
   15995 		return
   15996 	}
   15997 	opspec := tf.OpSpec{
   15998 		Type: "Rank",
   15999 		Input: []tf.Input{
   16000 			input,
   16001 		},
   16002 	}
   16003 	op := scope.AddOperation(opspec)
   16004 	return op.Output(0)
   16005 }
   16006 
   16007 // Output a fact about factorials.
   16008 func Fact(scope *Scope) (fact tf.Output) {
   16009 	if scope.Err() != nil {
   16010 		return
   16011 	}
   16012 	opspec := tf.OpSpec{
   16013 		Type: "Fact",
   16014 	}
   16015 	op := scope.AddOperation(opspec)
   16016 	return op.Output(0)
   16017 }
   16018 
   16019 // Makes its input available to the next iteration.
   16020 //
   16021 // Arguments:
   16022 //	data: The tensor to be made available to the next iteration.
   16023 //
   16024 // Returns The same tensor as `data`.
   16025 func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
   16026 	if scope.Err() != nil {
   16027 		return
   16028 	}
   16029 	opspec := tf.OpSpec{
   16030 		Type: "NextIteration",
   16031 		Input: []tf.Input{
   16032 			data,
   16033 		},
   16034 	}
   16035 	op := scope.AddOperation(opspec)
   16036 	return op.Output(0)
   16037 }
   16038 
   16039 // Creates a dataset that skips `count` elements from the `input_dataset`.
   16040 //
   16041 // Arguments:
   16042 //
   16043 //	count: A scalar representing the number of elements from the `input_dataset`
   16044 // that should be skipped.  If count is -1, skips everything.
   16045 //
   16046 //
   16047 func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   16048 	if scope.Err() != nil {
   16049 		return
   16050 	}
   16051 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   16052 	opspec := tf.OpSpec{
   16053 		Type: "SkipDataset",
   16054 		Input: []tf.Input{
   16055 			input_dataset, count,
   16056 		},
   16057 		Attrs: attrs,
   16058 	}
   16059 	op := scope.AddOperation(opspec)
   16060 	return op.Output(0)
   16061 }
   16062 
   16063 // Computes hyperbolic tangent of `x` element-wise.
   16064 func Tanh(scope *Scope, x tf.Output) (y tf.Output) {
   16065 	if scope.Err() != nil {
   16066 		return
   16067 	}
   16068 	opspec := tf.OpSpec{
   16069 		Type: "Tanh",
   16070 		Input: []tf.Input{
   16071 			x,
   16072 		},
   16073 	}
   16074 	op := scope.AddOperation(opspec)
   16075 	return op.Output(0)
   16076 }
   16077 
   16078 // Computes the maximum along segments of a tensor.
   16079 //
// Read the section on segmentation in the TensorFlow math_ops documentation
// for an explanation of segments.
   16082 //
   16083 // Computes a tensor such that
   16084 // \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
   16085 // that `segment_ids[j] == i`.
   16086 //
   16087 // If the max is empty for a given segment ID `i`, `output[i] = 0`.
   16088 //
// An illustration is available at
// https://www.tensorflow.org/images/SegmentMax.png.
   16092 //
   16093 // Arguments:
   16094 //
//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
// first dimension.  Values should be sorted and can be repeated.
   16097 //
   16098 // Returns Has same shape as data, except for dimension 0 which
   16099 // has size `k`, the number of segments.
   16100 func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
   16101 	if scope.Err() != nil {
   16102 		return
   16103 	}
   16104 	opspec := tf.OpSpec{
   16105 		Type: "SegmentMax",
   16106 		Input: []tf.Input{
   16107 			data, segment_ids,
   16108 		},
   16109 	}
   16110 	op := scope.AddOperation(opspec)
   16111 	return op.Output(0)
   16112 }
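
// A worked example of the formula: for data = [1, 3, 2, 5, 4] and
// segment_ids = [0, 0, 1, 1, 1],
//
//	output = [max(1, 3), max(2, 5, 4)] = [3, 5]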
   16113 
   16114 // AvgPoolGradAttr is an optional argument to AvgPoolGrad.
   16115 type AvgPoolGradAttr func(optionalAttr)
   16116 
   16117 // AvgPoolGradDataFormat sets the optional data_format attribute to value.
   16118 //
   16119 // value: Specify the data format of the input and output data. With the
   16120 // default format "NHWC", the data is stored in the order of:
   16121 //     [batch, in_height, in_width, in_channels].
   16122 // Alternatively, the format could be "NCHW", the data storage order of:
   16123 //     [batch, in_channels, in_height, in_width].
   16124 // If not specified, defaults to "NHWC"
   16125 func AvgPoolGradDataFormat(value string) AvgPoolGradAttr {
   16126 	return func(m optionalAttr) {
   16127 		m["data_format"] = value
   16128 	}
   16129 }
   16130 
   16131 // Computes gradients of the average pooling function.
   16132 //
   16133 // Arguments:
   16134 //	orig_input_shape: 1-D.  Shape of the original input to `avg_pool`.
   16135 //	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
   16136 // the output of `avg_pool`.
   16137 //	ksize: The size of the sliding window for each dimension of the input.
   16138 //	strides: The stride of the sliding window for each dimension of the input.
   16139 //	padding: The type of padding algorithm to use.
   16140 //
   16141 // Returns 4-D.  Gradients w.r.t. the input of `avg_pool`.
   16142 func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) {
   16143 	if scope.Err() != nil {
   16144 		return
   16145 	}
   16146 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   16147 	for _, a := range optional {
   16148 		a(attrs)
   16149 	}
   16150 	opspec := tf.OpSpec{
   16151 		Type: "AvgPoolGrad",
   16152 		Input: []tf.Input{
   16153 			orig_input_shape, grad,
   16154 		},
   16155 		Attrs: attrs,
   16156 	}
   16157 	op := scope.AddOperation(opspec)
   16158 	return op.Output(0)
   16159 }
   16160 
   16161 // StageClearAttr is an optional argument to StageClear.
   16162 type StageClearAttr func(optionalAttr)
   16163 
   16164 // StageClearCapacity sets the optional capacity attribute to value.
   16165 // If not specified, defaults to 0
   16166 //
   16167 // REQUIRES: value >= 0
   16168 func StageClearCapacity(value int64) StageClearAttr {
   16169 	return func(m optionalAttr) {
   16170 		m["capacity"] = value
   16171 	}
   16172 }
   16173 
   16174 // StageClearMemoryLimit sets the optional memory_limit attribute to value.
   16175 // If not specified, defaults to 0
   16176 //
   16177 // REQUIRES: value >= 0
   16178 func StageClearMemoryLimit(value int64) StageClearAttr {
   16179 	return func(m optionalAttr) {
   16180 		m["memory_limit"] = value
   16181 	}
   16182 }
   16183 
   16184 // StageClearContainer sets the optional container attribute to value.
   16185 // If not specified, defaults to ""
   16186 func StageClearContainer(value string) StageClearAttr {
   16187 	return func(m optionalAttr) {
   16188 		m["container"] = value
   16189 	}
   16190 }
   16191 
   16192 // StageClearSharedName sets the optional shared_name attribute to value.
   16193 // If not specified, defaults to ""
   16194 func StageClearSharedName(value string) StageClearAttr {
   16195 	return func(m optionalAttr) {
   16196 		m["shared_name"] = value
   16197 	}
   16198 }
   16199 
   16200 // Op removes all elements in the underlying container.
   16201 //
   16202 // Returns the created operation.
   16203 func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation) {
   16204 	if scope.Err() != nil {
   16205 		return
   16206 	}
   16207 	attrs := map[string]interface{}{"dtypes": dtypes}
   16208 	for _, a := range optional {
   16209 		a(attrs)
   16210 	}
   16211 	opspec := tf.OpSpec{
   16212 		Type: "StageClear",
   16213 
   16214 		Attrs: attrs,
   16215 	}
   16216 	return scope.AddOperation(opspec)
   16217 }
   16218 
   16219 // ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
   16220 type ComputeAccidentalHitsAttr func(optionalAttr)
   16221 
   16222 // ComputeAccidentalHitsSeed sets the optional seed attribute to value.
   16223 //
   16224 // value: If either seed or seed2 are set to be non-zero, the random number
   16225 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   16226 // random seed.
   16227 // If not specified, defaults to 0
   16228 func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr {
   16229 	return func(m optionalAttr) {
   16230 		m["seed"] = value
   16231 	}
   16232 }
   16233 
   16234 // ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
   16235 //
// value: A second seed to avoid seed collision.
   16237 // If not specified, defaults to 0
   16238 func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr {
   16239 	return func(m optionalAttr) {
   16240 		m["seed2"] = value
   16241 	}
   16242 }
   16243 
   16244 // Computes the ids of the positions in sampled_candidates that match true_labels.
   16245 //
   16246 // When doing log-odds NCE, the result of this op should be passed through a
   16247 // SparseToDense op, then added to the logits of the sampled candidates. This has
   16248 // the effect of 'removing' the sampled labels that match the true labels by
   16249 // making the classifier sure that they are sampled labels.
   16250 //
   16251 // Arguments:
   16252 //	true_classes: The true_classes output of UnpackSparseLabels.
   16253 //	sampled_candidates: The sampled_candidates output of CandidateSampler.
   16254 //	num_true: Number of true labels per context.
   16255 //
// Returns:
//	indices: A vector of indices corresponding to rows of true_candidates.
//	ids: A vector of IDs of positions in sampled_candidates that match a
// true_label for the row with the corresponding index in indices.
//	weights: A vector of the same length as indices and ids, in which each
// element is -FLOAT_MAX.
   16259 func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output) {
   16260 	if scope.Err() != nil {
   16261 		return
   16262 	}
   16263 	attrs := map[string]interface{}{"num_true": num_true}
   16264 	for _, a := range optional {
   16265 		a(attrs)
   16266 	}
   16267 	opspec := tf.OpSpec{
   16268 		Type: "ComputeAccidentalHits",
   16269 		Input: []tf.Input{
   16270 			true_classes, sampled_candidates,
   16271 		},
   16272 		Attrs: attrs,
   16273 	}
   16274 	op := scope.AddOperation(opspec)
   16275 	return op.Output(0), op.Output(1), op.Output(2)
   16276 }
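
// Following the log-odds NCE recipe above (a sketch; assuming a client-side
// scope `s` and inputs `trueClasses` and `sampled`; names are illustrative):
//
//	indices, ids, weights := op.ComputeAccidentalHits(s, trueClasses, sampled, 1)
//	// Scatter `weights` (-FLOAT_MAX) into the sampled-logits shape using
//	// `indices`/`ids` (e.g. via a sparse-to-dense op) and add the result to
//	// the sampled logits to mask out accidental hits.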
   16277 
   16278 // Computes sigmoid of `x` element-wise.
   16279 //
   16280 // Specifically, `y = 1 / (1 + exp(-x))`.
   16281 func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
   16282 	if scope.Err() != nil {
   16283 		return
   16284 	}
   16285 	opspec := tf.OpSpec{
   16286 		Type: "Sigmoid",
   16287 		Input: []tf.Input{
   16288 			x,
   16289 		},
   16290 	}
   16291 	op := scope.AddOperation(opspec)
   16292 	return op.Output(0)
   16293 }
   16294 
   16295 // RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
   16296 type RandomStandardNormalAttr func(optionalAttr)
   16297 
   16298 // RandomStandardNormalSeed sets the optional seed attribute to value.
   16299 //
   16300 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   16301 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   16302 // random seed.
   16303 // If not specified, defaults to 0
   16304 func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr {
   16305 	return func(m optionalAttr) {
   16306 		m["seed"] = value
   16307 	}
   16308 }
   16309 
   16310 // RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
   16311 //
   16312 // value: A second seed to avoid seed collision.
   16313 // If not specified, defaults to 0
   16314 func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr {
   16315 	return func(m optionalAttr) {
   16316 		m["seed2"] = value
   16317 	}
   16318 }
   16319 
   16320 // Outputs random values from a normal distribution.
   16321 //
   16322 // The generated values will have mean 0 and standard deviation 1.
   16323 //
   16324 // Arguments:
   16325 //	shape: The shape of the output tensor.
   16326 //	dtype: The type of the output.
   16327 //
   16328 // Returns A tensor of the specified shape filled with random normal values.
   16329 func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output) {
   16330 	if scope.Err() != nil {
   16331 		return
   16332 	}
   16333 	attrs := map[string]interface{}{"dtype": dtype}
   16334 	for _, a := range optional {
   16335 		a(attrs)
   16336 	}
   16337 	opspec := tf.OpSpec{
   16338 		Type: "RandomStandardNormal",
   16339 		Input: []tf.Input{
   16340 			shape,
   16341 		},
   16342 		Attrs: attrs,
   16343 	}
   16344 	op := scope.AddOperation(opspec)
   16345 	return op.Output(0)
   16346 }
   16347 
   16348 // FusedBatchNormAttr is an optional argument to FusedBatchNorm.
   16349 type FusedBatchNormAttr func(optionalAttr)
   16350 
   16351 // FusedBatchNormEpsilon sets the optional epsilon attribute to value.
   16352 //
   16353 // value: A small float number added to the variance of x.
   16354 // If not specified, defaults to 0.0001
   16355 func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr {
   16356 	return func(m optionalAttr) {
   16357 		m["epsilon"] = value
   16358 	}
   16359 }
   16360 
   16361 // FusedBatchNormDataFormat sets the optional data_format attribute to value.
   16362 //
   16363 // value: The data format for x and y. Either "NHWC" (default) or "NCHW".
   16364 // If not specified, defaults to "NHWC"
   16365 func FusedBatchNormDataFormat(value string) FusedBatchNormAttr {
   16366 	return func(m optionalAttr) {
   16367 		m["data_format"] = value
   16368 	}
   16369 }
   16370 
   16371 // FusedBatchNormIsTraining sets the optional is_training attribute to value.
   16372 //
   16373 // value: A bool value to indicate the operation is for training (default)
   16374 // or inference.
   16375 // If not specified, defaults to true
   16376 func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr {
   16377 	return func(m optionalAttr) {
   16378 		m["is_training"] = value
   16379 	}
   16380 }
   16381 
   16382 // Batch normalization.
   16383 //
// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
   16385 // The size of 1D Tensors matches the dimension C of the 4D Tensors.
   16386 //
   16387 // Arguments:
   16388 //	x: A 4D Tensor for input data.
   16389 //	scale: A 1D Tensor for scaling factor, to scale the normalized x.
   16390 //	offset: A 1D Tensor for offset, to shift to the normalized x.
   16391 //	mean: A 1D Tensor for population mean. Used for inference only;
   16392 // must be empty for training.
   16393 //	variance: A 1D Tensor for population variance. Used for inference only;
   16394 // must be empty for training.
   16395 //
// Returns:
//	y: A 4D Tensor for output data.
//	batch_mean: A 1D Tensor for the computed batch mean, to be used by
// TensorFlow to compute the running mean.
//	batch_variance: A 1D Tensor for the computed batch variance, to be used
// by TensorFlow to compute the running variance.
//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
// in the gradient computation.
//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted
// variance in the cuDNN case), to be reused in the gradient computation.
   16401 func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
   16402 	if scope.Err() != nil {
   16403 		return
   16404 	}
   16405 	attrs := map[string]interface{}{}
   16406 	for _, a := range optional {
   16407 		a(attrs)
   16408 	}
   16409 	opspec := tf.OpSpec{
   16410 		Type: "FusedBatchNorm",
   16411 		Input: []tf.Input{
   16412 			x, scale, offset, mean, variance,
   16413 		},
   16414 		Attrs: attrs,
   16415 	}
   16416 	op := scope.AddOperation(opspec)
   16417 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
   16418 }
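
// A training-mode construction sketch for a single-channel NHWC input
// (assuming the standard tensorflow/go client workflow; names are
// illustrative):
//
//	s := op.NewScope()
//	x := op.Placeholder(s, tf.Float) // 4-D [batch, height, width, 1]
//	scale := op.Const(s, []float32{1})
//	offset := op.Const(s, []float32{0})
//	empty := op.Const(s, []float32{}) // mean/variance must be empty for training
//	y, batchMean, batchVar, _, _ := op.FusedBatchNorm(s, x, scale, offset,
//		empty, empty, op.FusedBatchNormEpsilon(1e-3))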
   16419 
   16420 // Computes tan of x element-wise.
   16421 func Tan(scope *Scope, x tf.Output) (y tf.Output) {
   16422 	if scope.Err() != nil {
   16423 		return
   16424 	}
   16425 	opspec := tf.OpSpec{
   16426 		Type: "Tan",
   16427 		Input: []tf.Input{
   16428 			x,
   16429 		},
   16430 	}
   16431 	op := scope.AddOperation(opspec)
   16432 	return op.Output(0)
   16433 }
   16434 
   16435 // FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
   16436 type FusedBatchNormV2Attr func(optionalAttr)
   16437 
   16438 // FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
   16439 //
   16440 // value: A small float number added to the variance of x.
   16441 // If not specified, defaults to 0.0001
   16442 func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr {
   16443 	return func(m optionalAttr) {
   16444 		m["epsilon"] = value
   16445 	}
   16446 }
   16447 
   16448 // FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
   16449 //
   16450 // value: The data format for x and y. Either "NHWC" (default) or "NCHW".
   16451 // If not specified, defaults to "NHWC"
   16452 func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr {
   16453 	return func(m optionalAttr) {
   16454 		m["data_format"] = value
   16455 	}
   16456 }
   16457 
   16458 // FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
   16459 //
   16460 // value: A bool value to indicate the operation is for training (default)
   16461 // or inference.
   16462 // If not specified, defaults to true
   16463 func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr {
   16464 	return func(m optionalAttr) {
   16465 		m["is_training"] = value
   16466 	}
   16467 }
   16468 
   16469 // Batch normalization.
   16470 //
// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
   16472 // The size of 1D Tensors matches the dimension C of the 4D Tensors.
   16473 //
   16474 // Arguments:
   16475 //	x: A 4D Tensor for input data.
   16476 //	scale: A 1D Tensor for scaling factor, to scale the normalized x.
   16477 //	offset: A 1D Tensor for offset, to shift to the normalized x.
   16478 //	mean: A 1D Tensor for population mean. Used for inference only;
   16479 // must be empty for training.
   16480 //	variance: A 1D Tensor for population variance. Used for inference only;
   16481 // must be empty for training.
   16482 //
// Returns:
//	y: A 4D Tensor for output data.
//	batch_mean: A 1D Tensor for the computed batch mean, to be used by
// TensorFlow to compute the running mean.
//	batch_variance: A 1D Tensor for the computed batch variance, to be used
// by TensorFlow to compute the running variance.
//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
// in the gradient computation.
//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted
// variance in the cuDNN case), to be reused in the gradient computation.
   16488 func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
   16489 	if scope.Err() != nil {
   16490 		return
   16491 	}
   16492 	attrs := map[string]interface{}{}
   16493 	for _, a := range optional {
   16494 		a(attrs)
   16495 	}
   16496 	opspec := tf.OpSpec{
   16497 		Type: "FusedBatchNormV2",
   16498 		Input: []tf.Input{
   16499 			x, scale, offset, mean, variance,
   16500 		},
   16501 		Attrs: attrs,
   16502 	}
   16503 	op := scope.AddOperation(opspec)
   16504 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
   16505 }
   16506 
   16507 // MultinomialAttr is an optional argument to Multinomial.
   16508 type MultinomialAttr func(optionalAttr)
   16509 
   16510 // MultinomialSeed sets the optional seed attribute to value.
   16511 //
   16512 // value: If either seed or seed2 is set to be non-zero, the internal random number
   16513 // generator is seeded by the given seed.  Otherwise, a random seed is used.
   16514 // If not specified, defaults to 0
   16515 func MultinomialSeed(value int64) MultinomialAttr {
   16516 	return func(m optionalAttr) {
   16517 		m["seed"] = value
   16518 	}
   16519 }
   16520 
   16521 // MultinomialSeed2 sets the optional seed2 attribute to value.
   16522 //
   16523 // value: A second seed to avoid seed collision.
   16524 // If not specified, defaults to 0
   16525 func MultinomialSeed2(value int64) MultinomialAttr {
   16526 	return func(m optionalAttr) {
   16527 		m["seed2"] = value
   16528 	}
   16529 }
   16530 
   16531 // MultinomialOutputDtype sets the optional output_dtype attribute to value.
   16532 // If not specified, defaults to DT_INT64
   16533 func MultinomialOutputDtype(value tf.DataType) MultinomialAttr {
   16534 	return func(m optionalAttr) {
   16535 		m["output_dtype"] = value
   16536 	}
   16537 }
   16538 
   16539 // Draws samples from a multinomial distribution.
   16540 //
   16541 // Arguments:
   16542 //	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
   16543 // represents the unnormalized log probabilities for all classes.
   16544 //	num_samples: 0-D.  Number of independent samples to draw for each row slice.
   16545 //
   16546 // Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
   16547 // contains the drawn class labels with range `[0, num_classes)`.
   16548 func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, optional ...MultinomialAttr) (output tf.Output) {
   16549 	if scope.Err() != nil {
   16550 		return
   16551 	}
   16552 	attrs := map[string]interface{}{}
   16553 	for _, a := range optional {
   16554 		a(attrs)
   16555 	}
   16556 	opspec := tf.OpSpec{
   16557 		Type: "Multinomial",
   16558 		Input: []tf.Input{
   16559 			logits, num_samples,
   16560 		},
   16561 		Attrs: attrs,
   16562 	}
   16563 	op := scope.AddOperation(opspec)
   16564 	return op.Output(0)
   16565 }
   16566 
   16567 // EncodeJpegAttr is an optional argument to EncodeJpeg.
   16568 type EncodeJpegAttr func(optionalAttr)
   16569 
   16570 // EncodeJpegFormat sets the optional format attribute to value.
   16571 //
   16572 // value: Per pixel image format.
   16573 // If not specified, defaults to ""
   16574 func EncodeJpegFormat(value string) EncodeJpegAttr {
   16575 	return func(m optionalAttr) {
   16576 		m["format"] = value
   16577 	}
   16578 }
   16579 
   16580 // EncodeJpegQuality sets the optional quality attribute to value.
   16581 //
   16582 // value: Quality of the compression from 0 to 100 (higher is better and slower).
   16583 // If not specified, defaults to 95
   16584 func EncodeJpegQuality(value int64) EncodeJpegAttr {
   16585 	return func(m optionalAttr) {
   16586 		m["quality"] = value
   16587 	}
   16588 }
   16589 
   16590 // EncodeJpegProgressive sets the optional progressive attribute to value.
   16591 //
   16592 // value: If True, create a JPEG that loads progressively (coarse to fine).
   16593 // If not specified, defaults to false
   16594 func EncodeJpegProgressive(value bool) EncodeJpegAttr {
   16595 	return func(m optionalAttr) {
   16596 		m["progressive"] = value
   16597 	}
   16598 }
   16599 
   16600 // EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
   16601 //
   16602 // value: If True, spend CPU/RAM to reduce size with no quality change.
   16603 // If not specified, defaults to false
   16604 func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr {
   16605 	return func(m optionalAttr) {
   16606 		m["optimize_size"] = value
   16607 	}
   16608 }
   16609 
   16610 // EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
   16611 //
   16612 // value: See http://en.wikipedia.org/wiki/Chroma_subsampling.
   16613 // If not specified, defaults to true
   16614 func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr {
   16615 	return func(m optionalAttr) {
   16616 		m["chroma_downsampling"] = value
   16617 	}
   16618 }
   16619 
   16620 // EncodeJpegDensityUnit sets the optional density_unit attribute to value.
   16621 //
   16622 // value: Unit used to specify `x_density` and `y_density`:
   16623 // pixels per inch (`'in'`) or centimeter (`'cm'`).
   16624 // If not specified, defaults to "in"
   16625 func EncodeJpegDensityUnit(value string) EncodeJpegAttr {
   16626 	return func(m optionalAttr) {
   16627 		m["density_unit"] = value
   16628 	}
   16629 }
   16630 
   16631 // EncodeJpegXDensity sets the optional x_density attribute to value.
   16632 //
   16633 // value: Horizontal pixels per density unit.
   16634 // If not specified, defaults to 300
   16635 func EncodeJpegXDensity(value int64) EncodeJpegAttr {
   16636 	return func(m optionalAttr) {
   16637 		m["x_density"] = value
   16638 	}
   16639 }
   16640 
   16641 // EncodeJpegYDensity sets the optional y_density attribute to value.
   16642 //
   16643 // value: Vertical pixels per density unit.
   16644 // If not specified, defaults to 300
   16645 func EncodeJpegYDensity(value int64) EncodeJpegAttr {
   16646 	return func(m optionalAttr) {
   16647 		m["y_density"] = value
   16648 	}
   16649 }
   16650 
   16651 // EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
   16652 //
   16653 // value: If not empty, embed this XMP metadata in the image header.
   16654 // If not specified, defaults to ""
   16655 func EncodeJpegXmpMetadata(value string) EncodeJpegAttr {
   16656 	return func(m optionalAttr) {
   16657 		m["xmp_metadata"] = value
   16658 	}
   16659 }
   16660 
   16661 // JPEG-encode an image.
   16662 //
   16663 // `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
   16664 //
   16665 // The attr `format` can be used to override the color format of the encoded
   16666 // output.  Values can be:
   16667 //
   16668 // *   `''`: Use a default format based on the number of channels in the image.
   16669 // *   `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
   16670 //     of `image` must be 1.
   16671 // *   `rgb`: Output an RGB JPEG image. The `channels` dimension
   16672 //     of `image` must be 3.
   16673 //
// If `format` is not specified or is the empty string, a default format is
// picked based on the number of channels in `image`:
   16676 //
   16677 // *   1: Output a grayscale image.
   16678 // *   3: Output an RGB image.
   16679 //
   16680 // Arguments:
   16681 //	image: 3-D with shape `[height, width, channels]`.
   16682 //
   16683 // Returns 0-D. JPEG-encoded image.
   16684 func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) {
   16685 	if scope.Err() != nil {
   16686 		return
   16687 	}
   16688 	attrs := map[string]interface{}{}
   16689 	for _, a := range optional {
   16690 		a(attrs)
   16691 	}
   16692 	opspec := tf.OpSpec{
   16693 		Type: "EncodeJpeg",
   16694 		Input: []tf.Input{
   16695 			image,
   16696 		},
   16697 		Attrs: attrs,
   16698 	}
   16699 	op := scope.AddOperation(opspec)
   16700 	return op.Output(0)
   16701 }
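
// A construction sketch (assuming the standard tensorflow/go client workflow;
// names are illustrative):
//
//	s := op.NewScope()
//	img := op.Placeholder(s, tf.Uint8) // 3-D [height, width, channels]
//	jpeg := op.EncodeJpeg(s, img,
//		op.EncodeJpegQuality(90), op.EncodeJpegProgressive(true))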
   16702 
   16703 // MaxPoolGradAttr is an optional argument to MaxPoolGrad.
   16704 type MaxPoolGradAttr func(optionalAttr)
   16705 
   16706 // MaxPoolGradDataFormat sets the optional data_format attribute to value.
   16707 //
   16708 // value: Specify the data format of the input and output data. With the
   16709 // default format "NHWC", the data is stored in the order of:
   16710 //     [batch, in_height, in_width, in_channels].
   16711 // Alternatively, the format could be "NCHW", the data storage order of:
   16712 //     [batch, in_channels, in_height, in_width].
   16713 // If not specified, defaults to "NHWC"
   16714 func MaxPoolGradDataFormat(value string) MaxPoolGradAttr {
   16715 	return func(m optionalAttr) {
   16716 		m["data_format"] = value
   16717 	}
   16718 }
   16719 
   16720 // Computes gradients of the maxpooling function.
   16721 //
   16722 // Arguments:
   16723 //	orig_input: The original input tensor.
   16724 //	orig_output: The original output tensor.
   16725 //	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
   16726 //	ksize: The size of the window for each dimension of the input tensor.
   16727 //	strides: The stride of the sliding window for each dimension of the
   16728 // input tensor.
   16729 //	padding: The type of padding algorithm to use.
   16730 //
   16731 // Returns Gradients w.r.t. the input to `max_pool`.
   16732 func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output) {
   16733 	if scope.Err() != nil {
   16734 		return
   16735 	}
   16736 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   16737 	for _, a := range optional {
   16738 		a(attrs)
   16739 	}
   16740 	opspec := tf.OpSpec{
   16741 		Type: "MaxPoolGrad",
   16742 		Input: []tf.Input{
   16743 			orig_input, orig_output, grad,
   16744 		},
   16745 		Attrs: attrs,
   16746 	}
   16747 	op := scope.AddOperation(opspec)
   16748 	return op.Output(0)
   16749 }
   16750 
   16751 // CropAndResizeAttr is an optional argument to CropAndResize.
   16752 type CropAndResizeAttr func(optionalAttr)
   16753 
   16754 // CropAndResizeMethod sets the optional method attribute to value.
   16755 //
   16756 // value: A string specifying the interpolation method. Only 'bilinear' is
   16757 // supported for now.
   16758 // If not specified, defaults to "bilinear"
   16759 func CropAndResizeMethod(value string) CropAndResizeAttr {
   16760 	return func(m optionalAttr) {
   16761 		m["method"] = value
   16762 	}
   16763 }
   16764 
   16765 // CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
   16766 //
   16767 // value: Value used for extrapolation, when applicable.
   16768 // If not specified, defaults to 0
   16769 func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
   16770 	return func(m optionalAttr) {
   16771 		m["extrapolation_value"] = value
   16772 	}
   16773 }
   16774 
   16775 // Extracts crops from the input image tensor and bilinearly resizes them (possibly
   16776 //
   16777 // with aspect ratio change) to a common output size specified by `crop_size`. This
   16778 // is more general than the `crop_to_bounding_box` op which extracts a fixed size
   16779 // slice from the input image and does not allow resizing or aspect ratio change.
   16780 //
   16781 // Returns a tensor with `crops` from the input `image` at positions defined at the
   16782 // bounding box locations in `boxes`. The cropped boxes are all resized (with
   16783 // bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
   16784 // result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The
   16785 // resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the
   16786 // method will give identical results to using `tf.image.resize_bilinear()`
   16787 // with `align_corners=True`.
   16788 //
   16789 // Arguments:
   16790 //	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
   16791 // Both `image_height` and `image_width` need to be positive.
   16792 //	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
   16793 // specifies the coordinates of a box in the `box_ind[i]` image and is specified
   16794 // in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
16795 // `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
   16796 // `[0, 1]` interval of normalized image height is mapped to
   16797 // `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
   16798 // which case the sampled crop is an up-down flipped version of the original
   16799 // image. The width dimension is treated similarly. Normalized coordinates
   16800 // outside the `[0, 1]` range are allowed, in which case we use
   16801 // `extrapolation_value` to extrapolate the input image values.
   16802 //	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
   16803 // The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
   16804 //	crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
   16805 // cropped image patches are resized to this size. The aspect ratio of the image
   16806 // content is not preserved. Both `crop_height` and `crop_width` need to be
   16807 // positive.
   16808 //
   16809 // Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
   16810 func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output) {
   16811 	if scope.Err() != nil {
   16812 		return
   16813 	}
   16814 	attrs := map[string]interface{}{}
   16815 	for _, a := range optional {
   16816 		a(attrs)
   16817 	}
   16818 	opspec := tf.OpSpec{
   16819 		Type: "CropAndResize",
   16820 		Input: []tf.Input{
   16821 			image, boxes, box_ind, crop_size,
   16822 		},
   16823 		Attrs: attrs,
   16824 	}
   16825 	op := scope.AddOperation(opspec)
   16826 	return op.Output(0)
   16827 }
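
// An illustrative sketch of cropping and resizing one box, assuming a Scope
// `s` and a 4-D `images` tensor defined elsewhere; the box constants are
// examples only.
//
//	boxes := Const(s, [][]float32{{0, 0, 1, 1}}) // the whole first image
//	boxInd := Const(s, []int32{0})
//	cropSize := Const(s, []int32{64, 64})
//	crops := CropAndResize(s, images, boxes, boxInd, cropSize,
//		CropAndResizeExtrapolationValue(0))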
   16828 
   16829 // ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
   16830 type ResourceApplyPowerSignAttr func(optionalAttr)
   16831 
   16832 // ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
   16833 //
   16834 // value: If `True`, updating of the var and m tensors is
   16835 // protected by a lock; otherwise the behavior is undefined, but may exhibit less
   16836 // contention.
   16837 // If not specified, defaults to false
   16838 func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr {
   16839 	return func(m optionalAttr) {
   16840 		m["use_locking"] = value
   16841 	}
   16842 }
   16843 
16844 // Update '*var' according to the PowerSign update.
   16845 //
   16846 // m_t <- beta1 * m_{t-1} + (1 - beta1) * g
   16847 // update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
   16848 // variable <- variable - lr_t * update
   16849 //
   16850 // Arguments:
   16851 //	var_: Should be from a Variable().
   16852 //	m: Should be from a Variable().
   16853 //	lr: Scaling factor. Must be a scalar.
   16854 //	logbase: Must be a scalar.
   16855 //	sign_decay: Must be a scalar.
   16856 //	beta: Must be a scalar.
   16857 //	grad: The gradient.
   16858 //
   16859 // Returns the created operation.
   16860 func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation) {
   16861 	if scope.Err() != nil {
   16862 		return
   16863 	}
   16864 	attrs := map[string]interface{}{}
   16865 	for _, a := range optional {
   16866 		a(attrs)
   16867 	}
   16868 	opspec := tf.OpSpec{
   16869 		Type: "ResourceApplyPowerSign",
   16870 		Input: []tf.Input{
   16871 			var_, m, lr, logbase, sign_decay, beta, grad,
   16872 		},
   16873 		Attrs: attrs,
   16874 	}
   16875 	return scope.AddOperation(opspec)
   16876 }
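
// A hedged sketch of wiring one PowerSign step, assuming a Scope `s`,
// resource handles `v` and `m` (e.g. from variable ops), and a gradient
// tensor `g` created elsewhere; the hyperparameter values are illustrative.
//
//	lr := Const(s, float32(0.1))
//	logbase := Const(s, float32(2.0))
//	signDecay := Const(s, float32(0.99))
//	beta := Const(s, float32(0.9))
//	step := ResourceApplyPowerSign(s, v, m, lr, logbase, signDecay, beta, g,
//		ResourceApplyPowerSignUseLocking(true))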
   16877 
   16878 // Deprecated. Disallowed in GraphDef version >= 2.
   16879 //
   16880 // DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
   16881 func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) {
   16882 	if scope.Err() != nil {
   16883 		return
   16884 	}
   16885 	opspec := tf.OpSpec{
   16886 		Type: "AdjustContrast",
   16887 		Input: []tf.Input{
   16888 			images, contrast_factor, min_value, max_value,
   16889 		},
   16890 	}
   16891 	op := scope.AddOperation(opspec)
   16892 	return op.Output(0)
   16893 }
   16894 
   16895 // Table initializer that takes two tensors for keys and values respectively.
   16896 //
   16897 // Arguments:
   16898 //	table_handle: Handle to a table which will be initialized.
   16899 //	keys: Keys of type Tkey.
   16900 //	values: Values of type Tval.
   16901 //
   16902 // Returns the created operation.
   16903 func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
   16904 	if scope.Err() != nil {
   16905 		return
   16906 	}
   16907 	opspec := tf.OpSpec{
   16908 		Type: "InitializeTableV2",
   16909 		Input: []tf.Input{
   16910 			table_handle, keys, values,
   16911 		},
   16912 	}
   16913 	return scope.AddOperation(opspec)
   16914 }
   16915 
   16916 // PrintAttr is an optional argument to Print.
   16917 type PrintAttr func(optionalAttr)
   16918 
   16919 // PrintMessage sets the optional message attribute to value.
   16920 //
16921 // value: A string, prefix of the printed message.
   16922 // If not specified, defaults to ""
   16923 func PrintMessage(value string) PrintAttr {
   16924 	return func(m optionalAttr) {
   16925 		m["message"] = value
   16926 	}
   16927 }
   16928 
   16929 // PrintFirstN sets the optional first_n attribute to value.
   16930 //
16931 // value: Only log the first `first_n` times. -1 means no limit.
   16932 // If not specified, defaults to -1
   16933 func PrintFirstN(value int64) PrintAttr {
   16934 	return func(m optionalAttr) {
   16935 		m["first_n"] = value
   16936 	}
   16937 }
   16938 
   16939 // PrintSummarize sets the optional summarize attribute to value.
   16940 //
   16941 // value: Only print this many entries of each tensor.
   16942 // If not specified, defaults to 3
   16943 func PrintSummarize(value int64) PrintAttr {
   16944 	return func(m optionalAttr) {
   16945 		m["summarize"] = value
   16946 	}
   16947 }
   16948 
   16949 // Prints a list of tensors.
   16950 //
   16951 // Passes `input` through to `output` and prints `data` when evaluating.
   16952 //
   16953 // Arguments:
   16954 //	input: The tensor passed to `output`
   16955 //	data: A list of tensors to print out when op is evaluated.
   16956 //
16957 // Returns The unmodified `input` tensor.
   16958 func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output) {
   16959 	if scope.Err() != nil {
   16960 		return
   16961 	}
   16962 	attrs := map[string]interface{}{}
   16963 	for _, a := range optional {
   16964 		a(attrs)
   16965 	}
   16966 	opspec := tf.OpSpec{
   16967 		Type: "Print",
   16968 		Input: []tf.Input{
   16969 			input, tf.OutputList(data),
   16970 		},
   16971 		Attrs: attrs,
   16972 	}
   16973 	op := scope.AddOperation(opspec)
   16974 	return op.Output(0)
   16975 }
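
// Example of threading a tensor through Print, assuming a Scope `s` and
// tensors `x` and `y` defined elsewhere; the message is illustrative. Using
// `logged` downstream (instead of `x`) ensures the print side effect runs.
//
//	logged := Print(s, x, []tf.Output{x, y},
//		PrintMessage("x and y: "), PrintFirstN(10))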
   16976 
   16977 // Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
   16978 //
   16979 // Arguments:
   16980 //	tag: A string attached to this summary. Used for organization in TensorBoard.
   16981 //	tensor: A tensor to serialize.
   16982 //	serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
   16983 // data.
   16984 func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output) {
   16985 	if scope.Err() != nil {
   16986 		return
   16987 	}
   16988 	opspec := tf.OpSpec{
   16989 		Type: "TensorSummaryV2",
   16990 		Input: []tf.Input{
   16991 			tag, tensor, serialized_summary_metadata,
   16992 		},
   16993 	}
   16994 	op := scope.AddOperation(opspec)
   16995 	return op.Output(0)
   16996 }
   16997 
   16998 // Creates a dataset that asynchronously prefetches elements from `input_dataset`.
   16999 //
   17000 // Arguments:
   17001 //
   17002 //	buffer_size: The maximum number of elements to buffer in an iterator over
   17003 // this dataset.
   17004 //
   17005 //
   17006 func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   17007 	if scope.Err() != nil {
   17008 		return
   17009 	}
   17010 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   17011 	opspec := tf.OpSpec{
   17012 		Type: "PrefetchDataset",
   17013 		Input: []tf.Input{
   17014 			input_dataset, buffer_size,
   17015 		},
   17016 		Attrs: attrs,
   17017 	}
   17018 	op := scope.AddOperation(opspec)
   17019 	return op.Output(0)
   17020 }
   17021 
   17022 // TensorSummaryAttr is an optional argument to TensorSummary.
   17023 type TensorSummaryAttr func(optionalAttr)
   17024 
   17025 // TensorSummaryDescription sets the optional description attribute to value.
   17026 //
   17027 // value: A json-encoded SummaryDescription proto.
   17028 // If not specified, defaults to ""
   17029 func TensorSummaryDescription(value string) TensorSummaryAttr {
   17030 	return func(m optionalAttr) {
   17031 		m["description"] = value
   17032 	}
   17033 }
   17034 
   17035 // TensorSummaryLabels sets the optional labels attribute to value.
   17036 //
   17037 // value: An unused list of strings.
   17038 // If not specified, defaults to <>
   17039 func TensorSummaryLabels(value []string) TensorSummaryAttr {
   17040 	return func(m optionalAttr) {
   17041 		m["labels"] = value
   17042 	}
   17043 }
   17044 
   17045 // TensorSummaryDisplayName sets the optional display_name attribute to value.
   17046 //
   17047 // value: An unused string.
   17048 // If not specified, defaults to ""
   17049 func TensorSummaryDisplayName(value string) TensorSummaryAttr {
   17050 	return func(m optionalAttr) {
   17051 		m["display_name"] = value
   17052 	}
   17053 }
   17054 
   17055 // Outputs a `Summary` protocol buffer with a tensor.
   17056 //
   17057 // This op is being phased out in favor of TensorSummaryV2, which lets callers pass
   17058 // a tag as well as a serialized SummaryMetadata proto string that contains
   17059 // plugin-specific data. We will keep this op to maintain backwards compatibility.
   17060 //
   17061 // Arguments:
   17062 //	tensor: A tensor to serialize.
   17063 func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output) {
   17064 	if scope.Err() != nil {
   17065 		return
   17066 	}
   17067 	attrs := map[string]interface{}{}
   17068 	for _, a := range optional {
   17069 		a(attrs)
   17070 	}
   17071 	opspec := tf.OpSpec{
   17072 		Type: "TensorSummary",
   17073 		Input: []tf.Input{
   17074 			tensor,
   17075 		},
   17076 		Attrs: attrs,
   17077 	}
   17078 	op := scope.AddOperation(opspec)
   17079 	return op.Output(0)
   17080 }
   17081 
   17082 // Computes the gradient for the tanh of `x` wrt its input.
   17083 //
   17084 // Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
   17085 // is the corresponding input gradient.
   17086 func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
   17087 	if scope.Err() != nil {
   17088 		return
   17089 	}
   17090 	opspec := tf.OpSpec{
   17091 		Type: "TanhGrad",
   17092 		Input: []tf.Input{
   17093 			y, dy,
   17094 		},
   17095 	}
   17096 	op := scope.AddOperation(opspec)
   17097 	return op.Output(0)
   17098 }
   17099 
   17100 // Outputs a `Summary` protocol buffer with scalar values.
   17101 //
   17102 // The input `tags` and `values` must have the same shape.  The generated summary
   17103 // has a summary value for each tag-value pair in `tags` and `values`.
   17104 //
   17105 // Arguments:
   17106 //	tags: Tags for the summary.
17107 //	values: Same shape as `tags`.  Values for the summary.
   17108 //
   17109 // Returns Scalar.  Serialized `Summary` protocol buffer.
   17110 func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output) {
   17111 	if scope.Err() != nil {
   17112 		return
   17113 	}
   17114 	opspec := tf.OpSpec{
   17115 		Type: "ScalarSummary",
   17116 		Input: []tf.Input{
   17117 			tags, values,
   17118 		},
   17119 	}
   17120 	op := scope.AddOperation(opspec)
   17121 	return op.Output(0)
   17122 }
   17123 
   17124 // Outputs a `Summary` protocol buffer with a histogram.
   17125 //
   17126 // The generated
   17127 // [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
   17128 // has one summary value containing a histogram for `values`.
   17129 //
   17130 // This op reports an `InvalidArgument` error if any value is not finite.
   17131 //
   17132 // Arguments:
   17133 //	tag: Scalar.  Tag to use for the `Summary.Value`.
   17134 //	values: Any shape. Values to use to build the histogram.
   17135 //
   17136 // Returns Scalar. Serialized `Summary` protocol buffer.
   17137 func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {
   17138 	if scope.Err() != nil {
   17139 		return
   17140 	}
   17141 	opspec := tf.OpSpec{
   17142 		Type: "HistogramSummary",
   17143 		Input: []tf.Input{
   17144 			tag, values,
   17145 		},
   17146 	}
   17147 	op := scope.AddOperation(opspec)
   17148 	return op.Output(0)
   17149 }
   17150 
   17151 // Computes the number of elements in the given queue.
   17152 //
   17153 // Arguments:
   17154 //	handle: The handle to a queue.
   17155 //
   17156 // Returns The number of elements in the given queue.
   17157 func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output) {
   17158 	if scope.Err() != nil {
   17159 		return
   17160 	}
   17161 	opspec := tf.OpSpec{
   17162 		Type: "QueueSizeV2",
   17163 		Input: []tf.Input{
   17164 			handle,
   17165 		},
   17166 	}
   17167 	op := scope.AddOperation(opspec)
   17168 	return op.Output(0)
   17169 }
   17170 
   17171 // ImageSummaryAttr is an optional argument to ImageSummary.
   17172 type ImageSummaryAttr func(optionalAttr)
   17173 
   17174 // ImageSummaryMaxImages sets the optional max_images attribute to value.
   17175 //
   17176 // value: Max number of batch elements to generate images for.
   17177 // If not specified, defaults to 3
   17178 //
   17179 // REQUIRES: value >= 1
   17180 func ImageSummaryMaxImages(value int64) ImageSummaryAttr {
   17181 	return func(m optionalAttr) {
   17182 		m["max_images"] = value
   17183 	}
   17184 }
   17185 
   17186 // ImageSummaryBadColor sets the optional bad_color attribute to value.
   17187 //
   17188 // value: Color to use for pixels with non-finite values.
   17189 // If not specified, defaults to <dtype:DT_UINT8 tensor_shape:<dim:<size:4 > > int_val:255 int_val:0 int_val:0 int_val:255 >
   17190 func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
   17191 	return func(m optionalAttr) {
   17192 		m["bad_color"] = value
   17193 	}
   17194 }
   17195 
   17196 // Outputs a `Summary` protocol buffer with images.
   17197 //
   17198 // The summary has up to `max_images` summary values containing images. The
   17199 // images are built from `tensor` which must be 4-D with shape `[batch_size,
   17200 // height, width, channels]` and where `channels` can be:
   17201 //
   17202 // *  1: `tensor` is interpreted as Grayscale.
   17203 // *  3: `tensor` is interpreted as RGB.
   17204 // *  4: `tensor` is interpreted as RGBA.
   17205 //
   17206 // The images have the same number of channels as the input tensor. For float
   17207 // input, the values are normalized one image at a time to fit in the range
   17208 // `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
   17209 // normalization algorithms:
   17210 //
   17211 // *  If the input values are all positive, they are rescaled so the largest one
   17212 //    is 255.
   17213 //
   17214 // *  If any input value is negative, the values are shifted so input value 0.0
   17215 //    is at 127.  They are then rescaled so that either the smallest value is 0,
   17216 //    or the largest one is 255.
   17217 //
   17218 // The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
   17219 // build the `tag` of the summary values:
   17220 //
   17221 // *  If `max_images` is 1, the summary value tag is '*tag*/image'.
   17222 // *  If `max_images` is greater than 1, the summary value tags are
   17223 //    generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
   17224 //
   17225 // The `bad_color` argument is the color to use in the generated images for
17226 // non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
   17227 // Each element must be in the range `[0, 255]` (It represents the value of a
   17228 // pixel in the output image).  Non-finite values in the input tensor are
   17229 // replaced by this tensor in the output image.  The default value is the color
   17230 // red.
   17231 //
   17232 // Arguments:
   17233 //	tag: Scalar. Used to build the `tag` attribute of the summary values.
   17234 //	tensor: 4-D of shape `[batch_size, height, width, channels]` where
   17235 // `channels` is 1, 3, or 4.
   17236 //
   17237 // Returns Scalar. Serialized `Summary` protocol buffer.
   17238 func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output) {
   17239 	if scope.Err() != nil {
   17240 		return
   17241 	}
   17242 	attrs := map[string]interface{}{}
   17243 	for _, a := range optional {
   17244 		a(attrs)
   17245 	}
   17246 	opspec := tf.OpSpec{
   17247 		Type: "ImageSummary",
   17248 		Input: []tf.Input{
   17249 			tag, tensor,
   17250 		},
   17251 		Attrs: attrs,
   17252 	}
   17253 	op := scope.AddOperation(opspec)
   17254 	return op.Output(0)
   17255 }
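
// A minimal sketch of emitting an image summary, assuming a Scope `s` and a
// 4-D uint8 `batch` tensor defined elsewhere; the tag is illustrative.
//
//	tag := Const(s, "inputs")
//	summary := ImageSummary(s, tag, batch, ImageSummaryMaxImages(5))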
   17256 
   17257 // AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
   17258 type AudioSummaryV2Attr func(optionalAttr)
   17259 
   17260 // AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
   17261 //
   17262 // value: Max number of batch elements to generate audio for.
   17263 // If not specified, defaults to 3
   17264 //
   17265 // REQUIRES: value >= 1
   17266 func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr {
   17267 	return func(m optionalAttr) {
   17268 		m["max_outputs"] = value
   17269 	}
   17270 }
   17271 
   17272 // Outputs a `Summary` protocol buffer with audio.
   17273 //
   17274 // The summary has up to `max_outputs` summary values containing audio. The
   17275 // audio is built from `tensor` which must be 3-D with shape `[batch_size,
   17276 // frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
   17277 // assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
   17278 //
   17279 // The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
   17280 // build the `tag` of the summary values:
   17281 //
   17282 // *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
   17283 // *  If `max_outputs` is greater than 1, the summary value tags are
   17284 //    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
   17285 //
   17286 // Arguments:
   17287 //	tag: Scalar. Used to build the `tag` attribute of the summary values.
   17288 //	tensor: 2-D of shape `[batch_size, frames]`.
   17289 //	sample_rate: The sample rate of the signal in hertz.
   17290 //
   17291 // Returns Scalar. Serialized `Summary` protocol buffer.
   17292 func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output) {
   17293 	if scope.Err() != nil {
   17294 		return
   17295 	}
   17296 	attrs := map[string]interface{}{}
   17297 	for _, a := range optional {
   17298 		a(attrs)
   17299 	}
   17300 	opspec := tf.OpSpec{
   17301 		Type: "AudioSummaryV2",
   17302 		Input: []tf.Input{
   17303 			tag, tensor, sample_rate,
   17304 		},
   17305 		Attrs: attrs,
   17306 	}
   17307 	op := scope.AddOperation(opspec)
   17308 	return op.Output(0)
   17309 }
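
// An illustrative wiring of an audio summary, assuming a Scope `s` and a 2-D
// `[batch_size, frames]` float tensor `audio` defined elsewhere.
//
//	tag := Const(s, "waveform")
//	rate := Const(s, float32(44100))
//	summary := AudioSummaryV2(s, tag, audio, rate, AudioSummaryV2MaxOutputs(2))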
   17310 
   17311 // AvgPoolAttr is an optional argument to AvgPool.
   17312 type AvgPoolAttr func(optionalAttr)
   17313 
   17314 // AvgPoolDataFormat sets the optional data_format attribute to value.
   17315 //
   17316 // value: Specify the data format of the input and output data. With the
   17317 // default format "NHWC", the data is stored in the order of:
   17318 //     [batch, in_height, in_width, in_channels].
   17319 // Alternatively, the format could be "NCHW", the data storage order of:
   17320 //     [batch, in_channels, in_height, in_width].
   17321 // If not specified, defaults to "NHWC"
   17322 func AvgPoolDataFormat(value string) AvgPoolAttr {
   17323 	return func(m optionalAttr) {
   17324 		m["data_format"] = value
   17325 	}
   17326 }
   17327 
   17328 // Performs average pooling on the input.
   17329 //
   17330 // Each entry in `output` is the mean of the corresponding size `ksize`
   17331 // window in `value`.
   17332 //
   17333 // Arguments:
   17334 //	value: 4-D with shape `[batch, height, width, channels]`.
   17335 //	ksize: The size of the sliding window for each dimension of `value`.
   17336 //	strides: The stride of the sliding window for each dimension of `value`.
   17337 //	padding: The type of padding algorithm to use.
   17338 //
   17339 // Returns The average pooled output tensor.
   17340 func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output) {
   17341 	if scope.Err() != nil {
   17342 		return
   17343 	}
   17344 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   17345 	for _, a := range optional {
   17346 		a(attrs)
   17347 	}
   17348 	opspec := tf.OpSpec{
   17349 		Type: "AvgPool",
   17350 		Input: []tf.Input{
   17351 			value,
   17352 		},
   17353 		Attrs: attrs,
   17354 	}
   17355 	op := scope.AddOperation(opspec)
   17356 	return op.Output(0)
   17357 }
   17358 
   17359 // Merges summaries.
   17360 //
   17361 // This op creates a
   17362 // [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
   17363 // protocol buffer that contains the union of all the values in the input
   17364 // summaries.
   17365 //
   17366 // When the Op is run, it reports an `InvalidArgument` error if multiple values
   17367 // in the summaries to merge use the same tag.
   17368 //
   17369 // Arguments:
   17370 //	inputs: Can be of any shape.  Each must contain serialized `Summary` protocol
   17371 // buffers.
   17372 //
   17373 // Returns Scalar. Serialized `Summary` protocol buffer.
   17374 func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output) {
   17375 	if scope.Err() != nil {
   17376 		return
   17377 	}
   17378 	opspec := tf.OpSpec{
   17379 		Type: "MergeSummary",
   17380 		Input: []tf.Input{
   17381 			tf.OutputList(inputs),
   17382 		},
   17383 	}
   17384 	op := scope.AddOperation(opspec)
   17385 	return op.Output(0)
   17386 }
   17387 
   17388 // Computes the gradient of morphological 2-D dilation with respect to the filter.
   17389 //
   17390 // Arguments:
   17391 //	input: 4-D with shape `[batch, in_height, in_width, depth]`.
   17392 //	filter: 3-D with shape `[filter_height, filter_width, depth]`.
   17393 //	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
   17394 //	strides: 1-D of length 4. The stride of the sliding window for each dimension of
   17395 // the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
   17396 //	rates: 1-D of length 4. The input stride for atrous morphological dilation.
   17397 // Must be: `[1, rate_height, rate_width, 1]`.
   17398 //	padding: The type of padding algorithm to use.
   17399 //
   17400 // Returns 3-D with shape `[filter_height, filter_width, depth]`.
   17401 func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output) {
   17402 	if scope.Err() != nil {
   17403 		return
   17404 	}
   17405 	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
   17406 	opspec := tf.OpSpec{
   17407 		Type: "Dilation2DBackpropFilter",
   17408 		Input: []tf.Input{
   17409 			input, filter, out_backprop,
   17410 		},
   17411 		Attrs: attrs,
   17412 	}
   17413 	op := scope.AddOperation(opspec)
   17414 	return op.Output(0)
   17415 }
   17416 
   17417 // AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
   17418 type AddSparseToTensorsMapAttr func(optionalAttr)
   17419 
   17420 // AddSparseToTensorsMapContainer sets the optional container attribute to value.
   17421 //
   17422 // value: The container name for the `SparseTensorsMap` created by this op.
   17423 // If not specified, defaults to ""
   17424 func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr {
   17425 	return func(m optionalAttr) {
   17426 		m["container"] = value
   17427 	}
   17428 }
   17429 
   17430 // AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
   17431 //
   17432 // value: The shared name for the `SparseTensorsMap` created by this op.
   17433 // If blank, the new Operation's unique name is used.
   17434 // If not specified, defaults to ""
   17435 func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr {
   17436 	return func(m optionalAttr) {
   17437 		m["shared_name"] = value
   17438 	}
   17439 }
   17440 
17441 // Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
   17442 //
   17443 // A `SparseTensor` is represented by three tensors: `sparse_indices`,
   17444 // `sparse_values`, and `sparse_shape`.
   17445 //
   17446 // This operator takes the given `SparseTensor` and adds it to a container
   17447 // object (a `SparseTensorsMap`).  A unique key within this container is generated
   17448 // in the form of an `int64`, and this is the value that is returned.
   17449 //
   17450 // The `SparseTensor` can then be read out as part of a minibatch by passing
   17451 // the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure
   17452 // the correct `SparseTensorsMap` is accessed, ensure that the same
   17453 // `container` and `shared_name` are passed to that Op.  If no `shared_name`
   17454 // is provided here, instead use the *name* of the Operation created by calling
   17455 // `AddSparseToTensorsMap` as the `shared_name` passed to
   17456 // `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
   17457 //
   17458 // Arguments:
   17459 //	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
   17460 //	sparse_values: 1-D.  The `values` of the `SparseTensor`.
   17461 //	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
   17462 //
   17463 // Returns 0-D.  The handle of the `SparseTensor` now stored in the
   17464 // `SparseTensorsMap`.
   17465 func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output) {
   17466 	if scope.Err() != nil {
   17467 		return
   17468 	}
   17469 	attrs := map[string]interface{}{}
   17470 	for _, a := range optional {
   17471 		a(attrs)
   17472 	}
   17473 	opspec := tf.OpSpec{
   17474 		Type: "AddSparseToTensorsMap",
   17475 		Input: []tf.Input{
   17476 			sparse_indices, sparse_values, sparse_shape,
   17477 		},
   17478 		Attrs: attrs,
   17479 	}
   17480 	op := scope.AddOperation(opspec)
   17481 	return op.Output(0)
   17482 }
   17483 
   17484 // Writes a `Summary` protocol buffer with scalar values.
   17485 //
17486 // The inputs `tag` and `value` must be scalars.
   17487 //
   17488 // Arguments:
   17489 //	writer: A handle to a summary writer.
   17490 //	step: The step to write the summary for.
   17491 //	tag: Tag for the summary.
   17492 //	value: Value for the summary.
   17493 //
   17494 // Returns the created operation.
   17495 func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation) {
   17496 	if scope.Err() != nil {
   17497 		return
   17498 	}
   17499 	opspec := tf.OpSpec{
   17500 		Type: "WriteScalarSummary",
   17501 		Input: []tf.Input{
   17502 			writer, step, tag, value,
   17503 		},
   17504 	}
   17505 	return scope.AddOperation(opspec)
   17506 }
   17507 
   17508 // Computes the matrix exponential of one or more square matrices:
   17509 //
   17510 // exp(A) = \sum_{n=0}^\infty A^n/n!
   17511 //
   17512 // The exponential is computed using a combination of the scaling and squaring
17513 // method and the Pade approximation. Details can be found in:
   17514 // Nicholas J. Higham, "The scaling and squaring method for the matrix exponential
   17515 // revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
   17516 //
   17517 // The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
   17518 // form square matrices. The output is a tensor of the same shape as the input
   17519 // containing the exponential for all input submatrices `[..., :, :]`.
   17520 //
   17521 // Arguments:
   17522 //	input: Shape is `[..., M, M]`.
   17523 //
   17524 // Returns Shape is `[..., M, M]`.
   17525 //
   17526 // @compatibility(scipy)
   17527 // Equivalent to scipy.linalg.expm
   17528 // @end_compatibility
   17529 func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output) {
   17530 	if scope.Err() != nil {
   17531 		return
   17532 	}
   17533 	opspec := tf.OpSpec{
   17534 		Type: "MatrixExponential",
   17535 		Input: []tf.Input{
   17536 			input,
   17537 		},
   17538 	}
   17539 	op := scope.AddOperation(opspec)
   17540 	return op.Output(0)
   17541 }
   17542 
   17543 // QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
   17544 type QueueDequeueUpToV2Attr func(optionalAttr)
   17545 
   17546 // QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
   17547 //
   17548 // value: If the queue has fewer than n elements, this operation
   17549 // will block for up to timeout_ms milliseconds.
   17550 // Note: This option is not supported yet.
   17551 // If not specified, defaults to -1
   17552 func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr {
   17553 	return func(m optionalAttr) {
   17554 		m["timeout_ms"] = value
   17555 	}
   17556 }
   17557 
   17558 // Dequeues `n` tuples of one or more tensors from the given queue.
   17559 //
   17560 // This operation is not supported by all queues.  If a queue does not support
   17561 // DequeueUpTo, then an Unimplemented error is returned.
   17562 //
17563 // If the queue is closed and there are more than 0 but fewer than `n`
17564 // elements remaining, then instead of returning an OutOfRange error like
17565 // QueueDequeueMany, fewer than `n` elements are returned immediately.  If
   17566 // the queue is closed and there are 0 elements left in the queue, then
   17567 // an OutOfRange error is returned just like in QueueDequeueMany.
   17568 // Otherwise the behavior is identical to QueueDequeueMany:
   17569 //
   17570 // This operation concatenates queue-element component tensors along the
   17571 // 0th dimension to make a single component tensor.  All of the components
   17572 // in the dequeued tuple will have size n in the 0th dimension.
   17573 //
   17574 // This operation has `k` outputs, where `k` is the number of components in
   17575 // the tuples stored in the given queue, and output `i` is the ith
   17576 // component of the dequeued tuple.
   17577 //
   17578 // Arguments:
   17579 //	handle: The handle to a queue.
   17580 //	n: The number of tuples to dequeue.
   17581 //	component_types: The type of each component in a tuple.
   17582 //
   17583 // Returns One or more tensors that were dequeued as a tuple.
   17584 func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output) {
   17585 	if scope.Err() != nil {
   17586 		return
   17587 	}
   17588 	attrs := map[string]interface{}{"component_types": component_types}
   17589 	for _, a := range optional {
   17590 		a(attrs)
   17591 	}
   17592 	opspec := tf.OpSpec{
   17593 		Type: "QueueDequeueUpToV2",
   17594 		Input: []tf.Input{
   17595 			handle, n,
   17596 		},
   17597 		Attrs: attrs,
   17598 	}
   17599 	op := scope.AddOperation(opspec)
   17600 	if scope.Err() != nil {
   17601 		return
   17602 	}
   17603 	var idx int
   17604 	var err error
   17605 	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
   17606 		scope.UpdateErr("QueueDequeueUpToV2", err)
   17607 		return
   17608 	}
   17609 	return components
   17610 }
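
// A hedged sketch of dequeuing from a two-component queue, assuming a Scope
// `s` and a queue handle `q` created elsewhere; the types are examples only.
//
//	n := Const(s, int32(32))
//	components := QueueDequeueUpToV2(s, q, n,
//		[]tf.DataType{tf.Float, tf.Int64})
//	// components[0] and components[1] each hold up to 32 rows in dimension 0.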
   17611 
   17612 // Computes the Cholesky decomposition of one or more square matrices.
   17613 //
   17614 // The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
   17615 // form square matrices.
   17616 //
   17617 // The input has to be symmetric and positive definite. Only the lower-triangular
   17618 // part of the input will be used for this operation. The upper-triangular part
   17619 // will not be read.
   17620 //
   17621 // The output is a tensor of the same shape as the input
   17622 // containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
   17623 //
   17624 // **Note**: The gradient computation on GPU is faster for large matrices but
   17625 // not for large batch dimensions when the submatrices are small. In this
   17626 // case it might be faster to use the CPU.
   17627 //
   17628 // Arguments:
   17629 //	input: Shape is `[..., M, M]`.
   17630 //
   17631 // Returns Shape is `[..., M, M]`.
   17632 func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
   17633 	if scope.Err() != nil {
   17634 		return
   17635 	}
   17636 	opspec := tf.OpSpec{
   17637 		Type: "Cholesky",
   17638 		Input: []tf.Input{
   17639 			input,
   17640 		},
   17641 	}
   17642 	op := scope.AddOperation(opspec)
   17643 	return op.Output(0)
   17644 }
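
// Illustrative use on a single symmetric positive-definite matrix, assuming a
// Scope `s`; the matrix values are examples only.
//
//	a := Const(s, [][]float32{{4, 2}, {2, 3}})
//	l := Cholesky(s, a) // lower-triangular L with L * transpose(L) == a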
   17645 
17646 // Writes contents to the file at input filename. Creates the file and recursively
17647 //
17648 // creates the directory if it does not exist.
   17649 //
   17650 // Arguments:
   17651 //	filename: scalar. The name of the file to which we write the contents.
   17652 //	contents: scalar. The content to be written to the output file.
   17653 //
   17654 // Returns the created operation.
   17655 func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
   17656 	if scope.Err() != nil {
   17657 		return
   17658 	}
   17659 	opspec := tf.OpSpec{
   17660 		Type: "WriteFile",
   17661 		Input: []tf.Input{
   17662 			filename, contents,
   17663 		},
   17664 	}
   17665 	return scope.AddOperation(opspec)
   17666 }
   17667 
   17668 // AllAttr is an optional argument to All.
   17669 type AllAttr func(optionalAttr)
   17670 
   17671 // AllKeepDims sets the optional keep_dims attribute to value.
   17672 //
   17673 // value: If true, retain reduced dimensions with length 1.
   17674 // If not specified, defaults to false
   17675 func AllKeepDims(value bool) AllAttr {
   17676 	return func(m optionalAttr) {
   17677 		m["keep_dims"] = value
   17678 	}
   17679 }
   17680 
   17681 // Computes the "logical and" of elements across dimensions of a tensor.
   17682 //
   17683 // Reduces `input` along the dimensions given in `axis`. Unless
   17684 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   17685 // `axis`. If `keep_dims` is true, the reduced dimensions are
   17686 // retained with length 1.
   17687 //
   17688 // Arguments:
   17689 //	input: The tensor to reduce.
   17690 //	axis: The dimensions to reduce. Must be in the range
   17691 // `[-rank(input), rank(input))`.
   17692 //
   17693 // Returns The reduced tensor.
   17694 func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output) {
   17695 	if scope.Err() != nil {
   17696 		return
   17697 	}
   17698 	attrs := map[string]interface{}{}
   17699 	for _, a := range optional {
   17700 		a(attrs)
   17701 	}
   17702 	opspec := tf.OpSpec{
   17703 		Type: "All",
   17704 		Input: []tf.Input{
   17705 			input, axis,
   17706 		},
   17707 		Attrs: attrs,
   17708 	}
   17709 	op := scope.AddOperation(opspec)
   17710 	return op.Output(0)
   17711 }
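
// Example reduction, assuming a Scope `s`; the values are illustrative.
//
//	x := Const(s, [][]bool{{true, false}, {true, true}})
//	axis := Const(s, int32(1))
//	rows := All(s, x, axis) // reduces each row, yielding [false, true]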
   17712 
   17713 // Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
   17714 //
   17715 // DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
   17716 //
   17717 // The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
   17718 // form square matrices, with the same constraints as the single matrix
   17719 // SelfAdjointEig.
   17720 //
   17721 // The result is a [..., M+1, M] matrix with [..., 0,:] containing the
   17722 // eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
   17723 //
   17724 // Arguments:
   17725 //	input: Shape is `[..., M, M]`.
   17726 //
   17727 // Returns Shape is `[..., M+1, M]`.
   17728 func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output) {
   17729 	if scope.Err() != nil {
   17730 		return
   17731 	}
   17732 	opspec := tf.OpSpec{
   17733 		Type: "SelfAdjointEig",
   17734 		Input: []tf.Input{
   17735 			input,
   17736 		},
   17737 	}
   17738 	op := scope.AddOperation(opspec)
   17739 	return op.Output(0)
   17740 }
   17741 
   17742 // Computes softplus gradients for a softplus operation.
   17743 //
   17744 // Arguments:
   17745 //	gradients: The backpropagated gradients to the corresponding softplus operation.
   17746 //	features: The features passed as input to the corresponding softplus operation.
   17747 //
   17748 // Returns The gradients: `gradients / (1 + exp(-features))`.
   17749 func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
   17750 	if scope.Err() != nil {
   17751 		return
   17752 	}
   17753 	opspec := tf.OpSpec{
   17754 		Type: "SoftplusGrad",
   17755 		Input: []tf.Input{
   17756 			gradients, features,
   17757 		},
   17758 	}
   17759 	op := scope.AddOperation(opspec)
   17760 	return op.Output(0)
   17761 }
   17762 
   17763 // Creates a dataset that contains the unique elements of `input_dataset`.
   17764 func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   17765 	if scope.Err() != nil {
   17766 		return
   17767 	}
   17768 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   17769 	opspec := tf.OpSpec{
   17770 		Type: "UniqueDataset",
   17771 		Input: []tf.Input{
   17772 			input_dataset,
   17773 		},
   17774 		Attrs: attrs,
   17775 	}
   17776 	op := scope.AddOperation(opspec)
   17777 	return op.Output(0)
   17778 }
   17779 
   17780 // SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
   17781 type SelfAdjointEigV2Attr func(optionalAttr)
   17782 
   17783 // SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
   17784 //
   17785 // value: If `True` then eigenvectors will be computed and returned in `v`.
   17786 // Otherwise, only the eigenvalues will be computed.
   17787 // If not specified, defaults to true
   17788 func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr {
   17789 	return func(m optionalAttr) {
   17790 		m["compute_v"] = value
   17791 	}
   17792 }
   17793 
   17794 // Computes the eigen decomposition of one or more square self-adjoint matrices.
   17795 //
   17796 // Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
   17797 // `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
   17798 //
   17799 // ```python
   17800 // # a is a tensor.
   17801 // # e is a tensor of eigenvalues.
   17802 // # v is a tensor of eigenvectors.
   17803 // e, v = self_adjoint_eig(a)
   17804 // e = self_adjoint_eig(a, compute_v=False)
   17805 // ```
   17806 //
   17807 // Arguments:
   17808 //	input: `Tensor` input of shape `[N, N]`.
   17809 //
17810 // Returns Eigenvalues. Shape is `[N]`. Eigenvectors. Shape is `[N, N]`.
   17811 func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output) {
   17812 	if scope.Err() != nil {
   17813 		return
   17814 	}
   17815 	attrs := map[string]interface{}{}
   17816 	for _, a := range optional {
   17817 		a(attrs)
   17818 	}
   17819 	opspec := tf.OpSpec{
   17820 		Type: "SelfAdjointEigV2",
   17821 		Input: []tf.Input{
   17822 			input,
   17823 		},
   17824 		Attrs: attrs,
   17825 	}
   17826 	op := scope.AddOperation(opspec)
   17827 	return op.Output(0), op.Output(1)
   17828 }
   17829 
   17830 // Adjust the saturation of one or more images.
   17831 //
   17832 // `images` is a tensor of at least 3 dimensions.  The last dimension is
17833 // interpreted as channels, and must be three.
   17834 //
   17835 // The input image is considered in the RGB colorspace. Conceptually, the RGB
17836 // colors are first mapped into HSV. A scale is then applied to all the saturation
17837 // values, which are then remapped back to the RGB colorspace.
   17838 //
   17839 // Arguments:
   17840 //	images: Images to adjust.  At least 3-D.
17841 //	scale: A float scale applied to the saturation values.
   17842 //
17843 // Returns The saturation-adjusted image or images.
   17844 func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output) {
   17845 	if scope.Err() != nil {
   17846 		return
   17847 	}
   17848 	opspec := tf.OpSpec{
   17849 		Type: "AdjustSaturation",
   17850 		Input: []tf.Input{
   17851 			images, scale,
   17852 		},
   17853 	}
   17854 	op := scope.AddOperation(opspec)
   17855 	return op.Output(0)
   17856 }
   17857 
   17858 // Elementwise computes the bitwise OR of `x` and `y`.
   17859 //
17860 // The result will have a bit set if that bit is set in `x`, `y`, or both. The
17861 // computation is performed on the underlying representations of `x` and `y`.
   17862 func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   17863 	if scope.Err() != nil {
   17864 		return
   17865 	}
   17866 	opspec := tf.OpSpec{
   17867 		Type: "BitwiseOr",
   17868 		Input: []tf.Input{
   17869 			x, y,
   17870 		},
   17871 	}
   17872 	op := scope.AddOperation(opspec)
   17873 	return op.Output(0)
   17874 }
   17875 
   17876 // MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
   17877 type MatrixSolveLsAttr func(optionalAttr)
   17878 
   17879 // MatrixSolveLsFast sets the optional fast attribute to value.
   17880 // If not specified, defaults to true
   17881 func MatrixSolveLsFast(value bool) MatrixSolveLsAttr {
   17882 	return func(m optionalAttr) {
   17883 		m["fast"] = value
   17884 	}
   17885 }
   17886 
   17887 // Solves one or more linear least-squares problems.
   17888 //
   17889 // `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
   17890 // form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
   17891 // type as `matrix` and shape `[..., M, K]`.
   17892 // The output is a tensor shape `[..., N, K]` where each output matrix solves
   17893 // each of the equations
   17894 // `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
   17895 // in the least squares sense.
   17896 //
   17897 // We use the following notation for (complex) matrix and right-hand sides
   17898 // in the batch:
   17899 //
   17900 // `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
   17901 // `rhs`=\\(B  \in \mathbb{C}^{m \times k}\\),
   17902 // `output`=\\(X  \in \mathbb{C}^{n \times k}\\),
   17903 // `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
   17904 //
   17905 // If `fast` is `True`, then the solution is computed by solving the normal
   17906 // equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
   17907 // \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
17908 // problem \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||A Z - B||_F^2 +
   17909 // \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
   17910 // \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
   17911 // minimum-norm solution to the under-determined linear system, i.e.
   17912 // \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
   17913 // subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
   17914 // when \\(A\\) is numerically full rank and has a condition number
17915 // \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
   17916 // sufficiently large.
   17917 //
   17918 // If `fast` is `False` an algorithm based on the numerically robust complete
   17919 // orthogonal decomposition is used. This computes the minimum-norm
   17920 // least-squares solution, even when \\(A\\) is rank deficient. This path is
   17921 // typically 6-7 times slower than the fast path. If `fast` is `False` then
   17922 // `l2_regularizer` is ignored.
   17923 //
   17924 // Arguments:
   17925 //	matrix: Shape is `[..., M, N]`.
   17926 //	rhs: Shape is `[..., M, K]`.
   17927 //	l2_regularizer: Scalar tensor.
   17928 //
   17929 // @compatibility(numpy)
   17930 // Equivalent to np.linalg.lstsq
   17931 // @end_compatibility
   17932 //
   17933 // Returns Shape is `[..., N, K]`.
   17934 func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output) {
   17935 	if scope.Err() != nil {
   17936 		return
   17937 	}
   17938 	attrs := map[string]interface{}{}
   17939 	for _, a := range optional {
   17940 		a(attrs)
   17941 	}
   17942 	opspec := tf.OpSpec{
   17943 		Type: "MatrixSolveLs",
   17944 		Input: []tf.Input{
   17945 			matrix, rhs, l2_regularizer,
   17946 		},
   17947 		Attrs: attrs,
   17948 	}
   17949 	op := scope.AddOperation(opspec)
   17950 	return op.Output(0)
   17951 }
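
// A hedged least-squares sketch, assuming a Scope `s`; the overdetermined
// system below is an example only. Note that `l2_regularizer` is a float64
// scalar.
//
//	a := Const(s, [][]float32{{1, 0}, {0, 1}, {1, 1}}) // 3x2 matrix
//	b := Const(s, [][]float32{{1}, {2}, {3}})          // 3x1 right-hand side
//	reg := Const(s, float64(0))
//	x := MatrixSolveLs(s, a, b, reg, MatrixSolveLsFast(true)) // 2x1 solution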
   17952 
   17953 // SvdAttr is an optional argument to Svd.
   17954 type SvdAttr func(optionalAttr)
   17955 
   17956 // SvdComputeUv sets the optional compute_uv attribute to value.
   17957 //
   17958 // value: If true, left and right singular vectors will be
   17959 // computed and returned in `u` and `v`, respectively.
17960 // If false, `u` and `v` are not set and should never be referenced.
   17961 // If not specified, defaults to true
   17962 func SvdComputeUv(value bool) SvdAttr {
   17963 	return func(m optionalAttr) {
   17964 		m["compute_uv"] = value
   17965 	}
   17966 }
   17967 
   17968 // SvdFullMatrices sets the optional full_matrices attribute to value.
   17969 //
   17970 // value: If true, compute full-sized `u` and `v`. If false
   17971 // (the default), compute only the leading `P` singular vectors.
   17972 // Ignored if `compute_uv` is `False`.
   17973 // If not specified, defaults to false
   17974 func SvdFullMatrices(value bool) SvdAttr {
   17975 	return func(m optionalAttr) {
   17976 		m["full_matrices"] = value
   17977 	}
   17978 }
   17979 
   17980 // Computes the singular value decompositions of one or more matrices.
   17981 //
   17982 // Computes the SVD of each inner matrix in `input` such that
17983 // `input[..., :, :] = u[..., :, :] * diag(s[..., :]) * transpose(v[..., :, :])`
   17984 //
   17985 // ```python
   17986 // # a is a tensor containing a batch of matrices.
   17987 // # s is a tensor of singular values for each matrix.
17988 // # u is the tensor containing the left singular vectors for each matrix.
17989 // # v is the tensor containing the right singular vectors for each matrix.
   17990 // s, u, v = svd(a)
   17991 // s, _, _ = svd(a, compute_uv=False)
   17992 // ```
   17993 //
   17994 // Arguments:
   17995 //	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
   17996 // form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
   17997 //
17998 // Returns Singular values. Shape is `[..., P]`. Left singular vectors. If `full_matrices` is `False` then shape is
17999 // `[..., M, P]`; if `full_matrices` is `True` then shape is
18000 // `[..., M, M]`. Undefined if `compute_uv` is `False`. Right singular vectors. If `full_matrices` is `False` then shape is
18001 // `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
18002 // Undefined if `compute_uv` is `False`.
   18003 func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output) {
   18004 	if scope.Err() != nil {
   18005 		return
   18006 	}
   18007 	attrs := map[string]interface{}{}
   18008 	for _, a := range optional {
   18009 		a(attrs)
   18010 	}
   18011 	opspec := tf.OpSpec{
   18012 		Type: "Svd",
   18013 		Input: []tf.Input{
   18014 			input,
   18015 		},
   18016 		Attrs: attrs,
   18017 	}
   18018 	op := scope.AddOperation(opspec)
   18019 	return op.Output(0), op.Output(1), op.Output(2)
   18020 }
   18021 
   18022 // QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
   18023 type QueueEnqueueManyV2Attr func(optionalAttr)
   18024 
   18025 // QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
   18026 //
   18027 // value: If the queue is too full, this operation will block for up
   18028 // to timeout_ms milliseconds.
   18029 // Note: This option is not supported yet.
   18030 // If not specified, defaults to -1
   18031 func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
   18032 	return func(m optionalAttr) {
   18033 		m["timeout_ms"] = value
   18034 	}
   18035 }
   18036 
   18037 // Enqueues zero or more tuples of one or more tensors in the given queue.
   18038 //
   18039 // This operation slices each component tensor along the 0th dimension to
   18040 // make multiple queue elements. All of the tuple components must have the
   18041 // same size in the 0th dimension.
   18042 //
   18043 // The components input has k elements, which correspond to the components of
   18044 // tuples stored in the given queue.
   18045 //
   18046 // N.B. If the queue is full, this operation will block until the given
   18047 // elements have been enqueued (or 'timeout_ms' elapses, if specified).
   18048 //
   18049 // Arguments:
   18050 //	handle: The handle to a queue.
   18051 //	components: One or more tensors from which the enqueued tensors should
   18052 // be taken.
   18053 //
   18054 // Returns the created operation.
   18055 func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
   18056 	if scope.Err() != nil {
   18057 		return
   18058 	}
   18059 	attrs := map[string]interface{}{}
   18060 	for _, a := range optional {
   18061 		a(attrs)
   18062 	}
   18063 	opspec := tf.OpSpec{
   18064 		Type: "QueueEnqueueManyV2",
   18065 		Input: []tf.Input{
   18066 			handle, tf.OutputList(components),
   18067 		},
   18068 		Attrs: attrs,
   18069 	}
   18070 	return scope.AddOperation(opspec)
   18071 }
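
// Sketch of enqueuing a batch along dimension 0, assuming a Scope `s`, a
// queue handle `q`, and tensors `ids` and `features` whose 0th dimensions
// match.
//
//	enq := QueueEnqueueManyV2(s, q, []tf.Output{ids, features})
//	// Running enq splits the batch into one queue element per row.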
   18072 
   18073 // Computes the product along segments of a tensor.
   18074 //
   18075 // Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
   18076 // segments.
   18077 //
   18078 // Computes a tensor such that
   18079 // \\(output_i = \prod_j data_j\\) where the product is over `j` such
   18080 // that `segment_ids[j] == i`.
   18081 //
   18082 // If the product is empty for a given segment ID `i`, `output[i] = 1`.
   18083 //
   18084 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   18085 // <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
   18086 // </div>
   18087 //
   18088 // Arguments:
   18089 //
   18090 //	segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
   18091 // first dimension.  Values should be sorted and can be repeated.
   18092 //
   18093 // Returns Has same shape as data, except for dimension 0 which
   18094 // has size `k`, the number of segments.
   18095 func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
   18096 	if scope.Err() != nil {
   18097 		return
   18098 	}
   18099 	opspec := tf.OpSpec{
   18100 		Type: "SegmentProd",
   18101 		Input: []tf.Input{
   18102 			data, segment_ids,
   18103 		},
   18104 	}
   18105 	op := scope.AddOperation(opspec)
   18106 	return op.Output(0)
   18107 }
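
// A minimal sketch (values illustrative; assumes the package's NewScope and
// Const helpers): with data = [1, 2, 3, 4] and segment_ids = [0, 0, 1, 1],
// the output is [1*2, 3*4] = [2, 12].
//
// ```go
// s := NewScope()
// data := Const(s, []float32{1, 2, 3, 4})
// ids := Const(s, []int32{0, 0, 1, 1})
// prod := SegmentProd(s, data, ids) // evaluates to [2, 12]
// _ = prod
// ```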
   18108 
   18109 // Converts one or more images from RGB to HSV.
   18110 //
   18111 // Outputs a tensor of the same shape as the `images` tensor, containing the HSV
   18112 // value of the pixels. The output is only well defined if the value in `images`
   18113 // are in `[0,1]`.
   18114 //
   18115 // `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
   18116 // `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
   18117 // corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
   18118 //
   18119 // Arguments:
   18120 //	images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
   18121 //
   18122 // Returns `images` converted to HSV.
   18123 func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output) {
   18124 	if scope.Err() != nil {
   18125 		return
   18126 	}
   18127 	opspec := tf.OpSpec{
   18128 		Type: "RGBToHSV",
   18129 		Input: []tf.Input{
   18130 			images,
   18131 		},
   18132 	}
   18133 	op := scope.AddOperation(opspec)
   18134 	return op.Output(0)
   18135 }
   18136 
   18137 // Does nothing. Only useful as a placeholder for control edges.
   18138 //
   18139 // Returns the created operation.
   18140 func NoOp(scope *Scope) (o *tf.Operation) {
   18141 	if scope.Err() != nil {
   18142 		return
   18143 	}
   18144 	opspec := tf.OpSpec{
   18145 		Type: "NoOp",
   18146 	}
   18147 	return scope.AddOperation(opspec)
   18148 }
   18149 
   18150 // MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
   18151 type MergeV2CheckpointsAttr func(optionalAttr)
   18152 
   18153 // MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
   18154 //
   18155 // value: If true, recursively delete the directories of the input checkpoint_prefixes after merging.
   18156 // If not specified, defaults to true
   18157 func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
   18158 	return func(m optionalAttr) {
   18159 		m["delete_old_dirs"] = value
   18160 	}
   18161 }
   18162 
   18163 // V2 format specific: merges the metadata files of sharded checkpoints.
   18164 //
   18165 // The result is one logical checkpoint, with one physical metadata file and
   18166 // renamed data files.
   18167 //
   18168 // Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
   18169 //
   18170 // If delete_old_dirs is true, attempts to delete recursively the dirname of each
   18171 // path in the input checkpoint_prefixes.  This is useful when those paths are non
   18172 // user-facing temporary locations.
   18173 //
   18174 // Arguments:
   18175 //	checkpoint_prefixes: prefixes of V2 checkpoints to merge.
   18176 //	destination_prefix: scalar.  The desired final prefix.  Allowed to be the same
   18177 // as one of the checkpoint_prefixes.
   18178 //
   18179 // Returns the created operation.
   18180 func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
   18181 	if scope.Err() != nil {
   18182 		return
   18183 	}
   18184 	attrs := map[string]interface{}{}
   18185 	for _, a := range optional {
   18186 		a(attrs)
   18187 	}
   18188 	opspec := tf.OpSpec{
   18189 		Type: "MergeV2Checkpoints",
   18190 		Input: []tf.Input{
   18191 			checkpoint_prefixes, destination_prefix,
   18192 		},
   18193 		Attrs: attrs,
   18194 	}
   18195 	return scope.AddOperation(opspec)
   18196 }
   18197 
   18198 // Saves input tensor slices to disk.
   18199 //
   18200 // This is like `Save` except that tensors can be listed in the saved file as being
   18201 // a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
   18202 // larger tensor and the slice that this tensor covers. `shapes_and_slices` must
   18203 // have as many elements as `tensor_names`.
   18204 //
   18205 // Elements of the `shapes_and_slices` input must either be:
   18206 //
   18207 // *  The empty string, in which case the corresponding tensor is
   18208 //    saved normally.
   18209 // *  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
   18210 //    `dimI` are the dimensions of the larger tensor and `slice-spec`
   18211 //    specifies what part is covered by the tensor to save.
   18212 //
   18213 // `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
   18214 // where each `sliceI` is either:
   18215 //
   18216 // *  The string `-` meaning that the slice covers all indices of this dimension
   18217 // *  `start,length` where `start` and `length` are integers.  In that
   18218 //    case the slice covers `length` indices starting at `start`.
   18219 //
   18220 // See also `Save`.
   18221 //
   18222 // Arguments:
   18223 //	filename: Must have a single element. The name of the file to which we write the
   18224 // tensor.
   18225 //	tensor_names: Shape `[N]`. The names of the tensors to be saved.
   18226 //	shapes_and_slices: Shape `[N]`.  The shapes and slice specifications to use when
   18227 // saving the tensors.
   18228 //	data: `N` tensors to save.
   18229 //
   18230 // Returns the created operation.
   18231 func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
   18232 	if scope.Err() != nil {
   18233 		return
   18234 	}
   18235 	opspec := tf.OpSpec{
   18236 		Type: "SaveSlices",
   18237 		Input: []tf.Input{
   18238 			filename, tensor_names, shapes_and_slices, tf.OutputList(data),
   18239 		},
   18240 	}
   18241 	return scope.AddOperation(opspec)
   18242 }
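
// A sketch of the slice-spec format (path and values illustrative; assumes the
// package's NewScope and Const helpers): the spec "4 0,2" declares that `part`
// covers the first two entries of a larger length-4 vector, while the empty
// spec saves `full` whole.
//
// ```go
// s := NewScope()
// filename := Const(s, "/tmp/ckpt") // illustrative path
// names := Const(s, []string{"full", "part"})
// specs := Const(s, []string{"", "4 0,2"}) // "" = save normally
// full := Const(s, []float32{1, 2, 3})
// part := Const(s, []float32{10, 20}) // indices 0..1 of a length-4 tensor
// save := SaveSlices(s, filename, names, specs, []tf.Output{full, part})
// _ = save // run as a session target
// ```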
   18243 
   18244 // DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
   18245 type DenseToDenseSetOperationAttr func(optionalAttr)
   18246 
   18247 // DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value.
   18248 // If not specified, defaults to true
   18249 func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr {
   18250 	return func(m optionalAttr) {
   18251 		m["validate_indices"] = value
   18252 	}
   18253 }
   18254 
   18255 // Applies set operation along last dimension of 2 `Tensor` inputs.
   18256 //
   18257 // `set_operation` must be one of "a-b", "b-a", "intersection", or "union".
   18258 //
   18259 // Output `result` is a `SparseTensor` represented by `result_indices`,
   18260 // `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
   18261 // has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
   18262 // dimension contains the result of `set_operation` applied to the corresponding
   18263 // `[0...n-1]` dimension of `set`.
   18264 //
   18265 // Arguments:
   18266 //	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
   18267 // Dimension `n` contains values in a set, duplicates are allowed but ignored.
   18268 //	set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
   18269 // Dimension `n` contains values in a set, duplicates are allowed but ignored.
   18270 //
   18271 //
   18272 // Returns 2-D indices, 1-D values, and the 1-D `Tensor` shape of a `SparseTensor`.
   18273 // `result_shape[0...n-1]` is the same as the 1st `n-1` dimensions of `set1` and
   18274 // `set2`; `result_shape[n]` is the max result set size across all `0...n-1` dimensions.
   18275 func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
   18276 	if scope.Err() != nil {
   18277 		return
   18278 	}
   18279 	attrs := map[string]interface{}{"set_operation": set_operation}
   18280 	for _, a := range optional {
   18281 		a(attrs)
   18282 	}
   18283 	opspec := tf.OpSpec{
   18284 		Type: "DenseToDenseSetOperation",
   18285 		Input: []tf.Input{
   18286 			set1, set2,
   18287 		},
   18288 		Attrs: attrs,
   18289 	}
   18290 	op := scope.AddOperation(opspec)
   18291 	return op.Output(0), op.Output(1), op.Output(2)
   18292 }
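
// A minimal sketch (row values illustrative; assumes the package's NewScope
// and Const helpers), intersecting the last dimension of two rank-2 sets:
//
// ```go
// s := NewScope()
// set1 := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// set2 := Const(s, [][]int32{{2, 3, 9}, {7, 7, 7}})
// idx, vals, shape := DenseToDenseSetOperation(s, set1, set2, "intersection")
// // row 0 -> {2, 3}; row 1 -> {} (no common values), so result_shape is [2, 2]
// _, _, _ = idx, vals, shape
// ```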
   18293 
   18294 // Generate a sharded filename. The filename is printf formatted as
   18295 //
   18296 //    %s-%05d-of-%05d, basename, shard, num_shards.
   18297 func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output) {
   18298 	if scope.Err() != nil {
   18299 		return
   18300 	}
   18301 	opspec := tf.OpSpec{
   18302 		Type: "ShardedFilename",
   18303 		Input: []tf.Input{
   18304 			basename, shard, num_shards,
   18305 		},
   18306 	}
   18307 	op := scope.AddOperation(opspec)
   18308 	return op.Output(0)
   18309 }
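
// For example (values illustrative; assumes the package's NewScope and Const
// helpers), shard 2 of 16 for basename "train":
//
// ```go
// s := NewScope()
// name := ShardedFilename(s,
// 	Const(s, "train"), Const(s, int32(2)), Const(s, int32(16)))
// _ = name // evaluates to the string "train-00002-of-00016"
// ```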
   18310 
   18311 // Generate a glob pattern matching all sharded file names.
   18312 func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output) {
   18313 	if scope.Err() != nil {
   18314 		return
   18315 	}
   18316 	opspec := tf.OpSpec{
   18317 		Type: "ShardedFilespec",
   18318 		Input: []tf.Input{
   18319 			basename, num_shards,
   18320 		},
   18321 	}
   18322 	op := scope.AddOperation(opspec)
   18323 	return op.Output(0)
   18324 }
   18325 
   18326 // TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
   18327 type TextLineReaderV2Attr func(optionalAttr)
   18328 
   18329 // TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
   18330 //
   18331 // value: Number of lines to skip from the beginning of every file.
   18332 // If not specified, defaults to 0
   18333 func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr {
   18334 	return func(m optionalAttr) {
   18335 		m["skip_header_lines"] = value
   18336 	}
   18337 }
   18338 
   18339 // TextLineReaderV2Container sets the optional container attribute to value.
   18340 //
   18341 // value: If non-empty, this reader is placed in the given container.
   18342 // Otherwise, a default container is used.
   18343 // If not specified, defaults to ""
   18344 func TextLineReaderV2Container(value string) TextLineReaderV2Attr {
   18345 	return func(m optionalAttr) {
   18346 		m["container"] = value
   18347 	}
   18348 }
   18349 
   18350 // TextLineReaderV2SharedName sets the optional shared_name attribute to value.
   18351 //
   18352 // value: If non-empty, this reader is named in the given bucket
   18353 // with this shared_name. Otherwise, the node name is used instead.
   18354 // If not specified, defaults to ""
   18355 func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr {
   18356 	return func(m optionalAttr) {
   18357 		m["shared_name"] = value
   18358 	}
   18359 }
   18360 
   18361 // A Reader that outputs the lines of a file delimited by '\n'.
   18362 //
   18363 // Returns The handle to reference the Reader.
   18364 func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output) {
   18365 	if scope.Err() != nil {
   18366 		return
   18367 	}
   18368 	attrs := map[string]interface{}{}
   18369 	for _, a := range optional {
   18370 		a(attrs)
   18371 	}
   18372 	opspec := tf.OpSpec{
   18373 		Type: "TextLineReaderV2",
   18374 
   18375 		Attrs: attrs,
   18376 	}
   18377 	op := scope.AddOperation(opspec)
   18378 	return op.Output(0)
   18379 }
   18380 
   18381 // LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
   18382 type LoadAndRemapMatrixAttr func(optionalAttr)
   18383 
   18384 // LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
   18385 //
   18386 // value: The maximum number of rows to load from the checkpoint at
   18387 // once. If less than or equal to 0, the entire matrix will be loaded into
   18388 // memory. Setting this arg trades increased disk reads for lower memory usage.
   18389 // If not specified, defaults to -1
   18390 func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr {
   18391 	return func(m optionalAttr) {
   18392 		m["max_rows_in_memory"] = value
   18393 	}
   18394 }
   18395 
   18396 // Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from a checkpoint.
   18397 //
   18398 // The tensor is read from the checkpoint at `ckpt_path`, and its rows and
   18399 // columns are potentially reordered using the specified remappings.
   18400 //
   18401 // Most users should use one of the wrapper initializers (such as
   18402 // `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
   18403 // function directly.
   18404 //
   18405 // The remappings are 1-D tensors with the following properties:
   18406 //
   18407 // * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
   18408 //   matrix will be initialized from the row corresponding to index
   18409 //   `row_remapping[i]` in the old `Tensor` from the checkpoint.
   18410 // * `col_remapping` must have either 0 entries (indicating that no column
   18411 //   reordering is needed) or `num_cols` entries. If specified, column `j` of the
   18412 //   output matrix will be initialized from the column corresponding to index
   18413 //   `col_remapping[j]` in the old `Tensor` from the checkpoint.
   18414 // * A value of -1 in either of the remappings signifies a "missing" entry. In that
   18415 //   case, values from the `initializing_values` tensor will be used to fill that
   18416 //   missing row or column. If `row_remapping` has `r` missing entries and
   18417 //   `col_remapping` has `c` missing entries, then the following condition must be
   18418 //   true:
   18419 //
   18420 // `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
   18421 //
   18422 // The remapping tensors can be generated using the GenerateVocabRemapping op.
   18423 //
   18424 // As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
   18425 // initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
   18426 // the value from row i, column j of the old tensor in the checkpoint, the output
   18427 // matrix will look like the following:
   18428 //
   18429 // [[w(1, 0),  w(1, 2),  0.5],
   18430 //  [w(0, 0),  w(0, 2), -0.5],
   18431 //  [0.25,    -0.25,      42]]
   18432 //
   18433 // Arguments:
   18434 //	ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
   18435 // which the old matrix `Tensor` will be loaded.
   18436 //	old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
   18437 //	row_remapping: An int `Tensor` of row remappings (generally created by
   18438 // `generate_vocab_remapping`).  Even if no row remapping is needed, this must
   18439 // still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
   18440 // index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
   18441 //	col_remapping: An int `Tensor` of column remappings (generally created by
   18442 // `generate_vocab_remapping`).  May be a size-0 `Tensor` if only row remapping
   18443 // is to be done (e.g. column ordering is the same).
   18444 //	initializing_values: A float `Tensor` containing  values to fill in for cells
   18445 // in the output matrix that are not loaded from the checkpoint. Length must be
   18446 // exactly the same as the number of missing / new cells.
   18447 //	num_rows: Number of rows (length of the 1st dimension) in the output matrix.
   18448 //	num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
   18449 //
   18450 // Returns Output matrix containing existing values loaded from the
   18451 // checkpoint, and with any missing values filled in from initializing_values.
   18452 func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output) {
   18453 	if scope.Err() != nil {
   18454 		return
   18455 	}
   18456 	attrs := map[string]interface{}{"num_rows": num_rows, "num_cols": num_cols}
   18457 	for _, a := range optional {
   18458 		a(attrs)
   18459 	}
   18460 	opspec := tf.OpSpec{
   18461 		Type: "LoadAndRemapMatrix",
   18462 		Input: []tf.Input{
   18463 			ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values,
   18464 		},
   18465 		Attrs: attrs,
   18466 	}
   18467 	op := scope.AddOperation(opspec)
   18468 	return op.Output(0)
   18469 }
   18470 
   18471 // TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
   18472 type TFRecordReaderV2Attr func(optionalAttr)
   18473 
   18474 // TFRecordReaderV2Container sets the optional container attribute to value.
   18475 //
   18476 // value: If non-empty, this reader is placed in the given container.
   18477 // Otherwise, a default container is used.
   18478 // If not specified, defaults to ""
   18479 func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr {
   18480 	return func(m optionalAttr) {
   18481 		m["container"] = value
   18482 	}
   18483 }
   18484 
   18485 // TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
   18486 //
   18487 // value: If non-empty, this reader is named in the given bucket
   18488 // with this shared_name. Otherwise, the node name is used instead.
   18489 // If not specified, defaults to ""
   18490 func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr {
   18491 	return func(m optionalAttr) {
   18492 		m["shared_name"] = value
   18493 	}
   18494 }
   18495 
   18496 // TFRecordReaderV2CompressionType sets the optional compression_type attribute to value.
   18497 // If not specified, defaults to ""
   18498 func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr {
   18499 	return func(m optionalAttr) {
   18500 		m["compression_type"] = value
   18501 	}
   18502 }
   18503 
   18504 // A Reader that outputs the records from a TensorFlow Records file.
   18505 //
   18506 // Returns The handle to reference the Reader.
   18507 func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output) {
   18508 	if scope.Err() != nil {
   18509 		return
   18510 	}
   18511 	attrs := map[string]interface{}{}
   18512 	for _, a := range optional {
   18513 		a(attrs)
   18514 	}
   18515 	opspec := tf.OpSpec{
   18516 		Type: "TFRecordReaderV2",
   18517 
   18518 		Attrs: attrs,
   18519 	}
   18520 	op := scope.AddOperation(opspec)
   18521 	return op.Output(0)
   18522 }
   18523 
   18524 // QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
   18525 type QuantizeAndDequantizeV3Attr func(optionalAttr)
   18526 
   18527 // QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value.
   18528 // If not specified, defaults to true
   18529 func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr {
   18530 	return func(m optionalAttr) {
   18531 		m["signed_input"] = value
   18532 	}
   18533 }
   18534 
   18535 // QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value.
   18536 // If not specified, defaults to true
   18537 func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr {
   18538 	return func(m optionalAttr) {
   18539 		m["range_given"] = value
   18540 	}
   18541 }
   18542 
   18543 // Quantizes then dequantizes a tensor.
   18544 //
   18545 // This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
   18546 // tensor, so its value can change during training.
   18547 func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output) {
   18548 	if scope.Err() != nil {
   18549 		return
   18550 	}
   18551 	attrs := map[string]interface{}{}
   18552 	for _, a := range optional {
   18553 		a(attrs)
   18554 	}
   18555 	opspec := tf.OpSpec{
   18556 		Type: "QuantizeAndDequantizeV3",
   18557 		Input: []tf.Input{
   18558 			input, input_min, input_max, num_bits,
   18559 		},
   18560 		Attrs: attrs,
   18561 	}
   18562 	op := scope.AddOperation(opspec)
   18563 	return op.Output(0)
   18564 }
   18565 
   18566 // IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
   18567 type IdentityReaderV2Attr func(optionalAttr)
   18568 
   18569 // IdentityReaderV2Container sets the optional container attribute to value.
   18570 //
   18571 // value: If non-empty, this reader is placed in the given container.
   18572 // Otherwise, a default container is used.
   18573 // If not specified, defaults to ""
   18574 func IdentityReaderV2Container(value string) IdentityReaderV2Attr {
   18575 	return func(m optionalAttr) {
   18576 		m["container"] = value
   18577 	}
   18578 }
   18579 
   18580 // IdentityReaderV2SharedName sets the optional shared_name attribute to value.
   18581 //
   18582 // value: If non-empty, this reader is named in the given bucket
   18583 // with this shared_name. Otherwise, the node name is used instead.
   18584 // If not specified, defaults to ""
   18585 func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr {
   18586 	return func(m optionalAttr) {
   18587 		m["shared_name"] = value
   18588 	}
   18589 }
   18590 
   18591 // A Reader that outputs the queued work as both the key and value.
   18592 //
   18593 // To use, enqueue strings in a Queue.  ReaderRead will take the front
   18594 // work string and output (work, work).
   18595 //
   18596 // Returns The handle to reference the Reader.
   18597 func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output) {
   18598 	if scope.Err() != nil {
   18599 		return
   18600 	}
   18601 	attrs := map[string]interface{}{}
   18602 	for _, a := range optional {
   18603 		a(attrs)
   18604 	}
   18605 	opspec := tf.OpSpec{
   18606 		Type: "IdentityReaderV2",
   18607 
   18608 		Attrs: attrs,
   18609 	}
   18610 	op := scope.AddOperation(opspec)
   18611 	return op.Output(0)
   18612 }
   18613 
   18614 // ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
   18615 type ResourceApplyGradientDescentAttr func(optionalAttr)
   18616 
   18617 // ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
   18618 //
   18619 // value: If `True`, the subtraction will be protected by a lock;
   18620 // otherwise the behavior is undefined, but may exhibit less contention.
   18621 // If not specified, defaults to false
   18622 func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr {
   18623 	return func(m optionalAttr) {
   18624 		m["use_locking"] = value
   18625 	}
   18626 }
   18627 
   18628 // Update '*var' by subtracting 'alpha' * 'delta' from it.
   18629 //
   18630 // Arguments:
   18631 //	var_: Should be from a Variable().
   18632 //	alpha: Scaling factor. Must be a scalar.
   18633 //	delta: The change.
   18634 //
   18635 // Returns the created operation.
   18636 func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
   18637 	if scope.Err() != nil {
   18638 		return
   18639 	}
   18640 	attrs := map[string]interface{}{}
   18641 	for _, a := range optional {
   18642 		a(attrs)
   18643 	}
   18644 	opspec := tf.OpSpec{
   18645 		Type: "ResourceApplyGradientDescent",
   18646 		Input: []tf.Input{
   18647 			var_, alpha, delta,
   18648 		},
   18649 		Attrs: attrs,
   18650 	}
   18651 	return scope.AddOperation(opspec)
   18652 }
   18653 
   18654 // Returns the next record (key, value pair) produced by a Reader.
   18655 //
   18656 // Will dequeue from the input queue if necessary (e.g. when the
   18657 // Reader needs to start reading from a new file since it has finished
   18658 // with the previous file).
   18659 //
   18660 // Arguments:
   18661 //	reader_handle: Handle to a Reader.
   18662 //	queue_handle: Handle to a Queue, with string work items.
   18663 //
   18664 // Returns a scalar key and a scalar value.
   18665 func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {
   18666 	if scope.Err() != nil {
   18667 		return
   18668 	}
   18669 	opspec := tf.OpSpec{
   18670 		Type: "ReaderReadV2",
   18671 		Input: []tf.Input{
   18672 			reader_handle, queue_handle,
   18673 		},
   18674 	}
   18675 	op := scope.AddOperation(opspec)
   18676 	return op.Output(0), op.Output(1)
   18677 }
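
// A typical pairing (sketch; assumes the package's NewScope and FIFOQueueV2
// helpers, with filename strings enqueued elsewhere in the graph, e.g. via
// QueueEnqueueV2):
//
// ```go
// s := NewScope()
// reader := TextLineReaderV2(s, TextLineReaderV2SkipHeaderLines(1))
// fileQueue := FIFOQueueV2(s, []tf.DataType{tf.String})
// key, value := ReaderReadV2(s, reader, fileQueue)
// _, _ = key, value // key names the record; value holds one line of text
// ```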
   18678 
   18679 // Returns up to `num_records` (key, value) pairs produced by a Reader.
   18680 //
   18681 // Will dequeue from the input queue if necessary (e.g. when the
   18682 // Reader needs to start reading from a new file since it has finished
   18683 // with the previous file).
   18684 // It may return fewer than `num_records` pairs even before the last batch.
   18685 //
   18686 // Arguments:
   18687 //	reader_handle: Handle to a `Reader`.
   18688 //	queue_handle: Handle to a `Queue`, with string work items.
   18689 //	num_records: number of records to read from `Reader`.
   18690 //
   18691 // Returns a 1-D tensor of keys and a 1-D tensor of values.
   18692 func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {
   18693 	if scope.Err() != nil {
   18694 		return
   18695 	}
   18696 	opspec := tf.OpSpec{
   18697 		Type: "ReaderReadUpToV2",
   18698 		Input: []tf.Input{
   18699 			reader_handle, queue_handle, num_records,
   18700 		},
   18701 	}
   18702 	op := scope.AddOperation(opspec)
   18703 	return op.Output(0), op.Output(1)
   18704 }
   18705 
   18706 // Restore a Reader to its initial clean state.
   18707 //
   18708 // Arguments:
   18709 //	reader_handle: Handle to a Reader.
   18710 //
   18711 // Returns the created operation.
   18712 func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
   18713 	if scope.Err() != nil {
   18714 		return
   18715 	}
   18716 	opspec := tf.OpSpec{
   18717 		Type: "ReaderResetV2",
   18718 		Input: []tf.Input{
   18719 			reader_handle,
   18720 		},
   18721 	}
   18722 	return scope.AddOperation(opspec)
   18723 }
   18724 
   18725 // ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
   18726 type ResourceApplyAdamAttr func(optionalAttr)
   18727 
   18728 // ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
   18729 //
   18730 // value: If `True`, updating of the var, m, and v tensors will be protected
   18731 // by a lock; otherwise the behavior is undefined, but may exhibit less
   18732 // contention.
   18733 // If not specified, defaults to false
   18734 func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
   18735 	return func(m optionalAttr) {
   18736 		m["use_locking"] = value
   18737 	}
   18738 }
   18739 
   18740 // ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
   18741 //
   18742 // value: If `True`, uses the nesterov update.
   18743 // If not specified, defaults to false
   18744 func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
   18745 	return func(m optionalAttr) {
   18746 		m["use_nesterov"] = value
   18747 	}
   18748 }
   18749 
   18750 // Update '*var' according to the Adam algorithm.
   18751 //
   18752 // lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
   18753 // m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
   18754 // v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
   18755 // variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
   18756 //
   18757 // Arguments:
   18758 //	var_: Should be from a Variable().
   18759 //	m: Should be from a Variable().
   18760 //	v: Should be from a Variable().
   18761 //	beta1_power: Must be a scalar.
   18762 //	beta2_power: Must be a scalar.
   18763 //	lr: Scaling factor. Must be a scalar.
   18764 //	beta1: Momentum factor. Must be a scalar.
   18765 //	beta2: Momentum factor. Must be a scalar.
   18766 //	epsilon: Ridge term. Must be a scalar.
   18767 //	grad: The gradient.
   18768 //
   18769 // Returns the created operation.
   18770 func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
   18771 	if scope.Err() != nil {
   18772 		return
   18773 	}
   18774 	attrs := map[string]interface{}{}
   18775 	for _, a := range optional {
   18776 		a(attrs)
   18777 	}
   18778 	opspec := tf.OpSpec{
   18779 		Type: "ResourceApplyAdam",
   18780 		Input: []tf.Input{
   18781 			var_, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
   18782 		},
   18783 		Attrs: attrs,
   18784 	}
   18785 	return scope.AddOperation(opspec)
   18786 }
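
// A minimal wiring sketch (all hyperparameter values illustrative; assumes the
// package's NewScope, Const, and VarHandleOp helpers, and that the resource
// variables are initialized, e.g. with AssignVariableOp, before the update runs):
//
// ```go
// s := NewScope()
// w := VarHandleOp(s, tf.Float, tf.ScalarShape()) // var
// m := VarHandleOp(s, tf.Float, tf.ScalarShape()) // 1st-moment accumulator
// v := VarHandleOp(s, tf.Float, tf.ScalarShape()) // 2nd-moment accumulator
// step := ResourceApplyAdam(s, w, m, v,
// 	Const(s, float32(0.9)),   // beta1_power = beta1^t at step t = 1
// 	Const(s, float32(0.999)), // beta2_power
// 	Const(s, float32(0.001)), // lr
// 	Const(s, float32(0.9)),   // beta1
// 	Const(s, float32(0.999)), // beta2
// 	Const(s, float32(1e-8)),  // epsilon
// 	Const(s, float32(0.5)))   // grad
// _ = step // run as a session target
// ```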
   18787 
   18788 // Store the input tensor in the state of the current session.
   18789 //
   18790 // Arguments:
   18791 //	value: The tensor to be stored.
   18792 //
   18793 // Returns The handle for the tensor stored in the session state, represented
   18794 // as a ResourceHandle object.
   18795 func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output) {
   18796 	if scope.Err() != nil {
   18797 		return
   18798 	}
   18799 	opspec := tf.OpSpec{
   18800 		Type: "GetSessionHandleV2",
   18801 		Input: []tf.Input{
   18802 			value,
   18803 		},
   18804 	}
   18805 	op := scope.AddOperation(opspec)
   18806 	return op.Output(0)
   18807 }
   18808 
   18809 // Returns the set of files matching one or more glob patterns.
   18810 //
   18811 // Note that this routine only supports wildcard characters in the
   18812 // basename portion of the pattern, not in the directory portion.
   18813 //
   18814 // Arguments:
   18815 //	pattern: Shell wildcard pattern(s). Scalar or vector of type string.
   18816 //
   18817 // Returns A vector of matching filenames.
   18818 func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output) {
   18819 	if scope.Err() != nil {
   18820 		return
   18821 	}
   18822 	opspec := tf.OpSpec{
   18823 		Type: "MatchingFiles",
   18824 		Input: []tf.Input{
   18825 			pattern,
   18826 		},
   18827 	}
   18828 	op := scope.AddOperation(opspec)
   18829 	return op.Output(0)
   18830 }
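
// For example (pattern illustrative; assumes the package's NewScope and Const
// helpers):
//
// ```go
// s := NewScope()
// files := MatchingFiles(s, Const(s, "/data/train-*.tfrecord"))
// _ = files // a 1-D string tensor of matching paths
// ```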
   18831 
   18832 // ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
   18833 type ResizeBicubicGradAttr func(optionalAttr)
   18834 
   18835 // ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
   18836 //
   18837 // value: If true, rescale grads by (orig_height - 1) / (height - 1), which
   18838 // exactly aligns the 4 corners of grads and original_image. If false, rescale by
   18839 // orig_height / height. Treat similarly the width dimension.
   18840 // If not specified, defaults to false
   18841 func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr {
   18842 	return func(m optionalAttr) {
   18843 		m["align_corners"] = value
   18844 	}
   18845 }
   18846 
   18847 // Computes the gradient of bicubic interpolation.
   18848 //
   18849 // Arguments:
   18850 //	grads: 4-D with shape `[batch, height, width, channels]`.
   18851 //	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
   18852 // The image tensor that was resized.
   18853 //
   18854 // Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
   18855 // Gradients with respect to the input image. Input image must have been
   18856 // float or double.
   18857 func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output) {
   18858 	if scope.Err() != nil {
   18859 		return
   18860 	}
   18861 	attrs := map[string]interface{}{}
   18862 	for _, a := range optional {
   18863 		a(attrs)
   18864 	}
   18865 	opspec := tf.OpSpec{
   18866 		Type: "ResizeBicubicGrad",
   18867 		Input: []tf.Input{
   18868 			grads, original_image,
   18869 		},
   18870 		Attrs: attrs,
   18871 	}
   18872 	op := scope.AddOperation(opspec)
   18873 	return op.Output(0)
   18874 }
   18875 
   18876 // ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
   18877 type ResizeNearestNeighborAttr func(optionalAttr)
   18878 
   18879 // ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
   18880 //
   18881 // value: If true, rescale input by (new_height - 1) / (height - 1), which
   18882 // exactly aligns the 4 corners of images and resized images. If false, rescale
   18883 // by new_height / height. Treat similarly the width dimension.
   18884 // If not specified, defaults to false
   18885 func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
   18886 	return func(m optionalAttr) {
   18887 		m["align_corners"] = value
   18888 	}
   18889 }
   18890 
   18891 // Resize `images` to `size` using nearest neighbor interpolation.
   18892 //
   18893 // Arguments:
   18894 //	images: 4-D with shape `[batch, height, width, channels]`.
   18895 //	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
   18896 // new size for the images.
   18897 //
   18898 // Returns 4-D with shape
   18899 // `[batch, new_height, new_width, channels]`.
   18900 func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output) {
   18901 	if scope.Err() != nil {
   18902 		return
   18903 	}
   18904 	attrs := map[string]interface{}{}
   18905 	for _, a := range optional {
   18906 		a(attrs)
   18907 	}
   18908 	opspec := tf.OpSpec{
   18909 		Type: "ResizeNearestNeighbor",
   18910 		Input: []tf.Input{
   18911 			images, size,
   18912 		},
   18913 		Attrs: attrs,
   18914 	}
   18915 	op := scope.AddOperation(opspec)
   18916 	return op.Output(0)
   18917 }
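
// For example (file name and target size illustrative; assumes the package's
// NewScope, Const, ReadFile, DecodeJpeg, and ExpandDims helpers):
//
// ```go
// s := NewScope()
// img := DecodeJpeg(s, ReadFile(s, Const(s, "in.jpg")), DecodeJpegChannels(3))
// batch := ExpandDims(s, img, Const(s, int32(0))) // [1, height, width, 3]
// resized := ResizeNearestNeighbor(s, batch, Const(s, []int32{224, 224}),
// 	ResizeNearestNeighborAlignCorners(true))
// _ = resized // [1, 224, 224, 3]
// ```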
   18918 
   18919 // ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
   18920 type ResizeNearestNeighborGradAttr func(optionalAttr)
   18921 
   18922 // ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
   18923 //
   18924 // value: If true, rescale grads by (orig_height - 1) / (height - 1), which
   18925 // exactly aligns the 4 corners of grads and original_image. If false, rescale by
   18926 // orig_height / height. Treat similarly the width dimension.
   18927 // If not specified, defaults to false
   18928 func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr {
   18929 	return func(m optionalAttr) {
   18930 		m["align_corners"] = value
   18931 	}
   18932 }
   18933 
   18934 // Computes the gradient of nearest neighbor interpolation.
   18935 //
   18936 // Arguments:
   18937 //	grads: 4-D with shape `[batch, height, width, channels]`.
   18938 //	size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
   18939 // original input size.
   18940 //
   18941 // Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
   18942 // with respect to the input image.
   18943 func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output) {
   18944 	if scope.Err() != nil {
   18945 		return
   18946 	}
   18947 	attrs := map[string]interface{}{}
   18948 	for _, a := range optional {
   18949 		a(attrs)
   18950 	}
   18951 	opspec := tf.OpSpec{
   18952 		Type: "ResizeNearestNeighborGrad",
   18953 		Input: []tf.Input{
   18954 			grads, size,
   18955 		},
   18956 		Attrs: attrs,
   18957 	}
   18958 	op := scope.AddOperation(opspec)
   18959 	return op.Output(0)
   18960 }
   18961 
   18962 // DecodeJpegAttr is an optional argument to DecodeJpeg.
   18963 type DecodeJpegAttr func(optionalAttr)
   18964 
   18965 // DecodeJpegChannels sets the optional channels attribute to value.
   18966 //
   18967 // value: Number of color channels for the decoded image.
   18968 // If not specified, defaults to 0
   18969 func DecodeJpegChannels(value int64) DecodeJpegAttr {
   18970 	return func(m optionalAttr) {
   18971 		m["channels"] = value
   18972 	}
   18973 }
   18974 
   18975 // DecodeJpegRatio sets the optional ratio attribute to value.
   18976 //
   18977 // value: Downscaling ratio.
   18978 // If not specified, defaults to 1
   18979 func DecodeJpegRatio(value int64) DecodeJpegAttr {
   18980 	return func(m optionalAttr) {
   18981 		m["ratio"] = value
   18982 	}
   18983 }
   18984 
   18985 // DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
   18986 //
   18987 // value: If true, use a slower but nicer upscaling of the
   18988 // chroma planes (yuv420/422 only).
   18989 // If not specified, defaults to true
   18990 func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr {
   18991 	return func(m optionalAttr) {
   18992 		m["fancy_upscaling"] = value
   18993 	}
   18994 }
   18995 
   18996 // DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
   18997 //
   18998 // value: If true, try to recover an image from truncated input.
   18999 // If not specified, defaults to false
   19000 func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr {
   19001 	return func(m optionalAttr) {
   19002 		m["try_recover_truncated"] = value
   19003 	}
   19004 }
   19005 
   19006 // DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
   19007 //
   19008 // value: The minimum required fraction of lines before a truncated
   19009 // input is accepted.
   19010 // If not specified, defaults to 1
   19011 func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr {
   19012 	return func(m optionalAttr) {
   19013 		m["acceptable_fraction"] = value
   19014 	}
   19015 }
   19016 
   19017 // DecodeJpegDctMethod sets the optional dct_method attribute to value.
   19018 //
   19019 // value: string specifying a hint about the algorithm used for
   19020 // decompression.  Defaults to "" which maps to a system-specific
   19021 // default.  Currently valid values are ["INTEGER_FAST",
   19022 // "INTEGER_ACCURATE"].  The hint may be ignored (e.g., the internal
   19023 // jpeg library changes to a version that does not have that specific
   19024 // option.)
   19025 // If not specified, defaults to ""
   19026 func DecodeJpegDctMethod(value string) DecodeJpegAttr {
   19027 	return func(m optionalAttr) {
   19028 		m["dct_method"] = value
   19029 	}
   19030 }
   19031 
   19032 // Decode a JPEG-encoded image to a uint8 tensor.
   19033 //
   19034 // The attr `channels` indicates the desired number of color channels for the
   19035 // decoded image.
   19036 //
   19037 // Accepted values are:
   19038 //
   19039 // *   0: Use the number of channels in the JPEG-encoded image.
   19040 // *   1: output a grayscale image.
   19041 // *   3: output an RGB image.
   19042 //
   19043 // If needed, the JPEG-encoded image is transformed to match the requested number
   19044 // of color channels.
   19045 //
   19046 // The attr `ratio` allows downscaling the image by an integer factor during
   19047 // decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
   19048 // downscaling the image later.
   19049 //
   19051 // This op also supports decoding PNGs and non-animated GIFs since the interface is
   19052 // the same, though it is cleaner to use `tf.image.decode_image`.
   19053 //
   19054 // Arguments:
   19055 //	contents: 0-D.  The JPEG-encoded image.
   19056 //
   19057 // Returns 3-D with shape `[height, width, channels]`.
   19058 func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) {
   19059 	if scope.Err() != nil {
   19060 		return
   19061 	}
   19062 	attrs := map[string]interface{}{}
   19063 	for _, a := range optional {
   19064 		a(attrs)
   19065 	}
   19066 	opspec := tf.OpSpec{
   19067 		Type: "DecodeJpeg",
   19068 		Input: []tf.Input{
   19069 			contents,
   19070 		},
   19071 		Attrs: attrs,
   19072 	}
   19073 	op := scope.AddOperation(opspec)
   19074 	return op.Output(0)
   19075 }
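
// A minimal decoding sketch (file name illustrative; assumes the package's
// NewScope, Const, and ReadFile helpers):
//
// ```go
// s := NewScope()
// contents := ReadFile(s, Const(s, "photo.jpg"))
// img := DecodeJpeg(s, contents,
// 	DecodeJpegChannels(3), // force RGB output
// 	DecodeJpegRatio(4))    // decode at roughly 1/4 resolution
// _ = img // uint8 tensor of shape [height/4, width/4, 3]
// ```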
   19076 
   19077 // ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
   19078 type ExtractJpegShapeAttr func(optionalAttr)
   19079 
   19080 // ExtractJpegShapeOutputType sets the optional output_type attribute to value.
   19081 //
   19082 // value: (Optional) The output type of the operation (int32 or int64).
   19083 // Defaults to int32.
   19084 // If not specified, defaults to DT_INT32
   19085 func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr {
   19086 	return func(m optionalAttr) {
   19087 		m["output_type"] = value
   19088 	}
   19089 }
   19090 
   19091 // Extract the shape information of a JPEG-encoded image.
   19092 //
   19093 // This op only parses the image header, so it is much faster than DecodeJpeg.
   19094 //
   19095 // Arguments:
   19096 //	contents: 0-D. The JPEG-encoded image.
   19097 //
   19098 // Returns 1-D. The image shape with format [height, width, channels].
   19099 func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output) {
   19100 	if scope.Err() != nil {
   19101 		return
   19102 	}
   19103 	attrs := map[string]interface{}{}
   19104 	for _, a := range optional {
   19105 		a(attrs)
   19106 	}
   19107 	opspec := tf.OpSpec{
   19108 		Type: "ExtractJpegShape",
   19109 		Input: []tf.Input{
   19110 			contents,
   19111 		},
   19112 		Attrs: attrs,
   19113 	}
   19114 	op := scope.AddOperation(opspec)
   19115 	return op.Output(0)
   19116 }
   19117 
   19118 // PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
   19119 type PaddingFIFOQueueV2Attr func(optionalAttr)
   19120 
   19121 // PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
   19122 //
   19123 // value: The shape of each component in a value. The length of this attr must
   19124 // be either 0 or the same as the length of component_types.
   19125 // Shapes of fixed rank but variable size are allowed by setting
   19126 // any shape dimension to -1.  In this case, the inputs' shape may vary along
   19127 // the given dimension, and DequeueMany will pad the given dimension with
   19128 // zeros up to the maximum shape of all elements in the given batch.
   19129 // If the length of this attr is 0, different queue elements may have
   19130 // different ranks and shapes, but only one element may be dequeued at a time.
   19131 // If not specified, defaults to the empty list.
   19132 //
   19133 // REQUIRES: len(value) >= 0
   19134 func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr {
   19135 	return func(m optionalAttr) {
   19136 		m["shapes"] = value
   19137 	}
   19138 }
   19139 
   19140 // PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
   19141 //
   19142 // value: The upper bound on the number of elements in this queue.
   19143 // Negative numbers mean no limit.
   19144 // If not specified, defaults to -1
   19145 func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr {
   19146 	return func(m optionalAttr) {
   19147 		m["capacity"] = value
   19148 	}
   19149 }
   19150 
   19151 // PaddingFIFOQueueV2Container sets the optional container attribute to value.
   19152 //
   19153 // value: If non-empty, this queue is placed in the given container.
   19154 // Otherwise, a default container is used.
   19155 // If not specified, defaults to ""
   19156 func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr {
   19157 	return func(m optionalAttr) {
   19158 		m["container"] = value
   19159 	}
   19160 }
   19161 
   19162 // PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
   19163 //
   19164 // value: If non-empty, this queue will be shared under the given name
   19165 // across multiple sessions.
   19166 // If not specified, defaults to ""
   19167 func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr {
   19168 	return func(m optionalAttr) {
   19169 		m["shared_name"] = value
   19170 	}
   19171 }
   19172 
   19173 // A queue that produces elements in first-in first-out order.
   19174 //
   19175 // Variable-size shapes are allowed by setting the corresponding shape dimensions
   19176 // to -1 in the shapes attr.  In this case DequeueMany will pad up to the maximum
   19177 // size of any given element in the minibatch.  See the shapes attr above for details.
   19178 //
   19179 // Arguments:
   19180 //	component_types: The type of each component in a value.
   19181 //
   19182 // Returns The handle to the queue.
   19183 func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output) {
   19184 	if scope.Err() != nil {
   19185 		return
   19186 	}
   19187 	attrs := map[string]interface{}{"component_types": component_types}
   19188 	for _, a := range optional {
   19189 		a(attrs)
   19190 	}
   19191 	opspec := tf.OpSpec{
   19192 		Type: "PaddingFIFOQueueV2",
   19193 
   19194 		Attrs: attrs,
   19195 	}
   19196 	op := scope.AddOperation(opspec)
   19197 	return op.Output(0)
   19198 }
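
// A sketch of a queue of variable-length float vectors (capacity illustrative;
// assumes the package's NewScope helper and tf.MakeShape):
//
// ```go
// s := NewScope()
// q := PaddingFIFOQueueV2(s, []tf.DataType{tf.Float},
// 	PaddingFIFOQueueV2Shapes([]tf.Shape{tf.MakeShape(-1)}), // any length
// 	PaddingFIFOQueueV2Capacity(32))
// _ = q // DequeueMany pads each batch to its longest element
// ```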
   19199 
   19200 // DecodePngAttr is an optional argument to DecodePng.
   19201 type DecodePngAttr func(optionalAttr)
   19202 
   19203 // DecodePngChannels sets the optional channels attribute to value.
   19204 //
   19205 // value: Number of color channels for the decoded image.
   19206 // If not specified, defaults to 0
   19207 func DecodePngChannels(value int64) DecodePngAttr {
   19208 	return func(m optionalAttr) {
   19209 		m["channels"] = value
   19210 	}
   19211 }
   19212 
   19213 // DecodePngDtype sets the optional dtype attribute to value.
   19214 // If not specified, defaults to DT_UINT8
   19215 func DecodePngDtype(value tf.DataType) DecodePngAttr {
   19216 	return func(m optionalAttr) {
   19217 		m["dtype"] = value
   19218 	}
   19219 }
   19220 
   19221 // Decode a PNG-encoded image to a uint8 or uint16 tensor.
   19222 //
   19223 // The attr `channels` indicates the desired number of color channels for the
   19224 // decoded image.
   19225 //
   19226 // Accepted values are:
   19227 //
   19228 // *   0: Use the number of channels in the PNG-encoded image.
   19229 // *   1: output a grayscale image.
   19230 // *   3: output an RGB image.
   19231 // *   4: output an RGBA image.
   19232 //
   19233 // If needed, the PNG-encoded image is transformed to match the requested number
   19234 // of color channels.
   19235 //
   19236 // This op also supports decoding JPEGs and non-animated GIFs since the interface
   19237 // is the same, though it is cleaner to use `tf.image.decode_image`.
   19238 //
   19239 // Arguments:
   19240 //	contents: 0-D.  The PNG-encoded image.
   19241 //
   19242 // Returns 3-D with shape `[height, width, channels]`.
   19243 func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output) {
   19244 	if scope.Err() != nil {
   19245 		return
   19246 	}
   19247 	attrs := map[string]interface{}{}
   19248 	for _, a := range optional {
   19249 		a(attrs)
   19250 	}
   19251 	opspec := tf.OpSpec{
   19252 		Type: "DecodePng",
   19253 		Input: []tf.Input{
   19254 			contents,
   19255 		},
   19256 		Attrs: attrs,
   19257 	}
   19258 	op := scope.AddOperation(opspec)
   19259 	return op.Output(0)
   19260 }
   19261 
   19262 // Decode the first frame of a GIF-encoded image to a uint8 tensor.
   19263 //
   19264 // GIFs with frame or transparency compression are not supported;
   19265 // convert an animated GIF from compressed to uncompressed with:
   19266 //
   19267 //     convert $src.gif -coalesce $dst.gif
   19268 //
   19269 // This op also supports decoding JPEGs and PNGs, though it is cleaner to use
   19270 // `tf.image.decode_image`.
   19271 //
   19272 // Arguments:
   19273 //	contents: 0-D.  The GIF-encoded image.
   19274 //
   19275 // Returns 4-D with shape `[num_frames, height, width, 3]`. RGB order
   19276 func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
   19277 	if scope.Err() != nil {
   19278 		return
   19279 	}
   19280 	opspec := tf.OpSpec{
   19281 		Type: "DecodeGif",
   19282 		Input: []tf.Input{
   19283 			contents,
   19284 		},
   19285 	}
   19286 	op := scope.AddOperation(opspec)
   19287 	return op.Output(0)
   19288 }
   19289 
   19290 // ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
   19291 type ResourceApplyCenteredRMSPropAttr func(optionalAttr)
   19292 
   19293 // ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
   19294 //
   19295 // value: If `True`, updating of the var, mg, ms, and mom tensors is
   19296 // protected by a lock; otherwise the behavior is undefined, but may exhibit less
   19297 // contention.
   19298 // If not specified, defaults to false
   19299 func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr {
   19300 	return func(m optionalAttr) {
   19301 		m["use_locking"] = value
   19302 	}
   19303 }
   19304 
   19305 // Update '*var' according to the centered RMSProp algorithm.
   19306 //
   19307 // The centered RMSProp algorithm uses an estimate of the centered second moment
   19308 // (i.e., the variance) for normalization, as opposed to regular RMSProp, which
   19309 // uses the (uncentered) second moment. This often helps with training, but is
   19310 // slightly more expensive in terms of computation and memory.
   19311 //
   19312 // Note that in the dense implementation of this algorithm, mg, ms, and mom will
   19313 // update even if the grad is zero, but in this sparse implementation, mg, ms,
   19314 // and mom will not update in iterations during which the grad is zero.
   19315 //
   19316 // mean_square = decay * mean_square + (1-decay) * gradient ** 2
   19317 // mean_grad = decay * mean_grad + (1-decay) * gradient
   19318 //
   19319 // Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
   19320 //
   19321 // mg <- rho * mg_{t-1} + (1-rho) * grad
   19322 // ms <- rho * ms_{t-1} + (1-rho) * grad * grad
   19323 // mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
   19324 // var <- var - mom
   19325 //
   19326 // Arguments:
   19327 //	var_: Should be from a Variable().
   19328 //	mg: Should be from a Variable().
   19329 //	ms: Should be from a Variable().
   19330 //	mom: Should be from a Variable().
   19331 //	lr: Scaling factor. Must be a scalar.
   19332 //	rho: Decay rate. Must be a scalar.
   19333 //
   19334 //	momentum: Momentum scale. Must be a scalar.
   19335 //	grad: The gradient.
   19336 //
   19337 // Returns the created operation.
   19338 func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
   19339 	if scope.Err() != nil {
   19340 		return
   19341 	}
   19342 	attrs := map[string]interface{}{}
   19343 	for _, a := range optional {
   19344 		a(attrs)
   19345 	}
   19346 	opspec := tf.OpSpec{
   19347 		Type: "ResourceApplyCenteredRMSProp",
   19348 		Input: []tf.Input{
   19349 			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad,
   19350 		},
   19351 		Attrs: attrs,
   19352 	}
   19353 	return scope.AddOperation(opspec)
   19354 }
   19355 
   19356 // Returns a list of tensors with the same shapes and contents as the input
   19357 // tensors.
   19358 //
   19360 // This op can be used to override the gradient for complicated functions. For
   19361 // example, suppose y = f(x) and we wish to apply a custom function g for backprop
   19362 // such that dx = g(dy). In Python,
   19363 //
   19364 // ```python
   19365 // with tf.get_default_graph().gradient_override_map(
   19366 //     {'IdentityN': 'OverrideGradientWithG'}):
   19367 //   y, _ = identity_n([f(x), x])
   19368 //
   19369 // @tf.RegisterGradient('OverrideGradientWithG')
   19370 // def ApplyG(op, dy, _):
   19371 //   return [None, g(dy)]  # Do not backprop to f(x).
   19372 // ```
   19373 func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output) {
   19374 	if scope.Err() != nil {
   19375 		return
   19376 	}
   19377 	opspec := tf.OpSpec{
   19378 		Type: "IdentityN",
   19379 		Input: []tf.Input{
   19380 			tf.OutputList(input),
   19381 		},
   19382 	}
   19383 	op := scope.AddOperation(opspec)
   19384 	if scope.Err() != nil {
   19385 		return
   19386 	}
   19387 	var idx int
   19388 	var err error
   19389 	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
   19390 		scope.UpdateErr("IdentityN", err)
   19391 		return
   19392 	}
   19393 	return output
   19394 }
   19395 
   19396 // Computes the gradient of the sigmoid of `x` wrt its input.
   19397 //
   19398 // Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
   19399 // `dy` is the corresponding input gradient.
   19400 func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
   19401 	if scope.Err() != nil {
   19402 		return
   19403 	}
   19404 	opspec := tf.OpSpec{
   19405 		Type: "SigmoidGrad",
   19406 		Input: []tf.Input{
   19407 			y, dy,
   19408 		},
   19409 	}
   19410 	op := scope.AddOperation(opspec)
   19411 	return op.Output(0)
   19412 }
   19413 
   19414 // Converts one or more images from HSV to RGB.
   19415 //
   19416 // Outputs a tensor of the same shape as the `images` tensor, containing the RGB
   19417 // value of the pixels. The output is only well defined if the values in `images`
   19418 // are in `[0,1]`.
   19419 //
   19420 // See `rgb_to_hsv` for a description of the HSV encoding.
   19421 //
   19422 // Arguments:
   19423 //	images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
   19424 //
   19425 // Returns `images` converted to RGB.
   19426 func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
   19427 	if scope.Err() != nil {
   19428 		return
   19429 	}
   19430 	opspec := tf.OpSpec{
   19431 		Type: "HSVToRGB",
   19432 		Input: []tf.Input{
   19433 			images,
   19434 		},
   19435 	}
   19436 	op := scope.AddOperation(opspec)
   19437 	return op.Output(0)
   19438 }
   19439 
   19440 // SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
   19441 type SampleDistortedBoundingBoxV2Attr func(optionalAttr)
   19442 
   19443 // SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
   19444 //
   19445 // value: If either `seed` or `seed2` are set to non-zero, the random number
   19446 // generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
   19447 // seed.
   19448 // If not specified, defaults to 0
   19449 func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr {
   19450 	return func(m optionalAttr) {
   19451 		m["seed"] = value
   19452 	}
   19453 }
   19454 
   19455 // SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
   19456 //
   19457 // value: A second seed to avoid seed collision.
   19458 // If not specified, defaults to 0
   19459 func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr {
   19460 	return func(m optionalAttr) {
   19461 		m["seed2"] = value
   19462 	}
   19463 }
   19464 
   19465 // SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
   19466 //
   19467 // value: The cropped area of the image must have an aspect ratio =
   19468 // width / height within this range.
   19469 // If not specified, defaults to [0.75, 1.33]
   19470 func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr {
   19471 	return func(m optionalAttr) {
   19472 		m["aspect_ratio_range"] = value
   19473 	}
   19474 }
   19475 
   19476 // SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
   19477 //
   19478 // value: The cropped area of the image must contain a fraction of the
// supplied image within this range.
   19480 // If not specified, defaults to <f:0.05 f:1 >
   19481 func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
   19482 	return func(m optionalAttr) {
   19483 		m["area_range"] = value
   19484 	}
   19485 }
   19486 
   19487 // SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
   19488 //
   19489 // value: Number of attempts at generating a cropped region of the image
// satisfying the specified constraints. After `max_attempts` failures, the
// entire image is returned.
   19492 // If not specified, defaults to 100
   19493 func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr {
   19494 	return func(m optionalAttr) {
   19495 		m["max_attempts"] = value
   19496 	}
   19497 }
   19498 
   19499 // SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
   19500 //
// value: Controls behavior if no bounding boxes are supplied.
   19502 // If true, assume an implicit bounding box covering the whole input. If false,
   19503 // raise an error.
   19504 // If not specified, defaults to false
   19505 func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr {
   19506 	return func(m optionalAttr) {
   19507 		m["use_image_if_no_bounding_boxes"] = value
   19508 	}
   19509 }
   19510 
   19511 // Generate a single randomly distorted bounding box for an image.
   19512 //
   19513 // Bounding box annotations are often supplied in addition to ground-truth labels
   19514 // in image recognition or object localization tasks. A common technique for
   19515 // training such a system is to randomly distort an image while preserving
   19516 // its content, i.e. *data augmentation*. This Op outputs a randomly distorted
   19517 // localization of an object, i.e. bounding box, given an `image_size`,
   19518 // `bounding_boxes` and a series of constraints.
   19519 //
   19520 // The output of this Op is a single bounding box that may be used to crop the
   19521 // original image. The output is returned as 3 tensors: `begin`, `size` and
   19522 // `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
   19523 // image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
   19524 // what the bounding box looks like.
   19525 //
   19526 // Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
   19527 // bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
   19528 // height of the underlying image.
   19529 //
   19530 // For example,
   19531 //
   19532 // ```python
   19533 //     # Generate a single distorted bounding box.
   19534 //     begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
   19535 //         tf.shape(image),
   19536 //         bounding_boxes=bounding_boxes)
   19537 //
   19538 //     # Draw the bounding box in an image summary.
   19539 //     image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
   19540 //                                                   bbox_for_draw)
   19541 //     tf.summary.image('images_with_box', image_with_box)
   19542 //
   19543 //     # Employ the bounding box to distort the image.
   19544 //     distorted_image = tf.slice(image, begin, size)
   19545 // ```
   19546 //
   19547 // Note that if no bounding box information is available, setting
   19548 // `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
   19549 // bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
   19550 // false and no bounding boxes are supplied, an error is raised.
   19551 //
   19552 // Arguments:
   19553 //	image_size: 1-D, containing `[height, width, channels]`.
   19554 //	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
   19555 // associated with the image.
   19556 //	min_object_covered: The cropped area of the image must contain at least this
   19557 // fraction of any bounding box supplied. The value of this parameter should be
   19558 // non-negative. In the case of 0, the cropped area does not need to overlap
   19559 // any of the bounding boxes supplied.
   19560 //
// Returns:
//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as
// input to `tf.slice`.
//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as
// input to `tf.slice`.
//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
// Provide as input to `tf.image.draw_bounding_boxes`.
   19565 func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
   19566 	if scope.Err() != nil {
   19567 		return
   19568 	}
   19569 	attrs := map[string]interface{}{}
   19570 	for _, a := range optional {
   19571 		a(attrs)
   19572 	}
   19573 	opspec := tf.OpSpec{
   19574 		Type: "SampleDistortedBoundingBoxV2",
   19575 		Input: []tf.Input{
   19576 			image_size, bounding_boxes, min_object_covered,
   19577 		},
   19578 		Attrs: attrs,
   19579 	}
   19580 	op := scope.AddOperation(opspec)
   19581 	return op.Output(0), op.Output(1), op.Output(2)
   19582 }
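
// A rough Go analog of the Python example above, as a sketch only: `image`
// stands in for a 3-D image tensor built elsewhere, the box coordinates are
// made up, and op.Shape and op.Slice are the generated wrappers for the
// corresponding ops:
//
// ```go
// s := op.NewScope()
// image := op.Placeholder(s, tf.Float) // [height, width, channels]
// boxes := op.Const(s, [][][]float32{{{0.1, 0.1, 0.9, 0.9}}})
// minCovered := op.Const(s, float32(0.1))
// begin, size, bbox := op.SampleDistortedBoundingBoxV2(s,
// 	op.Shape(s, image), boxes, minCovered,
// 	op.SampleDistortedBoundingBoxV2MaxAttempts(50))
// distorted := op.Slice(s, image, begin, size)
// _, _ = distorted, bbox
// ```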
   19583 
   19584 // ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
   19585 type ExtractGlimpseAttr func(optionalAttr)
   19586 
   19587 // ExtractGlimpseCentered sets the optional centered attribute to value.
   19588 //
   19589 // value: indicates if the offset coordinates are centered relative to
   19590 // the image, in which case the (0, 0) offset is relative to the center
   19591 // of the input images. If false, the (0,0) offset corresponds to the
   19592 // upper left corner of the input images.
   19593 // If not specified, defaults to true
   19594 func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr {
   19595 	return func(m optionalAttr) {
   19596 		m["centered"] = value
   19597 	}
   19598 }
   19599 
   19600 // ExtractGlimpseNormalized sets the optional normalized attribute to value.
   19601 //
   19602 // value: indicates if the offset coordinates are normalized.
   19603 // If not specified, defaults to true
   19604 func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr {
   19605 	return func(m optionalAttr) {
   19606 		m["normalized"] = value
   19607 	}
   19608 }
   19609 
   19610 // ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
   19611 //
   19612 // value: indicates if the noise should be generated using a
   19613 // uniform distribution or a Gaussian distribution.
   19614 // If not specified, defaults to true
   19615 func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr {
   19616 	return func(m optionalAttr) {
   19617 		m["uniform_noise"] = value
   19618 	}
   19619 }
   19620 
   19621 // Extracts a glimpse from the input tensor.
   19622 //
   19623 // Returns a set of windows called glimpses extracted at location
// `offsets` from the input tensor. If a window only partially
// overlaps the input, the non-overlapping areas are filled with
   19626 // random noise.
   19627 //
   19628 // The result is a 4-D tensor of shape `[batch_size, glimpse_height,
   19629 // glimpse_width, channels]`. The channels and batch dimensions are the
   19630 // same as that of the input tensor. The height and width of the output
   19631 // windows are specified in the `size` parameter.
   19632 //
// The arguments `normalized` and `centered` control how the windows are built:
   19634 //
   19635 // * If the coordinates are normalized but not centered, 0.0 and 1.0
   19636 //   correspond to the minimum and maximum of each height and width
   19637 //   dimension.
   19638 // * If the coordinates are both normalized and centered, they range from
   19639 //   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
   19640 //   left corner, the lower right corner is located at (1.0, 1.0) and the
   19641 //   center is at (0, 0).
   19642 // * If the coordinates are not normalized they are interpreted as
   19643 //   numbers of pixels.
   19644 //
   19645 // Arguments:
   19646 //	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
   19647 //	size: A 1-D tensor of 2 elements containing the size of the glimpses
// to extract.  The glimpse height must be specified first, followed
   19649 // by the glimpse width.
//	offsets: A 2-D float tensor of shape `[batch_size, 2]` containing
   19651 // the y, x locations of the center of each window.
   19652 //
   19653 // Returns A tensor representing the glimpses `[batch_size,
   19654 // glimpse_height, glimpse_width, channels]`.
   19655 func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output) {
   19656 	if scope.Err() != nil {
   19657 		return
   19658 	}
   19659 	attrs := map[string]interface{}{}
   19660 	for _, a := range optional {
   19661 		a(attrs)
   19662 	}
   19663 	opspec := tf.OpSpec{
   19664 		Type: "ExtractGlimpse",
   19665 		Input: []tf.Input{
   19666 			input, size, offsets,
   19667 		},
   19668 		Attrs: attrs,
   19669 	}
   19670 	op := scope.AddOperation(opspec)
   19671 	return op.Output(0)
   19672 }
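
// Illustrative sketch (the sizes are made up; with `centered` and
// `normalized` left at their defaults, the (0.0, 0.0) offset picks a glimpse
// at the image center):
//
// ```go
// s := op.NewScope()
// input := op.Placeholder(s, tf.Float) // [batch_size, height, width, channels]
// size := op.Const(s, []int32{64, 64}) // glimpse height, then width
// offsets := op.Const(s, [][]float32{{0.0, 0.0}})
// glimpse := op.ExtractGlimpse(s, input, size, offsets)
// _ = glimpse
// ```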
   19673 
   19674 // A container for an iterator resource.
   19675 //
   19676 // Returns A handle to the iterator that can be passed to a "MakeIterator"
   19677 // or "IteratorGetNext" op.
   19678 func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   19679 	if scope.Err() != nil {
   19680 		return
   19681 	}
   19682 	attrs := map[string]interface{}{"shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
   19683 	opspec := tf.OpSpec{
   19684 		Type: "Iterator",
   19685 
   19686 		Attrs: attrs,
   19687 	}
   19688 	op := scope.AddOperation(opspec)
   19689 	return op.Output(0)
   19690 }
   19691 
   19692 // ShuffleDatasetAttr is an optional argument to ShuffleDataset.
   19693 type ShuffleDatasetAttr func(optionalAttr)
   19694 
   19695 // ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
   19696 //
   19697 // value: If true, each iterator over this dataset will be given
   19698 // a different pseudorandomly generated seed, based on a sequence seeded by the
   19699 // `seed` and `seed2` inputs. If false, each iterator will be given the same
   19700 // seed, and repeated iteration over this dataset will yield the exact same
   19701 // sequence of results.
   19702 // If not specified, defaults to true
   19703 func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr {
   19704 	return func(m optionalAttr) {
   19705 		m["reshuffle_each_iteration"] = value
   19706 	}
   19707 }
   19708 
   19709 // Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
   19710 //
   19711 // Arguments:
   19712 //
   19713 //	buffer_size: The number of output elements to buffer in an iterator over
   19714 // this dataset. Compare with the `min_after_dequeue` attr when creating a
   19715 // `RandomShuffleQueue`.
   19716 //	seed: A scalar seed for the random number generator. If either `seed` or
   19717 // `seed2` is set to be non-zero, the random number generator is seeded
   19718 // by the given seed.  Otherwise, a random seed is used.
   19719 //	seed2: A second scalar seed to avoid seed collision.
   19720 //
   19721 //
   19722 func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output) {
   19723 	if scope.Err() != nil {
   19724 		return
   19725 	}
   19726 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   19727 	for _, a := range optional {
   19728 		a(attrs)
   19729 	}
   19730 	opspec := tf.OpSpec{
   19731 		Type: "ShuffleDataset",
   19732 		Input: []tf.Input{
   19733 			input_dataset, buffer_size, seed, seed2,
   19734 		},
   19735 		Attrs: attrs,
   19736 	}
   19737 	op := scope.AddOperation(opspec)
   19738 	return op.Output(0)
   19739 }
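
// Illustrative sketch of shuffling a record dataset (the file path is
// hypothetical; op.TFRecordDataset is the generated wrapper defined later in
// this file):
//
// ```go
// s := op.NewScope()
// records := op.TFRecordDataset(s,
// 	op.Const(s, []string{"/tmp/data.tfrecord"}),
// 	op.Const(s, ""),          // no compression
// 	op.Const(s, int64(0)))    // no extra buffering
// shuffled := op.ShuffleDataset(s, records,
// 	op.Const(s, int64(1024)), // buffer_size
// 	op.Const(s, int64(0)),    // seed
// 	op.Const(s, int64(0)),    // seed2
// 	[]tf.DataType{tf.String}, []tf.Shape{tf.ScalarShape()})
// _ = shuffled
// ```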
   19740 
   19741 // 3D fast Fourier transform.
   19742 //
   19743 // Computes the 3-dimensional discrete Fourier transform over the inner-most 3
   19744 // dimensions of `input`.
   19745 //
   19746 // Arguments:
   19747 //	input: A complex64 tensor.
   19748 //
   19749 // Returns A complex64 tensor of the same shape as `input`. The inner-most 3
   19750 //   dimensions of `input` are replaced with their 3D Fourier transform.
   19751 //
   19752 // @compatibility(numpy)
   19753 // Equivalent to np.fft.fftn with 3 dimensions.
   19754 // @end_compatibility
   19755 func FFT3D(scope *Scope, input tf.Output) (output tf.Output) {
   19756 	if scope.Err() != nil {
   19757 		return
   19758 	}
   19759 	opspec := tf.OpSpec{
   19760 		Type: "FFT3D",
   19761 		Input: []tf.Input{
   19762 			input,
   19763 		},
   19764 	}
   19765 	op := scope.AddOperation(opspec)
   19766 	return op.Output(0)
   19767 }
   19768 
   19769 // CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
   19770 type CropAndResizeGradBoxesAttr func(optionalAttr)
   19771 
   19772 // CropAndResizeGradBoxesMethod sets the optional method attribute to value.
   19773 //
   19774 // value: A string specifying the interpolation method. Only 'bilinear' is
   19775 // supported for now.
   19776 // If not specified, defaults to "bilinear"
   19777 func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr {
   19778 	return func(m optionalAttr) {
   19779 		m["method"] = value
   19780 	}
   19781 }
   19782 
   19783 // Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
   19784 //
   19785 // Arguments:
   19786 //	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
   19787 //	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
   19788 // Both `image_height` and `image_width` need to be positive.
   19789 //	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
   19790 // specifies the coordinates of a box in the `box_ind[i]` image and is specified
   19791 // in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
// `[0, 1]` interval of normalized image height is mapped to
// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
   19795 // which case the sampled crop is an up-down flipped version of the original
   19796 // image. The width dimension is treated similarly. Normalized coordinates
   19797 // outside the `[0, 1]` range are allowed, in which case we use
   19798 // `extrapolation_value` to extrapolate the input image values.
   19799 //	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
   19800 // The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
   19801 //
   19802 // Returns A 2-D tensor of shape `[num_boxes, 4]`.
   19803 func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output) {
   19804 	if scope.Err() != nil {
   19805 		return
   19806 	}
   19807 	attrs := map[string]interface{}{}
   19808 	for _, a := range optional {
   19809 		a(attrs)
   19810 	}
   19811 	opspec := tf.OpSpec{
   19812 		Type: "CropAndResizeGradBoxes",
   19813 		Input: []tf.Input{
   19814 			grads, image, boxes, box_ind,
   19815 		},
   19816 		Attrs: attrs,
   19817 	}
   19818 	op := scope.AddOperation(opspec)
   19819 	return op.Output(0)
   19820 }
   19821 
   19822 // Saves tensors in V2 checkpoint format.
   19823 //
   19824 // By default, saves the named tensors in full.  If the caller wishes to save
   19825 // specific slices of full tensors, "shape_and_slices" should be non-empty strings
   19826 // and correspondingly well-formed.
   19827 //
   19828 // Arguments:
   19829 //	prefix: Must have a single element. The prefix of the V2 checkpoint to which we
   19830 // write the tensors.
   19831 //	tensor_names: shape {N}. The names of the tensors to be saved.
   19832 //	shape_and_slices: shape {N}.  The slice specs of the tensors to be saved.
   19833 // Empty strings indicate that they are non-partitioned tensors.
   19834 //	tensors: `N` tensors to save.
   19835 //
   19836 // Returns the created operation.
   19837 func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
   19838 	if scope.Err() != nil {
   19839 		return
   19840 	}
   19841 	opspec := tf.OpSpec{
   19842 		Type: "SaveV2",
   19843 		Input: []tf.Input{
   19844 			prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
   19845 		},
   19846 	}
   19847 	return scope.AddOperation(opspec)
   19848 }
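
// Illustrative sketch of saving one tensor in full (the checkpoint prefix is
// a hypothetical path):
//
// ```go
// s := op.NewScope()
// v := op.Const(s, []float32{1, 2, 3})
// save := op.SaveV2(s,
// 	op.Const(s, "/tmp/ckpt"),   // prefix
// 	op.Const(s, []string{"v"}), // tensor_names
// 	op.Const(s, []string{""}),  // "" = save the whole tensor
// 	[]tf.Output{v})
// _ = save // execute via session.Run(nil, nil, []*tf.Operation{save})
// ```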
   19849 
   19850 // StatsAggregatorHandleAttr is an optional argument to StatsAggregatorHandle.
   19851 type StatsAggregatorHandleAttr func(optionalAttr)
   19852 
   19853 // StatsAggregatorHandleContainer sets the optional container attribute to value.
   19854 // If not specified, defaults to ""
   19855 func StatsAggregatorHandleContainer(value string) StatsAggregatorHandleAttr {
   19856 	return func(m optionalAttr) {
   19857 		m["container"] = value
   19858 	}
   19859 }
   19860 
   19861 // StatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
   19862 // If not specified, defaults to ""
   19863 func StatsAggregatorHandleSharedName(value string) StatsAggregatorHandleAttr {
   19864 	return func(m optionalAttr) {
   19865 		m["shared_name"] = value
   19866 	}
   19867 }
   19868 
   19869 // Creates a statistics manager resource.
   19870 func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output) {
   19871 	if scope.Err() != nil {
   19872 		return
   19873 	}
   19874 	attrs := map[string]interface{}{}
   19875 	for _, a := range optional {
   19876 		a(attrs)
   19877 	}
   19878 	opspec := tf.OpSpec{
   19879 		Type: "StatsAggregatorHandle",
   19880 
   19881 		Attrs: attrs,
   19882 	}
   19883 	op := scope.AddOperation(opspec)
   19884 	return op.Output(0)
   19885 }
   19886 
   19887 // Greedily selects a subset of bounding boxes in descending order of score,
   19888 //
   19889 // pruning away boxes that have high intersection-over-union (IOU) overlap
   19890 // with previously selected boxes.  Bounding boxes are supplied as
   19891 // [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
   19892 // diagonal pair of box corners and the coordinates can be provided as normalized
   19893 // (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
   19894 // is agnostic to where the origin is in the coordinate system.  Note that this
// is agnostic to where the origin is in the coordinate system, and is
// invariant to orthogonal transformations and translations of the
// coordinate system; thus translating or reflecting the coordinate system
// results in the same boxes being selected by the algorithm.
   19899 // The output of this operation is a set of integers indexing into the input
   19900 // collection of bounding boxes representing the selected boxes.  The bounding
   19901 // box coordinates corresponding to the selected indices can then be obtained
   19902 // using the `tf.gather operation`.  For example:
// using the `tf.gather` operation.  For example:
   19904 //   selected_indices = tf.image.non_max_suppression_v2(
   19905 //       boxes, scores, max_output_size, iou_threshold)
   19906 //   selected_boxes = tf.gather(boxes, selected_indices)
   19907 //
   19908 // Arguments:
   19909 //	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
   19910 //	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
   19911 // score corresponding to each box (each row of boxes).
   19912 //	max_output_size: A scalar integer tensor representing the maximum number of
   19913 // boxes to be selected by non max suppression.
   19914 //	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
   19915 // boxes overlap too much with respect to IOU.
   19916 //
   19917 // Returns A 1-D integer tensor of shape `[M]` representing the selected
   19918 // indices from the boxes tensor, where `M <= max_output_size`.
   19919 func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output) {
   19920 	if scope.Err() != nil {
   19921 		return
   19922 	}
   19923 	opspec := tf.OpSpec{
   19924 		Type: "NonMaxSuppressionV2",
   19925 		Input: []tf.Input{
   19926 			boxes, scores, max_output_size, iou_threshold,
   19927 		},
   19928 	}
   19929 	op := scope.AddOperation(opspec)
   19930 	return op.Output(0)
   19931 }
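
// A rough Go analog of the example above (box and score values are made up;
// op.Gather is the generated wrapper for the gather op):
//
// ```go
// s := op.NewScope()
// boxes := op.Const(s, [][]float32{{0, 0, 1, 1}, {0, 0.1, 1, 1.1}})
// scores := op.Const(s, []float32{0.9, 0.75})
// selected := op.NonMaxSuppressionV2(s, boxes, scores,
// 	op.Const(s, int32(10)),    // max_output_size
// 	op.Const(s, float32(0.5))) // iou_threshold
// kept := op.Gather(s, boxes, selected)
// _ = kept
// ```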
   19932 
   19933 // Reshapes a tensor.
   19934 //
   19935 // Given `tensor`, this operation returns a tensor that has the same values
   19936 // as `tensor` with shape `shape`.
   19937 //
   19938 // If one component of `shape` is the special value -1, the size of that dimension
   19939 // is computed so that the total size remains constant.  In particular, a `shape`
   19940 // of `[-1]` flattens into 1-D.  At most one component of `shape` can be -1.
   19941 //
   19942 // If `shape` is 1-D or higher, then the operation returns a tensor with shape
   19943 // `shape` filled with the values of `tensor`. In this case, the number of elements
   19944 // implied by `shape` must be the same as the number of elements in `tensor`.
   19945 //
   19946 // For example:
   19947 //
   19948 // ```
   19949 // # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
   19950 // # tensor 't' has shape [9]
   19951 // reshape(t, [3, 3]) ==> [[1, 2, 3],
   19952 //                         [4, 5, 6],
   19953 //                         [7, 8, 9]]
   19954 //
   19955 // # tensor 't' is [[[1, 1], [2, 2]],
   19956 // #                [[3, 3], [4, 4]]]
   19957 // # tensor 't' has shape [2, 2, 2]
   19958 // reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
   19959 //                         [3, 3, 4, 4]]
   19960 //
   19961 // # tensor 't' is [[[1, 1, 1],
   19962 // #                 [2, 2, 2]],
   19963 // #                [[3, 3, 3],
   19964 // #                 [4, 4, 4]],
   19965 // #                [[5, 5, 5],
   19966 // #                 [6, 6, 6]]]
   19967 // # tensor 't' has shape [3, 2, 3]
   19968 // # pass '[-1]' to flatten 't'
   19969 // reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
   19970 //
   19971 // # -1 can also be used to infer the shape
   19972 //
   19973 // # -1 is inferred to be 9:
   19974 // reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
   19975 //                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
   19976 // # -1 is inferred to be 2:
   19977 // reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
   19978 //                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
   19979 // # -1 is inferred to be 3:
   19980 // reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
   19981 //                               [2, 2, 2],
   19982 //                               [3, 3, 3]],
   19983 //                              [[4, 4, 4],
   19984 //                               [5, 5, 5],
   19985 //                               [6, 6, 6]]]
   19986 //
   19987 // # tensor 't' is [7]
   19988 // # shape `[]` reshapes to a scalar
   19989 // reshape(t, []) ==> 7
   19990 // ```
   19991 //
   19992 // Arguments:
   19993 //
   19994 //	shape: Defines the shape of the output tensor.
   19995 func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output) {
   19996 	if scope.Err() != nil {
   19997 		return
   19998 	}
   19999 	opspec := tf.OpSpec{
   20000 		Type: "Reshape",
   20001 		Input: []tf.Input{
   20002 			tensor, shape,
   20003 		},
   20004 	}
   20005 	op := scope.AddOperation(opspec)
   20006 	return op.Output(0)
   20007 }
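
// Illustrative sketch mirroring the first examples above:
//
// ```go
// s := op.NewScope()
// t := op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
// m := op.Reshape(s, t, op.Const(s, []int32{3, 3}))  // 3x3 matrix
// flat := op.Reshape(s, m, op.Const(s, []int32{-1})) // back to 1-D
// _ = flat
// ```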
   20008 
   20009 // Creates a dataset that splits a SparseTensor into elements row-wise.
   20010 func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output) {
   20011 	if scope.Err() != nil {
   20012 		return
   20013 	}
   20014 	opspec := tf.OpSpec{
   20015 		Type: "SparseTensorSliceDataset",
   20016 		Input: []tf.Input{
   20017 			indices, values, dense_shape,
   20018 		},
   20019 	}
   20020 	op := scope.AddOperation(opspec)
   20021 	return op.Output(0)
   20022 }
   20023 
   20024 // Creates a dataset that concatenates `input_dataset` with `another_dataset`.
   20025 func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   20026 	if scope.Err() != nil {
   20027 		return
   20028 	}
   20029 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20030 	opspec := tf.OpSpec{
   20031 		Type: "ConcatenateDataset",
   20032 		Input: []tf.Input{
   20033 			input_dataset, another_dataset,
   20034 		},
   20035 		Attrs: attrs,
   20036 	}
   20037 	op := scope.AddOperation(opspec)
   20038 	return op.Output(0)
   20039 }
   20040 
   20041 // Adds a value to the current value of a variable.
   20042 //
   20043 // Any ReadVariableOp which depends directly or indirectly on this assign is
   20044 // guaranteed to see the incremented value or a subsequent newer one.
   20045 //
   20046 // Outputs the incremented value, which can be used to totally order the
   20047 // increments to this variable.
   20048 //
   20049 // Arguments:
   20050 //	resource: handle to the resource in which to store the variable.
   20051 //	value: the value by which the variable will be incremented.
   20052 //
   20053 // Returns the created operation.
   20054 func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
   20055 	if scope.Err() != nil {
   20056 		return
   20057 	}
   20058 	opspec := tf.OpSpec{
   20059 		Type: "AssignAddVariableOp",
   20060 		Input: []tf.Input{
   20061 			resource, value,
   20062 		},
   20063 	}
   20064 	return scope.AddOperation(opspec)
   20065 }
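
// Illustrative sketch of incrementing a resource variable (this assumes the
// generated op.VarHandleOp, op.AssignVariableOp and op.ReadVariableOp
// wrappers with the signatures shown; run `init` before `add`):
//
// ```go
// s := op.NewScope()
// v := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
// init := op.AssignVariableOp(s, v, op.Const(s, float32(0)))
// add := op.AssignAddVariableOp(s, v, op.Const(s, float32(1)))
// read := op.ReadVariableOp(s, v, tf.Float)
// _, _, _ = init, add, read
// ```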
   20066 
   20067 // Records the latency of producing `input_dataset` elements in a StatsAggregator.
   20068 func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   20069 	if scope.Err() != nil {
   20070 		return
   20071 	}
   20072 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20073 	opspec := tf.OpSpec{
   20074 		Type: "LatencyStatsDataset",
   20075 		Input: []tf.Input{
   20076 			input_dataset, tag,
   20077 		},
   20078 		Attrs: attrs,
   20079 	}
   20080 	op := scope.AddOperation(opspec)
   20081 	return op.Output(0)
   20082 }
   20083 
   20084 // Convert JSON-encoded Example records to binary protocol buffer strings.
   20085 //
   20086 // This op translates a tensor containing Example records, encoded using
   20087 // the [standard JSON
   20088 // mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
   20089 // into a tensor containing the same records encoded as binary protocol
   20090 // buffers. The resulting tensor can then be fed to any of the other
   20091 // Example-parsing ops.
   20092 //
   20093 // Arguments:
   20094 //	json_examples: Each string is a JSON object serialized according to the JSON
   20095 // mapping of the Example proto.
   20096 //
   20097 // Returns Each string is a binary Example protocol buffer corresponding
   20098 // to the respective element of `json_examples`.
   20099 func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output) {
   20100 	if scope.Err() != nil {
   20101 		return
   20102 	}
   20103 	opspec := tf.OpSpec{
   20104 		Type: "DecodeJSONExample",
   20105 		Input: []tf.Input{
   20106 			json_examples,
   20107 		},
   20108 	}
   20109 	op := scope.AddOperation(opspec)
   20110 	return op.Output(0)
   20111 }
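
// Illustrative sketch (the JSON payload is a made-up, minimal Example):
//
// ```go
// s := op.NewScope()
// jsonExamples := op.Const(s, []string{`{"features": {"feature": {}}}`})
// binExamples := op.DecodeJSONExample(s, jsonExamples)
// _ = binExamples // feed to an Example-parsing op
// ```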
   20112 
   20113 // Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
   20114 //
   20115 // The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
   20116 // `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
   20117 // input channel is processed independently of the others with its own structuring
   20118 // function. The `output` tensor has shape
   20119 // `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
   20120 // tensor depend on the `padding` algorithm. We currently only support the default
   20121 // "NHWC" `data_format`.
   20122 //
   20123 // In detail, the grayscale morphological 2-D dilation is the max-sum correlation
   20124 // (for consistency with `conv2d`, we use unmirrored filters):
   20125 //
   20126 //     output[b, y, x, c] =
   20127 //        max_{dy, dx} input[b,
   20128 //                           strides[1] * y + rates[1] * dy,
   20129 //                           strides[2] * x + rates[2] * dx,
   20130 //                           c] +
   20131 //                     filter[dy, dx, c]
   20132 //
   20133 // Max-pooling is a special case when the filter has size equal to the pooling
   20134 // kernel size and contains all zeros.
   20135 //
   20136 // Note on duality: The dilation of `input` by the `filter` is equal to the
   20137 // negation of the erosion of `-input` by the reflected `filter`.
   20138 //
   20139 // Arguments:
   20140 //	input: 4-D with shape `[batch, in_height, in_width, depth]`.
   20141 //	filter: 3-D with shape `[filter_height, filter_width, depth]`.
   20142 //	strides: The stride of the sliding window for each dimension of the input
   20143 // tensor. Must be: `[1, stride_height, stride_width, 1]`.
   20144 //	rates: The input stride for atrous morphological dilation. Must be:
   20145 // `[1, rate_height, rate_width, 1]`.
   20146 //	padding: The type of padding algorithm to use.
   20147 //
   20148 // Returns 4-D with shape `[batch, out_height, out_width, depth]`.
   20149 func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output) {
   20150 	if scope.Err() != nil {
   20151 		return
   20152 	}
   20153 	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
   20154 	opspec := tf.OpSpec{
   20155 		Type: "Dilation2D",
   20156 		Input: []tf.Input{
   20157 			input, filter,
   20158 		},
   20159 		Attrs: attrs,
   20160 	}
   20161 	op := scope.AddOperation(opspec)
   20162 	return op.Output(0)
   20163 }
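
// Illustrative sketch of the max-pooling special case noted above: an
// all-zero 2x2x1 filter with unit rates turns Dilation2D into 2x2 max
// pooling (the sizes are made up):
//
// ```go
// s := op.NewScope()
// input := op.Placeholder(s, tf.Float) // [batch, height, width, 1]
// zeros := op.Const(s, [][][]float32{{{0}, {0}}, {{0}, {0}}})
// pooled := op.Dilation2D(s, input, zeros,
// 	[]int64{1, 2, 2, 1}, // strides
// 	[]int64{1, 1, 1, 1}, // rates
// 	"VALID")
// _ = pooled
// ```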
   20164 
   20165 // Converts the given variant tensor to an iterator and stores it in the given resource.
   20166 //
   20167 // Arguments:
   20168 //	resource_handle: A handle to an iterator resource.
   20169 //	serialized: A variant tensor storing the state of the iterator contained in the
   20170 // resource.
   20171 //
   20172 // Returns the created operation.
   20173 func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation) {
   20174 	if scope.Err() != nil {
   20175 		return
   20176 	}
   20177 	opspec := tf.OpSpec{
   20178 		Type: "DeserializeIterator",
   20179 		Input: []tf.Input{
   20180 			resource_handle, serialized,
   20181 		},
   20182 	}
   20183 	return scope.AddOperation(opspec)
   20184 }
   20185 
   20186 // TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
   20187 type TensorArrayConcatV2Attr func(optionalAttr)
   20188 
   20189 // TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
   20190 // If not specified, defaults to <unknown_rank:true >
   20191 func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr {
   20192 	return func(m optionalAttr) {
   20193 		m["element_shape_except0"] = value
   20194 	}
   20195 }
   20196 
   20197 // Deprecated. Use TensorArrayConcatV3
   20198 func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output) {
   20199 	if scope.Err() != nil {
   20200 		return
   20201 	}
   20202 	attrs := map[string]interface{}{"dtype": dtype}
   20203 	for _, a := range optional {
   20204 		a(attrs)
   20205 	}
   20206 	opspec := tf.OpSpec{
   20207 		Type: "TensorArrayConcatV2",
   20208 		Input: []tf.Input{
   20209 			handle, flow_in,
   20210 		},
   20211 		Attrs: attrs,
   20212 	}
   20213 	op := scope.AddOperation(opspec)
   20214 	return op.Output(0), op.Output(1)
   20215 }
   20216 
   20217 // Creates a dataset that batches and pads `batch_size` elements from the input.
   20218 //
   20219 // Arguments:
   20220 //
   20221 //	batch_size: A scalar representing the number of elements to accumulate in a
   20222 // batch.
   20223 //	padded_shapes: A list of int64 tensors representing the desired padded shapes
   20224 // of the corresponding output components. These shapes may be partially
   20225 // specified, using `-1` to indicate that a particular dimension should be
   20226 // padded to the maximum size of all batch elements.
   20227 //	padding_values: A list of scalars containing the padding value to use for
   20228 // each of the outputs.
   20229 //
   20230 func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
   20231 	if scope.Err() != nil {
   20232 		return
   20233 	}
   20234 	attrs := map[string]interface{}{"output_shapes": output_shapes}
   20235 	opspec := tf.OpSpec{
   20236 		Type: "PaddedBatchDataset",
   20237 		Input: []tf.Input{
   20238 			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values),
   20239 		},
   20240 		Attrs: attrs,
   20241 	}
   20242 	op := scope.AddOperation(opspec)
   20243 	return op.Output(0)
   20244 }
   20245 
   20246 // Creates a dataset that batches input elements into a SparseTensor.
   20247 //
   20248 // Arguments:
   20249 //	input_dataset: A handle to an input dataset. Must have a single component.
   20250 //	batch_size: A scalar representing the number of elements to accumulate in a
   20251 // batch.
   20252 //	row_shape: A vector representing the dense shape of each row in the produced
   20253 // SparseTensor. The shape may be partially specified, using `-1` to indicate
   20254 // that a particular dimension should use the maximum size of all batch elements.
   20255 //
   20256 //
   20257 func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   20258 	if scope.Err() != nil {
   20259 		return
   20260 	}
   20261 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20262 	opspec := tf.OpSpec{
   20263 		Type: "DenseToSparseBatchDataset",
   20264 		Input: []tf.Input{
   20265 			input_dataset, batch_size, row_shape,
   20266 		},
   20267 		Attrs: attrs,
   20268 	}
   20269 	op := scope.AddOperation(opspec)
   20270 	return op.Output(0)
   20271 }
   20272 
   20273 // Deprecated. Use TensorArrayGradV3
   20274 //
   20275 // DEPRECATED at GraphDef version 26: Use TensorArrayGradV3
   20276 func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output) {
   20277 	if scope.Err() != nil {
   20278 		return
   20279 	}
   20280 	attrs := map[string]interface{}{"source": source}
   20281 	opspec := tf.OpSpec{
   20282 		Type: "TensorArrayGradV2",
   20283 		Input: []tf.Input{
   20284 			handle, flow_in,
   20285 		},
   20286 		Attrs: attrs,
   20287 	}
   20288 	op := scope.AddOperation(opspec)
   20289 	return op.Output(0)
   20290 }
   20291 
   20292 // ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
   20293 type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
   20294 
   20295 // ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
   20296 //
   20297 // value: If True, updating of the var and accum tensors will be protected by
   20298 // a lock; otherwise the behavior is undefined, but may exhibit less contention.
   20299 // If not specified, defaults to false
   20300 func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
   20301 	return func(m optionalAttr) {
   20302 		m["use_locking"] = value
   20303 	}
   20304 }
   20305 
// Updates relevant entries in '*var' and '*accum' according to the adadelta scheme.
//
// Arguments:
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	accum_update: Should be from a Variable().
   20312 //	lr: Learning rate. Must be a scalar.
   20313 //	rho: Decay factor. Must be a scalar.
   20314 //	epsilon: Constant factor. Must be a scalar.
   20315 //	grad: The gradient.
   20316 //	indices: A vector of indices into the first dimension of var and accum.
   20317 //
   20318 // Returns the created operation.
   20319 func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
   20320 	if scope.Err() != nil {
   20321 		return
   20322 	}
   20323 	attrs := map[string]interface{}{}
   20324 	for _, a := range optional {
   20325 		a(attrs)
   20326 	}
   20327 	opspec := tf.OpSpec{
   20328 		Type: "ResourceSparseApplyAdadelta",
   20329 		Input: []tf.Input{
   20330 			var_, accum, accum_update, lr, rho, epsilon, grad, indices,
   20331 		},
   20332 		Attrs: attrs,
   20333 	}
   20334 	return scope.AddOperation(opspec)
   20335 }
   20336 
   20337 // Identity op for gradient debugging.
   20338 //
   20339 // This op is hidden from public in Python. It is used by TensorFlow Debugger to
   20340 // register gradient tensors for gradient debugging.
   20341 // This op operates on non-reference-type tensors.
   20342 func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
   20343 	if scope.Err() != nil {
   20344 		return
   20345 	}
   20346 	opspec := tf.OpSpec{
   20347 		Type: "DebugGradientIdentity",
   20348 		Input: []tf.Input{
   20349 			input,
   20350 		},
   20351 	}
   20352 	op := scope.AddOperation(opspec)
   20353 	return op.Output(0)
   20354 }
   20355 
   20356 // Return substrings from `Tensor` of strings.
   20357 //
   20358 // For each string in the input `Tensor`, creates a substring starting at index
   20359 // `pos` with a total length of `len`.
   20360 //
   20361 // If `len` defines a substring that would extend beyond the length of the input
   20362 // string, then as many characters as possible are used.
   20363 //
   20364 // If `pos` is negative or specifies a character index larger than any of the input
   20365 // strings, then an `InvalidArgumentError` is thrown.
   20366 //
   20367 // `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
   20368 // Op creation.
   20369 //
   20370 // *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
   20371 // broadcasting
   20372 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
   20373 //
   20374 // ---
   20375 //
   20376 // Examples
   20377 //
   20378 // Using scalar `pos` and `len`:
   20379 //
   20380 // ```python
   20381 // input = [b'Hello', b'World']
   20382 // position = 1
   20383 // length = 3
   20384 //
   20385 // output = [b'ell', b'orl']
   20386 // ```
   20387 //
   20388 // Using `pos` and `len` with same shape as `input`:
   20389 //
   20390 // ```python
   20391 // input = [[b'ten', b'eleven', b'twelve'],
   20392 //          [b'thirteen', b'fourteen', b'fifteen'],
   20393 //          [b'sixteen', b'seventeen', b'eighteen']]
   20394 // position = [[1, 2, 3],
   20395 //             [1, 2, 3],
   20396 //             [1, 2, 3]]
   20397 // length =   [[2, 3, 4],
   20398 //             [4, 3, 2],
   20399 //             [5, 5, 5]]
   20400 //
   20401 // output = [[b'en', b'eve', b'lve'],
   20402 //           [b'hirt', b'urt', b'te'],
   20403 //           [b'ixtee', b'vente', b'hteen']]
   20404 // ```
   20405 //
   20406 // Broadcasting `pos` and `len` onto `input`:
   20407 //
   20408 // ```
// ```python
   20410 //          [b'thirteen', b'fourteen', b'fifteen'],
   20411 //          [b'sixteen', b'seventeen', b'eighteen'],
   20412 //          [b'nineteen', b'twenty', b'twentyone']]
   20413 // position = [1, 2, 3]
   20414 // length =   [1, 2, 3]
   20415 //
   20416 // output = [[b'e', b'ev', b'lve'],
   20417 //           [b'h', b'ur', b'tee'],
   20418 //           [b'i', b've', b'hte'],
   20419 //           [b'i', b'en', b'nty']]
   20420 // ```
   20421 //
   20422 // Broadcasting `input` onto `pos` and `len`:
   20423 //
   20424 // ```
// ```python
   20426 // position = [1, 5, 7]
   20427 // length =   [3, 2, 1]
   20428 //
   20429 // output = [b'hir', b'ee', b'n']
   20430 // ```
   20431 //
   20432 // Arguments:
   20433 //	input: Tensor of strings
   20434 //	pos: Scalar defining the position of first character in each substring
   20435 //	len: Scalar defining the number of characters to include in each substring
   20436 //
   20437 // Returns Tensor of substrings
   20438 func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output) (output tf.Output) {
   20439 	if scope.Err() != nil {
   20440 		return
   20441 	}
   20442 	opspec := tf.OpSpec{
   20443 		Type: "Substr",
   20444 		Input: []tf.Input{
   20445 			input, pos, len,
   20446 		},
   20447 	}
   20448 	op := scope.AddOperation(opspec)
   20449 	return op.Output(0)
   20450 }
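
// A Go rendering of the scalar `pos`/`len` example above (a different
// variable name is used for the length to avoid Go's builtin `len`):
//
// ```go
// s := op.NewScope()
// input := op.Const(s, []string{"Hello", "World"})
// pos := op.Const(s, int32(1))
// length := op.Const(s, int32(3))
// out := op.Substr(s, input, pos, length) // ["ell", "orl"]
// _ = out
// ```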
   20451 
   20452 // Creates a Dataset that returns pseudorandom numbers.
   20453 //
   20454 // Arguments:
   20455 //	seed: A scalar seed for the random number generator. If either seed or
   20456 // seed2 is set to be non-zero, the random number generator is seeded
   20457 // by the given seed.  Otherwise, a random seed is used.
   20458 //	seed2: A second scalar seed to avoid seed collision.
   20459 //
   20460 //
   20461 func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   20462 	if scope.Err() != nil {
   20463 		return
   20464 	}
   20465 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20466 	opspec := tf.OpSpec{
   20467 		Type: "RandomDataset",
   20468 		Input: []tf.Input{
   20469 			seed, seed2,
   20470 		},
   20471 		Attrs: attrs,
   20472 	}
   20473 	op := scope.AddOperation(opspec)
   20474 	return op.Output(0)
   20475 }
   20476 
   20477 // Creates a dataset that shuffles and repeats elements from `input_dataset`
   20478 //
   20479 // pseudorandomly.
   20480 //
   20481 // Arguments:
   20482 //
   20483 //	buffer_size: The number of output elements to buffer in an iterator over
   20484 // this dataset. Compare with the `min_after_dequeue` attr when creating a
   20485 // `RandomShuffleQueue`.
   20486 //	seed: A scalar seed for the random number generator. If either `seed` or
   20487 // `seed2` is set to be non-zero, the random number generator is seeded
   20488 // by the given seed.  Otherwise, a random seed is used.
   20489 //	seed2: A second scalar seed to avoid seed collision.
   20490 //	count: A scalar representing the number of times the underlying dataset
   20491 // should be repeated. The default is `-1`, which results in infinite repetition.
   20492 //
   20493 //
   20494 func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   20495 	if scope.Err() != nil {
   20496 		return
   20497 	}
   20498 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20499 	opspec := tf.OpSpec{
   20500 		Type: "ShuffleAndRepeatDataset",
   20501 		Input: []tf.Input{
   20502 			input_dataset, buffer_size, seed, seed2, count,
   20503 		},
   20504 		Attrs: attrs,
   20505 	}
   20506 	op := scope.AddOperation(opspec)
   20507 	return op.Output(0)
   20508 }
   20509 
   20510 // Creates a dataset that caches elements from `input_dataset`.
   20511 //
   20512 // A CacheDataset will iterate over the input_dataset, and store tensors. If the
   20513 // cache already exists, the cache will be used. If the cache is inappropriate
   20514 // (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
// will be returned when the dataset is used.
   20516 //
   20517 // Arguments:
   20518 //
   20519 //	filename: A path on the filesystem where we should cache the dataset. Note: this
   20520 // will be a directory.
   20521 //
   20522 //
   20523 func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   20524 	if scope.Err() != nil {
   20525 		return
   20526 	}
   20527 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20528 	opspec := tf.OpSpec{
   20529 		Type: "CacheDataset",
   20530 		Input: []tf.Input{
   20531 			input_dataset, filename,
   20532 		},
   20533 		Attrs: attrs,
   20534 	}
   20535 	op := scope.AddOperation(opspec)
   20536 	return op.Output(0)
   20537 }
   20538 
   20539 // PlaceholderAttr is an optional argument to Placeholder.
   20540 type PlaceholderAttr func(optionalAttr)
   20541 
   20542 // PlaceholderShape sets the optional shape attribute to value.
   20543 //
   20544 // value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
   20545 // shape is unconstrained.
   20546 // If not specified, defaults to <unknown_rank:true >
   20547 func PlaceholderShape(value tf.Shape) PlaceholderAttr {
   20548 	return func(m optionalAttr) {
   20549 		m["shape"] = value
   20550 	}
   20551 }
   20552 
   20553 // A placeholder op for a value that will be fed into the computation.
   20554 //
   20555 // N.B. This operation will fail with an error if it is executed. It is
   20556 // intended as a way to represent a value that will always be fed, and to
   20557 // provide attrs that enable the fed value to be checked at runtime.
   20558 //
   20559 // Arguments:
   20560 //	dtype: The type of elements in the tensor.
   20561 //
   20562 // Returns A placeholder tensor that must be replaced using the feed mechanism.
   20563 func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
   20564 	if scope.Err() != nil {
   20565 		return
   20566 	}
   20567 	attrs := map[string]interface{}{"dtype": dtype}
   20568 	for _, a := range optional {
   20569 		a(attrs)
   20570 	}
   20571 	opspec := tf.OpSpec{
   20572 		Type: "Placeholder",
   20573 
   20574 		Attrs: attrs,
   20575 	}
   20576 	op := scope.AddOperation(opspec)
   20577 	return op.Output(0)
   20578 }
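
// Illustrative end-to-end sketch of feeding a placeholder through a session
// (op.Mul and the tf session API are used from the client side; the values
// are made up):
//
// ```go
// s := op.NewScope()
// x := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(2)))
// y := op.Mul(s, x, x)
// graph, err := s.Finalize()
// if err != nil { panic(err) }
// sess, err := tf.NewSession(graph, nil)
// if err != nil { panic(err) }
// in, _ := tf.NewTensor([]float32{3, 4})
// res, err := sess.Run(map[tf.Output]*tf.Tensor{x: in}, []tf.Output{y}, nil)
// if err != nil { panic(err) }
// _ = res // res[0].Value() is []float32{9, 16}
// ```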
   20579 
   20580 // Creates a dataset that executes a SQL query and emits rows of the result set.
   20581 //
   20582 // Arguments:
   20583 //	driver_name: The database type. Currently, the only supported type is 'sqlite'.
   20584 //	data_source_name: A connection string to connect to the database.
   20585 //	query: A SQL query to execute.
   20586 //
   20587 //
   20588 func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
   20589 	if scope.Err() != nil {
   20590 		return
   20591 	}
   20592 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20593 	opspec := tf.OpSpec{
   20594 		Type: "SqlDataset",
   20595 		Input: []tf.Input{
   20596 			driver_name, data_source_name, query,
   20597 		},
   20598 		Attrs: attrs,
   20599 	}
   20600 	op := scope.AddOperation(opspec)
   20601 	return op.Output(0)
   20602 }
   20603 
   20604 // Creates a dataset that emits the records from one or more binary files.
   20605 //
   20606 // Arguments:
   20607 //	filenames: A scalar or a vector containing the name(s) of the file(s) to be
   20608 // read.
   20609 //	header_bytes: A scalar representing the number of bytes to skip at the
   20610 // beginning of a file.
   20611 //	record_bytes: A scalar representing the number of bytes in each record.
   20612 //	footer_bytes: A scalar representing the number of bytes to skip at the end
   20613 // of a file.
   20614 //	buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
   20615 func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output) (handle tf.Output) {
   20616 	if scope.Err() != nil {
   20617 		return
   20618 	}
   20619 	opspec := tf.OpSpec{
   20620 		Type: "FixedLengthRecordDataset",
   20621 		Input: []tf.Input{
   20622 			filenames, header_bytes, record_bytes, footer_bytes, buffer_size,
   20623 		},
   20624 	}
   20625 	op := scope.AddOperation(opspec)
   20626 	return op.Output(0)
   20627 }
   20628 
   20629 // Slice a `SparseTensor` based on the `start` and `size`.
   20630 //
   20631 // For example, if the input is
   20632 //
   20633 //     input_tensor = shape = [2, 7]
   20634 //     [    a   d e  ]
   20635 //     [b c          ]
   20636 //
   20637 // Graphically the output tensors are:
   20638 //
   20639 //     sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
   20640 //     [    a  ]
   20641 //     [b c    ]
   20642 //
   20643 //     sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
   20644 //     [ d e  ]
   20645 //     [      ]
   20646 //
   20647 // Arguments:
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	start: 1-D tensor representing the start of the slice.
//	size: 1-D tensor representing the size of the slice.
//
// Returns:
//	output_indices: A list of 1-D tensors representing the indices of the output sparse tensors.
//	output_values: A list of 1-D tensors representing the values of the output sparse tensors.
//	output_shape: A list of 1-D tensors representing the shapes of the output sparse tensors.
   20659 func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
   20660 	if scope.Err() != nil {
   20661 		return
   20662 	}
   20663 	opspec := tf.OpSpec{
   20664 		Type: "SparseSlice",
   20665 		Input: []tf.Input{
   20666 			indices, values, shape, start, size,
   20667 		},
   20668 	}
   20669 	op := scope.AddOperation(opspec)
   20670 	return op.Output(0), op.Output(1), op.Output(2)
   20671 }
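
// A Go rendering of the example above (the positions of the nonzero entries
// are illustrative):
//
// ```go
// s := op.NewScope()
// indices := op.Const(s, [][]int64{{0, 2}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
// values := op.Const(s, []string{"a", "d", "e", "b", "c"})
// shape := op.Const(s, []int64{2, 7})
// outIdx, outVals, outShape := op.SparseSlice(s, indices, values, shape,
// 	op.Const(s, []int64{0, 0}), // start
// 	op.Const(s, []int64{2, 4})) // size
// _, _, _ = outIdx, outVals, outShape
// ```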
   20672 
   20673 // Concatenates quantized tensors along one dimension.
   20674 //
   20675 // Arguments:
   20676 //	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
   20677 // range [0, rank(values)).
   20678 //	values: The `N` Tensors to concatenate. Their ranks and types must match,
   20679 // and their sizes must match in all dimensions except `concat_dim`.
   20680 //	input_mins: The minimum scalar values for each of the input tensors.
   20681 //	input_maxes: The maximum scalar values for each of the input tensors.
   20682 //
// Returns:
//	output: A `Tensor` with the concatenation of values stacked along the
// `concat_dim` dimension.  This tensor's shape matches that of `values`
// except in `concat_dim` where it has the sum of the sizes.
//	output_min: The float value that the minimum quantized output value represents.
//	output_max: The float value that the maximum quantized output value represents.
   20686 func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
   20687 	if scope.Err() != nil {
   20688 		return
   20689 	}
   20690 	opspec := tf.OpSpec{
   20691 		Type: "QuantizedConcat",
   20692 		Input: []tf.Input{
   20693 			concat_dim, tf.OutputList(values), tf.OutputList(input_mins), tf.OutputList(input_maxes),
   20694 		},
   20695 	}
   20696 	op := scope.AddOperation(opspec)
   20697 	return op.Output(0), op.Output(1), op.Output(2)
   20698 }
   20699 
   20700 // Gradients for batch normalization.
   20701 //
   20702 // DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
   20703 //
   20704 // This op is deprecated. See `tf.nn.batch_normalization`.
   20705 //
   20706 // Arguments:
   20707 //	t: A 4D input Tensor.
   20708 //	m: A 1D mean Tensor with size matching the last dimension of t.
   20709 // This is the first output from tf.nn.moments,
   20710 // or a saved moving average thereof.
   20711 //	v: A 1D variance Tensor with size matching the last dimension of t.
   20712 // This is the second output from tf.nn.moments,
   20713 // or a saved moving average thereof.
   20714 //	gamma: A 1D gamma Tensor with size matching the last dimension of t.
   20715 // If "scale_after_normalization" is true, this Tensor will be multiplied
   20716 // with the normalized Tensor.
   20717 //	backprop: 4D backprop Tensor.
   20718 //	variance_epsilon: A small float number to avoid dividing by 0.
   20719 //	scale_after_normalization: A bool indicating whether the resulted tensor
   20720 // needs to be multiplied with gamma.
   20721 //
   20722 // Returns 4D backprop tensor for input.1D backprop tensor for mean.1D backprop tensor for variance.1D backprop tensor for beta.1D backprop tensor for gamma.
// Returns:
//	dx: 4D backprop tensor for input.
//	dm: 1D backprop tensor for mean.
//	dv: 1D backprop tensor for variance.
//	db: 1D backprop tensor for beta.
//	dg: 1D backprop tensor for gamma.
   20724 	if scope.Err() != nil {
   20725 		return
   20726 	}
   20727 	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
   20728 	opspec := tf.OpSpec{
   20729 		Type: "BatchNormWithGlobalNormalizationGrad",
   20730 		Input: []tf.Input{
   20731 			t, m, v, gamma, backprop,
   20732 		},
   20733 		Attrs: attrs,
   20734 	}
   20735 	op := scope.AddOperation(opspec)
   20736 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
   20737 }
   20738 
   20739 // Creates a dataset that emits the records from one or more TFRecord files.
   20740 //
   20741 // Arguments:
   20742 //	filenames: A scalar or vector containing the name(s) of the file(s) to be
   20743 // read.
   20744 //	compression_type: A scalar containing either (i) the empty string (no
   20745 // compression), (ii) "ZLIB", or (iii) "GZIP".
   20746 //	buffer_size: A scalar representing the number of bytes to buffer. A value of
   20747 // 0 means no buffering will be performed.
   20748 func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
   20749 	if scope.Err() != nil {
   20750 		return
   20751 	}
   20752 	opspec := tf.OpSpec{
   20753 		Type: "TFRecordDataset",
   20754 		Input: []tf.Input{
   20755 			filenames, compression_type, buffer_size,
   20756 		},
   20757 	}
   20758 	op := scope.AddOperation(opspec)
   20759 	return op.Output(0)
   20760 }
   20761 
   20762 // BatchToSpace for 4-D tensors of type T.
   20763 //
   20764 // This is a legacy version of the more general BatchToSpaceND.
   20765 //
   20766 // Rearranges (permutes) data from batch into blocks of spatial data, followed by
   20767 // cropping. This is the reverse transformation of SpaceToBatch. More specifically,
   20768 // this op outputs a copy of the input tensor where values from the `batch`
   20769 // dimension are moved in spatial blocks to the `height` and `width` dimensions,
   20770 // followed by cropping along the `height` and `width` dimensions.
   20771 //
   20772 // Arguments:
   20773 //	input: 4-D tensor with shape
   20774 // `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
   20775 //   depth]`. Note that the batch size of the input tensor must be divisible by
   20776 // `block_size * block_size`.
   20777 //	crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
   20778 // how many elements to crop from the intermediate result across the spatial
   20779 // dimensions as follows:
   20780 //
   20781 //     crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
   20782 //
   20783 //
   20784 // Returns 4-D with shape `[batch, height, width, depth]`, where:
   20785 //
   20786 //       height = height_pad - crop_top - crop_bottom
   20787 //       width = width_pad - crop_left - crop_right
   20788 //
   20789 // The attr `block_size` must be greater than one. It indicates the block size.
   20790 //
   20791 // Some examples:
   20792 //
   20793 // (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
   20794 //
   20795 // ```
   20796 // [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
   20797 // ```
   20798 //
   20799 // The output tensor has shape `[1, 2, 2, 1]` and value:
   20800 //
   20801 // ```
   20802 // x = [[[[1], [2]], [[3], [4]]]]
   20803 // ```
   20804 //
   20805 // (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
   20806 //
   20807 // ```
   20808 // [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
   20809 // ```
   20810 //
   20811 // The output tensor has shape `[1, 2, 2, 3]` and value:
   20812 //
   20813 // ```
   20814 // x = [[[[1, 2, 3], [4, 5, 6]],
   20815 //       [[7, 8, 9], [10, 11, 12]]]]
   20816 // ```
   20817 //
   20818 // (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
   20819 //
   20820 // ```
   20821 // x = [[[[1], [3]], [[9], [11]]],
   20822 //      [[[2], [4]], [[10], [12]]],
   20823 //      [[[5], [7]], [[13], [15]]],
   20824 //      [[[6], [8]], [[14], [16]]]]
   20825 // ```
   20826 //
   20827 // The output tensor has shape `[1, 4, 4, 1]` and value:
   20828 //
   20829 // ```
// x = [[[[1],   [2],  [3],  [4]],
//       [[5],   [6],  [7],  [8]],
//       [[9],  [10], [11],  [12]],
//       [[13], [14], [15],  [16]]]]
   20834 // ```
   20835 //
   20836 // (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
   20837 //
   20838 // ```
   20839 // x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
   20840 //      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
   20841 // ```
   20842 //
   20843 // The output tensor has shape `[2, 2, 4, 1]` and value:
   20844 //
   20845 // ```
// x = [[[[1], [2], [3], [4]],
//       [[5], [6], [7], [8]]],
//      [[[9], [10], [11], [12]],
//       [[13], [14], [15], [16]]]]
   20850 // ```
   20851 func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
   20852 	if scope.Err() != nil {
   20853 		return
   20854 	}
   20855 	attrs := map[string]interface{}{"block_size": block_size}
   20856 	opspec := tf.OpSpec{
   20857 		Type: "BatchToSpace",
   20858 		Input: []tf.Input{
   20859 			input, crops,
   20860 		},
   20861 		Attrs: attrs,
   20862 	}
   20863 	op := scope.AddOperation(opspec)
   20864 	return op.Output(0)
   20865 }
   20866 
   20867 // Makes a new iterator from the given `dataset` and stores it in `iterator`.
   20868 //
   20869 // This operation may be executed multiple times. Each execution will reset the
   20870 // iterator in `iterator` to the first element of `dataset`.
   20871 //
   20872 // Returns the created operation.
   20873 func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation) {
   20874 	if scope.Err() != nil {
   20875 		return
   20876 	}
   20877 	opspec := tf.OpSpec{
   20878 		Type: "MakeIterator",
   20879 		Input: []tf.Input{
   20880 			dataset, iterator,
   20881 		},
   20882 	}
   20883 	return scope.AddOperation(opspec)
   20884 }
   20885 
   20886 // Adjust the contrast of one or more images.
   20887 //
   20888 // `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
   20889 // interpreted as `[height, width, channels]`.  The other dimensions only
// represent a collection of images, such as `[batch, height, width, channels]`.
   20891 //
   20892 // Contrast is adjusted independently for each channel of each image.
   20893 //
   20894 // For each channel, the Op first computes the mean of the image pixels in the
   20895 // channel and then adjusts each component of each pixel to
   20896 // `(x - mean) * contrast_factor + mean`.
   20897 //
   20898 // Arguments:
   20899 //	images: Images to adjust.  At least 3-D.
   20900 //	contrast_factor: A float multiplier for adjusting contrast.
   20901 //
   20902 // Returns The contrast-adjusted image or images.
   20903 func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) {
   20904 	if scope.Err() != nil {
   20905 		return
   20906 	}
   20907 	opspec := tf.OpSpec{
   20908 		Type: "AdjustContrastv2",
   20909 		Input: []tf.Input{
   20910 			images, contrast_factor,
   20911 		},
   20912 	}
   20913 	op := scope.AddOperation(opspec)
   20914 	return op.Output(0)
   20915 }
   20916 
   20917 // Gets the next output from the given iterator.
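//
// A minimal sketch, assuming the iterator's elements are scalar floats; for
// illustration the iterator resource is obtained from a hypothetical
// serialized string handle via IteratorFromStringHandle (defined below):
//
// ```go
// s := op.NewScope()
// iterator := op.IteratorFromStringHandle(s, op.Const(s, "some-handle"))
// components := op.IteratorGetNext(s, iterator,
//     []tf.DataType{tf.Float},
//     []tf.Shape{tf.ScalarShape()})
// _ = components
// ```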
   20918 func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
   20919 	if scope.Err() != nil {
   20920 		return
   20921 	}
   20922 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20923 	opspec := tf.OpSpec{
   20924 		Type: "IteratorGetNext",
   20925 		Input: []tf.Input{
   20926 			iterator,
   20927 		},
   20928 		Attrs: attrs,
   20929 	}
   20930 	op := scope.AddOperation(opspec)
   20931 	if scope.Err() != nil {
   20932 		return
   20933 	}
   20934 	var idx int
   20935 	var err error
   20936 	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
   20937 		scope.UpdateErr("IteratorGetNext", err)
   20938 		return
   20939 	}
   20940 	return components
   20941 }
   20942 
   20943 // Outputs the single element from the given dataset.
   20944 //
   20945 // Arguments:
   20946 //	dataset: A handle to a dataset that contains a single element.
   20947 //
   20948 //
   20949 //
   20950 // Returns The components of the single element of `input`.
   20951 func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
   20952 	if scope.Err() != nil {
   20953 		return
   20954 	}
   20955 	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
   20956 	opspec := tf.OpSpec{
   20957 		Type: "DatasetToSingleElement",
   20958 		Input: []tf.Input{
   20959 			dataset,
   20960 		},
   20961 		Attrs: attrs,
   20962 	}
   20963 	op := scope.AddOperation(opspec)
   20964 	if scope.Err() != nil {
   20965 		return
   20966 	}
   20967 	var idx int
   20968 	var err error
   20969 	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
   20970 		scope.UpdateErr("DatasetToSingleElement", err)
   20971 		return
   20972 	}
   20973 	return components
   20974 }
   20975 
   20976 // Converts the given `resource_handle` representing an iterator to a string.
   20977 //
   20978 // Arguments:
   20979 //	resource_handle: A handle to an iterator resource.
   20980 //
   20981 // Returns A string representation of the given handle.
   20982 func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output) {
   20983 	if scope.Err() != nil {
   20984 		return
   20985 	}
   20986 	opspec := tf.OpSpec{
   20987 		Type: "IteratorToStringHandle",
   20988 		Input: []tf.Input{
   20989 			resource_handle,
   20990 		},
   20991 	}
   20992 	op := scope.AddOperation(opspec)
   20993 	return op.Output(0)
   20994 }
   20995 
   20996 // ShapeNAttr is an optional argument to ShapeN.
   20997 type ShapeNAttr func(optionalAttr)
   20998 
   20999 // ShapeNOutType sets the optional out_type attribute to value.
   21000 // If not specified, defaults to DT_INT32
   21001 func ShapeNOutType(value tf.DataType) ShapeNAttr {
   21002 	return func(m optionalAttr) {
   21003 		m["out_type"] = value
   21004 	}
   21005 }
   21006 
   21007 // Returns shape of tensors.
   21008 //
// This operation returns N 1-D integer tensors representing the shape of `input[i]`.
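//
// For example, a sketch returning the shapes of two constants at once:
//
// ```go
// s := op.NewScope()
// a := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
// b := op.Const(s, []float32{1, 2})                   // shape [2]
// shapes := op.ShapeN(s, []tf.Output{a, b})           // yields [2 3] and [2]
// _ = shapes
// ```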
   21010 func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output) {
   21011 	if scope.Err() != nil {
   21012 		return
   21013 	}
   21014 	attrs := map[string]interface{}{}
   21015 	for _, a := range optional {
   21016 		a(attrs)
   21017 	}
   21018 	opspec := tf.OpSpec{
   21019 		Type: "ShapeN",
   21020 		Input: []tf.Input{
   21021 			tf.OutputList(input),
   21022 		},
   21023 		Attrs: attrs,
   21024 	}
   21025 	op := scope.AddOperation(opspec)
   21026 	if scope.Err() != nil {
   21027 		return
   21028 	}
   21029 	var idx int
   21030 	var err error
   21031 	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
   21032 		scope.UpdateErr("ShapeN", err)
   21033 		return
   21034 	}
   21035 	return output
   21036 }
   21037 
   21038 // IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
   21039 type IteratorFromStringHandleAttr func(optionalAttr)
   21040 
   21041 // IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
   21042 //
   21043 // value: If specified, defines the type of each tuple component in an
   21044 // element produced by the resulting iterator.
   21045 // If not specified, defaults to <>
   21046 //
   21047 // REQUIRES: len(value) >= 0
   21048 func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr {
   21049 	return func(m optionalAttr) {
   21050 		m["output_types"] = value
   21051 	}
   21052 }
   21053 
   21054 // IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
   21055 //
   21056 // value: If specified, defines the shape of each tuple component in an
   21057 // element produced by the resulting iterator.
   21058 // If not specified, defaults to <>
   21059 //
   21060 // REQUIRES: len(value) >= 0
   21061 func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr {
   21062 	return func(m optionalAttr) {
   21063 		m["output_shapes"] = value
   21064 	}
   21065 }
   21066 
   21067 // Converts the given string representing a handle to an iterator to a resource.
   21068 //
   21069 // Arguments:
   21070 //	string_handle: A string representation of the given handle.
   21071 //
   21072 // Returns A handle to an iterator resource.
   21073 func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) {
   21074 	if scope.Err() != nil {
   21075 		return
   21076 	}
   21077 	attrs := map[string]interface{}{}
   21078 	for _, a := range optional {
   21079 		a(attrs)
   21080 	}
   21081 	opspec := tf.OpSpec{
   21082 		Type: "IteratorFromStringHandle",
   21083 		Input: []tf.Input{
   21084 			string_handle,
   21085 		},
   21086 		Attrs: attrs,
   21087 	}
   21088 	op := scope.AddOperation(opspec)
   21089 	return op.Output(0)
   21090 }
   21091 
   21092 // Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
   21093 //
   21094 // This is the angle \( \theta \in [-\pi, \pi] \) such that
   21095 // \[ x = r \cos(\theta) \]
   21096 // and
   21097 // \[ y = r \sin(\theta) \]
// where \(r = \sqrt{x^2 + y^2}\).
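//
// For example, with y = x = 1 the result is pi/4 (a sketch):
//
// ```go
// s := op.NewScope()
// y := op.Const(s, float32(1))
// x := op.Const(s, float32(1))
// z := op.Atan2(s, y, x) // ~0.78539816 (pi/4)
// _ = z
// ```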
   21099 func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
   21100 	if scope.Err() != nil {
   21101 		return
   21102 	}
   21103 	opspec := tf.OpSpec{
   21104 		Type: "Atan2",
   21105 		Input: []tf.Input{
   21106 			y, x,
   21107 		},
   21108 	}
   21109 	op := scope.AddOperation(opspec)
   21110 	return op.Output(0)
   21111 }
   21112 
   21113 // Return a tensor with the same shape and contents as the input tensor or value.
   21114 func Identity(scope *Scope, input tf.Output) (output tf.Output) {
   21115 	if scope.Err() != nil {
   21116 		return
   21117 	}
   21118 	opspec := tf.OpSpec{
   21119 		Type: "Identity",
   21120 		Input: []tf.Input{
   21121 			input,
   21122 		},
   21123 	}
   21124 	op := scope.AddOperation(opspec)
   21125 	return op.Output(0)
   21126 }
   21127 
   21128 // Gather slices from `params` axis `axis` according to `indices`.
   21129 //
   21130 // `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
   21131 // Produces an output tensor with shape `params.shape[:axis] + indices.shape +
   21132 // params.shape[axis + 1:]` where:
   21133 //
   21134 // ```python
   21135 //     # Scalar indices (output is rank(params) - 1).
   21136 //     output[a_0, ..., a_n, b_0, ..., b_n] =
   21137 //       params[a_0, ..., a_n, indices, b_0, ..., b_n]
   21138 //
   21139 //     # Vector indices (output is rank(params)).
   21140 //     output[a_0, ..., a_n, i, b_0, ..., b_n] =
   21141 //       params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
   21142 //
   21143 //     # Higher rank indices (output is rank(params) + rank(indices) - 1).
   21144 //     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
   21145 //       params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
   21146 // ```
   21147 //
   21148 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   21149 // <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
   21150 // </div>
   21151 //
   21152 // Arguments:
   21153 //	params: The tensor from which to gather values. Must be at least rank
   21154 // `axis + 1`.
   21155 //	indices: Index tensor. Must be in range `[0, params.shape[axis])`.
   21156 //	axis: The axis in `params` to gather `indices` from. Defaults to the first
   21157 // dimension. Supports negative indexes.
   21158 //
   21159 // Returns Values from `params` gathered from indices given by `indices`, with
   21160 // shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
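//
// For example, gathering along axis 0 of a vector (a sketch):
//
// ```go
// s := op.NewScope()
// params := op.Const(s, []float32{10, 20, 30, 40})
// indices := op.Const(s, []int32{3, 0})
// axis := op.Const(s, int32(0))
// out := op.GatherV2(s, params, indices, axis) // [40, 10]
// _ = out
// ```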
   21161 func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output) (output tf.Output) {
   21162 	if scope.Err() != nil {
   21163 		return
   21164 	}
   21165 	opspec := tf.OpSpec{
   21166 		Type: "GatherV2",
   21167 		Input: []tf.Input{
   21168 			params, indices, axis,
   21169 		},
   21170 	}
   21171 	op := scope.AddOperation(opspec)
   21172 	return op.Output(0)
   21173 }
   21174 
   21175 // Converts the given `resource_handle` representing an iterator to a variant tensor.
   21176 //
   21177 // Arguments:
   21178 //	resource_handle: A handle to an iterator resource.
   21179 //
   21180 // Returns A variant tensor storing the state of the iterator contained in the
   21181 // resource.
   21182 func SerializeIterator(scope *Scope, resource_handle tf.Output) (serialized tf.Output) {
   21183 	if scope.Err() != nil {
   21184 		return
   21185 	}
   21186 	opspec := tf.OpSpec{
   21187 		Type: "SerializeIterator",
   21188 		Input: []tf.Input{
   21189 			resource_handle,
   21190 		},
   21191 	}
   21192 	op := scope.AddOperation(opspec)
   21193 	return op.Output(0)
   21194 }
   21195 
   21196 // FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
   21197 type FIFOQueueV2Attr func(optionalAttr)
   21198 
   21199 // FIFOQueueV2Shapes sets the optional shapes attribute to value.
   21200 //
   21201 // value: The shape of each component in a value. The length of this attr must
   21202 // be either 0 or the same as the length of component_types. If the length of
   21203 // this attr is 0, the shapes of queue elements are not constrained, and
   21204 // only one element may be dequeued at a time.
   21205 // If not specified, defaults to <>
   21206 //
   21207 // REQUIRES: len(value) >= 0
   21208 func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr {
   21209 	return func(m optionalAttr) {
   21210 		m["shapes"] = value
   21211 	}
   21212 }
   21213 
   21214 // FIFOQueueV2Capacity sets the optional capacity attribute to value.
   21215 //
   21216 // value: The upper bound on the number of elements in this queue.
   21217 // Negative numbers mean no limit.
   21218 // If not specified, defaults to -1
   21219 func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr {
   21220 	return func(m optionalAttr) {
   21221 		m["capacity"] = value
   21222 	}
   21223 }
   21224 
   21225 // FIFOQueueV2Container sets the optional container attribute to value.
   21226 //
   21227 // value: If non-empty, this queue is placed in the given container.
   21228 // Otherwise, a default container is used.
   21229 // If not specified, defaults to ""
   21230 func FIFOQueueV2Container(value string) FIFOQueueV2Attr {
   21231 	return func(m optionalAttr) {
   21232 		m["container"] = value
   21233 	}
   21234 }
   21235 
   21236 // FIFOQueueV2SharedName sets the optional shared_name attribute to value.
   21237 //
   21238 // value: If non-empty, this queue will be shared under the given name
   21239 // across multiple sessions.
   21240 // If not specified, defaults to ""
   21241 func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr {
   21242 	return func(m optionalAttr) {
   21243 		m["shared_name"] = value
   21244 	}
   21245 }
   21246 
   21247 // A queue that produces elements in first-in first-out order.
   21248 //
   21249 // Arguments:
   21250 //	component_types: The type of each component in a value.
   21251 //
   21252 // Returns The handle to the queue.
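//
// For example, a sketch of creating a bounded queue of scalar floats using
// the optional-attribute functions above (the shared name "example_queue" is
// a hypothetical placeholder):
//
// ```go
// s := op.NewScope()
// q := op.FIFOQueueV2(s,
//     []tf.DataType{tf.Float},
//     op.FIFOQueueV2Capacity(10),
//     op.FIFOQueueV2SharedName("example_queue"))
// _ = q
// ```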
   21253 func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output) {
   21254 	if scope.Err() != nil {
   21255 		return
   21256 	}
   21257 	attrs := map[string]interface{}{"component_types": component_types}
   21258 	for _, a := range optional {
   21259 		a(attrs)
   21260 	}
   21261 	opspec := tf.OpSpec{
   21262 		Type: "FIFOQueueV2",
   21263 
   21264 		Attrs: attrs,
   21265 	}
   21266 	op := scope.AddOperation(opspec)
   21267 	return op.Output(0)
   21268 }
   21269 
   21270 // Produces a summary of any statistics recorded by the given statistics manager.
   21271 func StatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
   21272 	if scope.Err() != nil {
   21273 		return
   21274 	}
   21275 	opspec := tf.OpSpec{
   21276 		Type: "StatsAggregatorSummary",
   21277 		Input: []tf.Input{
   21278 			iterator,
   21279 		},
   21280 	}
   21281 	op := scope.AddOperation(opspec)
   21282 	return op.Output(0)
   21283 }
   21284 
   21285 // Compute the pairwise cross product.
   21286 //
   21287 // `a` and `b` must be the same shape; they can either be simple 3-element vectors,
   21288 // or any shape where the innermost dimension is 3. In the latter case, each pair
   21289 // of corresponding 3-element vectors is cross-multiplied independently.
   21290 //
   21291 // Arguments:
   21292 //	a: A tensor containing 3-element vectors.
   21293 //	b: Another tensor, of same type and shape as `a`.
   21294 //
   21295 // Returns Pairwise cross product of the vectors in `a` and `b`.
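//
// For example, the cross product of the unit vectors along x and y is the
// unit vector along z (a sketch):
//
// ```go
// s := op.NewScope()
// a := op.Const(s, []float32{1, 0, 0})
// b := op.Const(s, []float32{0, 1, 0})
// p := op.Cross(s, a, b) // [0, 0, 1]
// _ = p
// ```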
   21296 func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
   21297 	if scope.Err() != nil {
   21298 		return
   21299 	}
   21300 	opspec := tf.OpSpec{
   21301 		Type: "Cross",
   21302 		Input: []tf.Input{
   21303 			a, b,
   21304 		},
   21305 	}
   21306 	op := scope.AddOperation(opspec)
   21307 	return op.Output(0)
   21308 }
   21309 
   21310 // Performs a padding as a preprocess during a convolution.
   21311 //
// Similar to FusedResizeAndPadConv2D, this op allows for an optimized
   21313 // implementation where the spatial padding transformation stage is fused with the
   21314 // im2col lookup, but in this case without the bilinear filtering required for
   21315 // resizing. Fusing the padding prevents the need to write out the intermediate
   21316 // results as whole tensors, reducing memory pressure, and we can get some latency
   21317 // gains by merging the transformation calculations.
   21318 // The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
   21319 // order is used instead.
   21320 // Internally this op uses a single per-graph scratch buffer, which means that it
   21321 // will block if multiple versions are being run in parallel. This is because this
   21322 // operator is primarily an optimization to minimize memory usage.
   21323 //
   21324 // Arguments:
   21325 //	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
   21326 //	paddings: A two-column matrix specifying the padding sizes. The number of
   21327 // rows must be the same as the rank of `input`.
   21328 //	filter: 4-D with shape
   21329 // `[filter_height, filter_width, in_channels, out_channels]`.
   21330 //
   21331 //	strides: 1-D of length 4.  The stride of the sliding window for each dimension
   21332 // of `input`. Must be in the same order as the dimension specified with format.
   21333 //	padding: The type of padding algorithm to use.
   21334 func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output) {
   21335 	if scope.Err() != nil {
   21336 		return
   21337 	}
   21338 	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
   21339 	opspec := tf.OpSpec{
   21340 		Type: "FusedPadConv2D",
   21341 		Input: []tf.Input{
   21342 			input, paddings, filter,
   21343 		},
   21344 		Attrs: attrs,
   21345 	}
   21346 	op := scope.AddOperation(opspec)
   21347 	return op.Output(0)
   21348 }
   21349 
   21350 // Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
   21351 type Conv2DBackpropInputAttr func(optionalAttr)
   21352 
   21353 // Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
   21354 // If not specified, defaults to true
   21355 func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr {
   21356 	return func(m optionalAttr) {
   21357 		m["use_cudnn_on_gpu"] = value
   21358 	}
   21359 }
   21360 
   21361 // Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
   21362 //
   21363 // value: Specify the data format of the input and output data. With the
   21364 // default format "NHWC", the data is stored in the order of:
   21365 //     [batch, in_height, in_width, in_channels].
   21366 // Alternatively, the format could be "NCHW", the data storage order of:
   21367 //     [batch, in_channels, in_height, in_width].
   21368 // If not specified, defaults to "NHWC"
   21369 func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr {
   21370 	return func(m optionalAttr) {
   21371 		m["data_format"] = value
   21372 	}
   21373 }
   21374 
   21375 // Conv2DBackpropInputDilations sets the optional dilations attribute to value.
   21376 //
   21377 // value: 1-D tensor of length 4.  The dilation factor for each dimension of
   21378 // `input`. If set to k > 1, there will be k-1 skipped cells between each filter
   21379 // element on that dimension. The dimension order is determined by the value of
   21380 // `data_format`, see above for details. Dilations in the batch and depth
   21381 // dimensions must be 1.
   21382 // If not specified, defaults to <i:1 i:1 i:1 i:1 >
   21383 func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr {
   21384 	return func(m optionalAttr) {
   21385 		m["dilations"] = value
   21386 	}
   21387 }
   21388 
   21389 // Computes the gradients of convolution with respect to the input.
   21390 //
   21391 // Arguments:
   21392 //	input_sizes: An integer vector representing the shape of `input`,
   21393 // where `input` is a 4-D `[batch, height, width, channels]` tensor.
   21394 //	filter: 4-D with shape
   21395 // `[filter_height, filter_width, in_channels, out_channels]`.
   21396 //	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
   21397 // Gradients w.r.t. the output of the convolution.
   21398 //	strides: The stride of the sliding window for each dimension of the input
   21399 // of the convolution. Must be in the same order as the dimension specified with
   21400 // format.
   21401 //	padding: The type of padding algorithm to use.
   21402 //
   21403 // Returns 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
   21404 // w.r.t. the input of the convolution.
   21405 func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output) {
   21406 	if scope.Err() != nil {
   21407 		return
   21408 	}
   21409 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   21410 	for _, a := range optional {
   21411 		a(attrs)
   21412 	}
   21413 	opspec := tf.OpSpec{
   21414 		Type: "Conv2DBackpropInput",
   21415 		Input: []tf.Input{
   21416 			input_sizes, filter, out_backprop,
   21417 		},
   21418 		Attrs: attrs,
   21419 	}
   21420 	op := scope.AddOperation(opspec)
   21421 	return op.Output(0)
   21422 }
   21423 
   21424 // Interleave the values from the `data` tensors into a single tensor.
   21425 //
   21426 // Builds a merged tensor such that
   21427 //
   21428 // ```python
   21429 //     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
   21430 // ```
   21431 //
   21432 // For example, if each `indices[m]` is scalar or vector, we have
   21433 //
   21434 // ```python
   21435 //     # Scalar indices:
   21436 //     merged[indices[m], ...] = data[m][...]
   21437 //
   21438 //     # Vector indices:
   21439 //     merged[indices[m][i], ...] = data[m][i, ...]
   21440 // ```
   21441 //
   21442 // Each `data[i].shape` must start with the corresponding `indices[i].shape`,
   21443 // and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
   21444 // must have `data[i].shape = indices[i].shape + constant`.  In terms of this
   21445 // `constant`, the output shape is
   21446 //
   21447 //     merged.shape = [max(indices)] + constant
   21448 //
   21449 // Values are merged in order, so if an index appears in both `indices[m][i]` and
   21450 // `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
   21451 // merged result. If you do not need this guarantee, ParallelDynamicStitch might
   21452 // perform better on some devices.
   21453 //
   21454 // For example:
   21455 //
   21456 // ```python
   21457 //     indices[0] = 6
   21458 //     indices[1] = [4, 1]
   21459 //     indices[2] = [[5, 2], [0, 3]]
   21460 //     data[0] = [61, 62]
   21461 //     data[1] = [[41, 42], [11, 12]]
   21462 //     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
   21463 //     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
   21464 //               [51, 52], [61, 62]]
   21465 // ```
   21466 //
   21467 // This method can be used to merge partitions created by `dynamic_partition`
   21468 // as illustrated on the following example:
   21469 //
   21470 // ```python
   21471 //     # Apply function (increments x_i) on elements for which a certain condition
   21472 //     # apply (x_i != -1 in this example).
   21473 //     x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
   21474 //     condition_mask=tf.not_equal(x,tf.constant(-1.))
   21475 //     partitioned_data = tf.dynamic_partition(
   21476 //         x, tf.cast(condition_mask, tf.int32) , 2)
   21477 //     partitioned_data[1] = partitioned_data[1] + 1.0
   21478 //     condition_indices = tf.dynamic_partition(
   21479 //         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
   21480 //     x = tf.dynamic_stitch(condition_indices, partitioned_data)
   21481 //     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
   21482 //     # unchanged.
   21483 // ```
   21484 //
   21485 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   21486 // <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
   21487 // </div>
   21488 func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
   21489 	if scope.Err() != nil {
   21490 		return
   21491 	}
   21492 	opspec := tf.OpSpec{
   21493 		Type: "DynamicStitch",
   21494 		Input: []tf.Input{
   21495 			tf.OutputList(indices), tf.OutputList(data),
   21496 		},
   21497 	}
   21498 	op := scope.AddOperation(opspec)
   21499 	return op.Output(0)
   21500 }
   21501 
   21502 // Returns the truth value of (x == y) element-wise.
   21503 //
   21504 // *NOTE*: `Equal` supports broadcasting. More about broadcasting
   21505 // [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
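//
// For example, comparing a vector against a broadcast scalar (a sketch):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{1, 2, 3})
// y := op.Const(s, int32(2))
// z := op.Equal(s, x, y) // [false, true, false]
// _ = z
// ```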
   21506 func Equal(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   21507 	if scope.Err() != nil {
   21508 		return
   21509 	}
   21510 	opspec := tf.OpSpec{
   21511 		Type: "Equal",
   21512 		Input: []tf.Input{
   21513 			x, y,
   21514 		},
   21515 	}
   21516 	op := scope.AddOperation(opspec)
   21517 	return op.Output(0)
   21518 }
   21519 
   21520 // TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
   21521 type TensorArrayGatherV2Attr func(optionalAttr)
   21522 
   21523 // TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value.
   21524 // If not specified, defaults to <unknown_rank:true >
   21525 func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr {
   21526 	return func(m optionalAttr) {
   21527 		m["element_shape"] = value
   21528 	}
   21529 }
   21530 
   21531 // Deprecated. Use TensorArrayGatherV3
   21532 //
   21533 // DEPRECATED at GraphDef version 26: Use TensorArrayGatherV3
   21534 func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output) {
   21535 	if scope.Err() != nil {
   21536 		return
   21537 	}
   21538 	attrs := map[string]interface{}{"dtype": dtype}
   21539 	for _, a := range optional {
   21540 		a(attrs)
   21541 	}
   21542 	opspec := tf.OpSpec{
   21543 		Type: "TensorArrayGatherV2",
   21544 		Input: []tf.Input{
   21545 			handle, indices, flow_in,
   21546 		},
   21547 		Attrs: attrs,
   21548 	}
   21549 	op := scope.AddOperation(opspec)
   21550 	return op.Output(0)
   21551 }
   21552 
   21553 // Interleave the values from the `data` tensors into a single tensor.
   21554 //
   21555 // Builds a merged tensor such that
   21556 //
   21557 // ```python
   21558 //     merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
   21559 // ```
   21560 //
   21561 // For example, if each `indices[m]` is scalar or vector, we have
   21562 //
   21563 // ```python
   21564 //     # Scalar indices:
   21565 //     merged[indices[m], ...] = data[m][...]
   21566 //
   21567 //     # Vector indices:
   21568 //     merged[indices[m][i], ...] = data[m][i, ...]
   21569 // ```
   21570 //
   21571 // Each `data[i].shape` must start with the corresponding `indices[i].shape`,
   21572 // and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
   21573 // must have `data[i].shape = indices[i].shape + constant`.  In terms of this
   21574 // `constant`, the output shape is
   21575 //
   21576 //     merged.shape = [max(indices)] + constant
   21577 //
   21578 // Values may be merged in parallel, so if an index appears in both `indices[m][i]`
   21579 // and `indices[n][j]`, the result may be invalid. This differs from the normal
   21580 // DynamicStitch operator that defines the behavior in that case.
   21581 //
   21582 // For example:
   21583 //
   21584 // ```python
   21585 //     indices[0] = 6
   21586 //     indices[1] = [4, 1]
   21587 //     indices[2] = [[5, 2], [0, 3]]
   21588 //     data[0] = [61, 62]
   21589 //     data[1] = [[41, 42], [11, 12]]
   21590 //     data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
   21591 //     merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
   21592 //               [51, 52], [61, 62]]
   21593 // ```
   21594 //
   21595 // This method can be used to merge partitions created by `dynamic_partition`
   21596 // as illustrated on the following example:
   21597 //
   21598 // ```python
   21599 //     # Apply function (increments x_i) on elements for which a certain condition
   21600 //     # apply (x_i != -1 in this example).
   21601 //     x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
   21602 //     condition_mask=tf.not_equal(x,tf.constant(-1.))
   21603 //     partitioned_data = tf.dynamic_partition(
   21604 //         x, tf.cast(condition_mask, tf.int32) , 2)
   21605 //     partitioned_data[1] = partitioned_data[1] + 1.0
   21606 //     condition_indices = tf.dynamic_partition(
   21607 //         tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
   21608 //     x = tf.dynamic_stitch(condition_indices, partitioned_data)
   21609 //     # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
   21610 //     # unchanged.
   21611 // ```
   21612 //
   21613 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   21614 // <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
   21615 // </div>
   21616 func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
   21617 	if scope.Err() != nil {
   21618 		return
   21619 	}
   21620 	opspec := tf.OpSpec{
   21621 		Type: "ParallelDynamicStitch",
   21622 		Input: []tf.Input{
   21623 			tf.OutputList(indices), tf.OutputList(data),
   21624 		},
   21625 	}
   21626 	op := scope.AddOperation(opspec)
   21627 	return op.Output(0)
   21628 }
   21629 
   21630 // Computes the gradient for the inverse of `x` wrt its input.
   21631 //
   21632 // Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
   21633 // is the corresponding input gradient.
   21634 func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
   21635 	if scope.Err() != nil {
   21636 		return
   21637 	}
   21638 	opspec := tf.OpSpec{
   21639 		Type: "InvGrad",
   21640 		Input: []tf.Input{
   21641 			y, dy,
   21642 		},
   21643 	}
   21644 	op := scope.AddOperation(opspec)
   21645 	return op.Output(0)
   21646 }
   21647 
   21648 // StridedSliceAttr is an optional argument to StridedSlice.
   21649 type StridedSliceAttr func(optionalAttr)
   21650 
   21651 // StridedSliceBeginMask sets the optional begin_mask attribute to value.
   21652 //
   21653 // value: a bitmask where a bit i being 1 means to ignore the begin
   21654 // value and instead use the largest interval possible. At runtime
// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
// `[-1, n-1]` if `stride[i] < 0`.
   21657 // If not specified, defaults to 0
   21658 func StridedSliceBeginMask(value int64) StridedSliceAttr {
   21659 	return func(m optionalAttr) {
   21660 		m["begin_mask"] = value
   21661 	}
   21662 }
   21663 
   21664 // StridedSliceEndMask sets the optional end_mask attribute to value.
   21665 //
   21666 // value: analogous to `begin_mask`
   21667 // If not specified, defaults to 0
   21668 func StridedSliceEndMask(value int64) StridedSliceAttr {
   21669 	return func(m optionalAttr) {
   21670 		m["end_mask"] = value
   21671 	}
   21672 }
   21673 
   21674 // StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
   21675 //
   21676 // value: a bitmask where bit `i` being 1 means the `i`th
   21677 // position is actually an ellipsis. One bit at most can be 1.
   21678 // If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
   21679 // is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
   21680 // implicitly creates as many range specifications as necessary to fully
   21681 // specify the sliced range for every dimension. For example for a 4-dimensional
   21682 // tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
   21683 // If not specified, defaults to 0
   21684 func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
   21685 	return func(m optionalAttr) {
   21686 		m["ellipsis_mask"] = value
   21687 	}
   21688 }
   21689 
   21690 // StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
   21691 //
   21692 // value: a bitmask where bit `i` being 1 means the `i`th
   21693 // specification creates a new shape 1 dimension. For example
   21694 // `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
   21695 // If not specified, defaults to 0
   21696 func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
   21697 	return func(m optionalAttr) {
   21698 		m["new_axis_mask"] = value
   21699 	}
   21700 }
   21701 
   21702 // StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
   21703 //
   21704 // value: a bitmask where bit `i` implies that the `i`th
   21705 // specification should shrink the dimensionality. begin and end
   21706 // must imply a slice of size 1 in the dimension. For example in
   21707 // python one might do `foo[:, 3, :]` which would result in
   21708 // `shrink_axis_mask` being 2.
   21709 // If not specified, defaults to 0
   21710 func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
   21711 	return func(m optionalAttr) {
   21712 		m["shrink_axis_mask"] = value
   21713 	}
   21714 }
   21715 
   21716 // Return a strided slice from `input`.
   21717 //
   21718 // Note, most python users will want to use the Python `Tensor.__getitem__`
   21719 // or `Variable.__getitem__` rather than this op directly.
   21720 //
   21721 // The goal of this op is to produce a new tensor with a subset of
   21722 // the elements from the `n` dimensional `input` tensor. The subset is chosen using
   21723 // a sequence of `m` sparse range specifications encoded into the arguments
   21724 // of this function. Note, in some cases
   21725 // `m` could be equal to `n`, but this need not be the case. Each
   21726 // range specification entry can be one of the following:
   21727 //
   21728 // - An ellipsis (...). Ellipses are used to imply zero or more
   21729 //   dimensions of full-dimension selection and are produced using
   21730 //   `ellipsis_mask`. For example, `foo[...]` is the identity slice.
   21731 //
   21732 // - A new axis. This is used to insert a new shape=1 dimension and is
   21733 //   produced using `new_axis_mask`. For example, `foo[:, ...]` where
   21734 //   `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
   21735 //
   21736 //
   21737 // - A range `begin:end:stride`. This is used to specify how much to choose from
   21738 //   a given dimension. `stride` can be any integer but 0.  `begin` is an integer
   21739 //   which represents the index of the first value to select while `end` represents
   21740 //   the index of the last value to select. The number of values selected in each
   21741 //   dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
   21742 //   `begin` and `end` can be negative where `-1` is the last element, `-2` is
   21743 //   the second to last. `begin_mask` controls whether to replace the explicitly
   21744 //   given `begin` with an implicit effective value of `0` if `stride > 0` and
   21745 //   `-1` if `stride < 0`. `end_mask` is analogous but produces the number
   21746 //   required to create the largest open interval. For example, given a shape
   21747 //   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
   21748 //   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
//   and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
//   first dimension of a tensor while dropping the last (in the original
//   order) element. For example `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.
   21752 //
   21753 // - A single index. This is used to keep only elements that have a given
//   index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
   21755 //   shape `(6,)` tensor. This is encoded in `begin` and `end` and
   21756 //   `shrink_axis_mask`.
   21757 //
   21758 // Each conceptual range specification is encoded in the op's argument. This
// encoding is best understood by considering a non-trivial example. In
   21760 // particular,
   21761 // `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
   21762 //
   21763 // ```
   21764 // begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
   21765 // end = [2, 4, x, x, -3, x]
   21766 // strides = [1, 1, x, x, -1, 1]
   21767 // begin_mask = 1<<4 | 1 << 5 = 48
   21768 // end_mask = 1<<5 = 32
   21769 // ellipsis_mask = 1<<3 = 8
// new_axis_mask = 1<<2 = 4
// shrink_axis_mask = 1<<0 = 1
   21772 // ```
   21773 //
   21774 // In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
   21775 // the slice becomes (2, 1, 5, 5, 2, 5).
   21776 // Let us walk step by step through each argument specification.
   21777 //
   21778 // 1.  The first argument in the example slice is turned into `begin = 1` and
   21779 // `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
   21780 // also set the appropriate bit in `shrink_axis_mask`.
   21781 //
// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
   21783 // zero bits contributed.
   21784 //
// 3. None is a synonym for `tf.newaxis`. This means a dimension of size 1 is
// inserted in the final shape. Dummy values are contributed to begin,
   21787 // end and stride, while the new_axis_mask bit is set.
   21788 //
// 4. `...` grabs the full ranges from as many dimensions as needed to
   21790 // fully specify a slice for every dimension of the input shape.
   21791 //
   21792 // 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
   21793 // with a dimension that has shape `s` is converted to a positive index
   21794 // `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
   21795 // is done internally so begin, end and strides receive x, -3, and -1.
   21796 // The appropriate begin_mask bit is set to indicate the start range is the
   21797 // full range (ignoring the x).
   21798 //
   21799 // 6. `:` indicates that the entire contents of the corresponding dimension
   21800 // is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
   21801 // receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
   21802 // `end_mask` are also set.
   21803 //
   21804 // *Requirements*:
   21805 //   `0 != strides[i] for i in [0, m)`
   21806 //   `ellipsis_mask must be a power of two (only one ellipsis)`
   21807 //
   21808 // Arguments:
   21809 //
   21810 //	begin: `begin[k]` specifies the offset into the `k`th range specification.
   21811 // The exact dimension this corresponds to will be determined by context.
   21812 // Out-of-bounds values will be silently clamped. If the `k`th bit of
// `begin_mask` is set, then `begin[k]` is ignored and the full range of the
// appropriate dimension is used instead. Negative values cause indexing
// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
   21816 //	end: `end[i]` is like `begin` with the exception that `end_mask` is
   21817 // used to determine full ranges.
   21818 //	strides: `strides[i]` specifies the increment in the `i`th specification
   21819 // after extracting a given element. Negative indices will reverse
// the original order. Out-of-range values are
// clamped to `[0,dim[i])` if `slice[i] > 0` or `[-1,dim[i]-1]` if `slice[i] < 0`.
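//
// For example, a sketch of the slice `x[1:3]` of a vector, expressed with
// explicit begin/end/strides tensors:
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{1, 2, 3, 4})
// begin := op.Const(s, []int32{1})
// end := op.Const(s, []int32{3})
// strides := op.Const(s, []int32{1})
// y := op.StridedSlice(s, x, begin, end, strides) // [2, 3]
// _ = y
// ```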
   21822 func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
   21823 	if scope.Err() != nil {
   21824 		return
   21825 	}
   21826 	attrs := map[string]interface{}{}
   21827 	for _, a := range optional {
   21828 		a(attrs)
   21829 	}
   21830 	opspec := tf.OpSpec{
   21831 		Type: "StridedSlice",
   21832 		Input: []tf.Input{
   21833 			input, begin, end, strides,
   21834 		},
   21835 		Attrs: attrs,
   21836 	}
   21837 	op := scope.AddOperation(opspec)
   21838 	return op.Output(0)
   21839 }
   21840 
   21841 // PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
   21842 type PriorityQueueV2Attr func(optionalAttr)
   21843 
   21844 // PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
   21845 //
   21846 // value: The type of each component in a value.
   21847 // If not specified, defaults to <>
   21848 //
   21849 // REQUIRES: len(value) >= 0
   21850 func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr {
   21851 	return func(m optionalAttr) {
   21852 		m["component_types"] = value
   21853 	}
   21854 }
   21855 
   21856 // PriorityQueueV2Capacity sets the optional capacity attribute to value.
   21857 //
   21858 // value: The upper bound on the number of elements in this queue.
   21859 // Negative numbers mean no limit.
   21860 // If not specified, defaults to -1
   21861 func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr {
   21862 	return func(m optionalAttr) {
   21863 		m["capacity"] = value
   21864 	}
   21865 }
   21866 
   21867 // PriorityQueueV2Container sets the optional container attribute to value.
   21868 //
   21869 // value: If non-empty, this queue is placed in the given container.
   21870 // Otherwise, a default container is used.
   21871 // If not specified, defaults to ""
   21872 func PriorityQueueV2Container(value string) PriorityQueueV2Attr {
   21873 	return func(m optionalAttr) {
   21874 		m["container"] = value
   21875 	}
   21876 }
   21877 
   21878 // PriorityQueueV2SharedName sets the optional shared_name attribute to value.
   21879 //
   21880 // value: If non-empty, this queue will be shared under the given name
   21881 // across multiple sessions.
   21882 // If not specified, defaults to ""
   21883 func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr {
   21884 	return func(m optionalAttr) {
   21885 		m["shared_name"] = value
   21886 	}
   21887 }
   21888 
   21889 // A queue that produces elements sorted by the first component value.
   21890 //
   21891 // Note that the PriorityQueue requires the first component of any element
   21892 // to be a scalar int64, in addition to the other elements declared by
   21893 // component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
   21894 // and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
   21895 // entry in their input (resp. output) lists.
   21896 //
   21897 // Arguments:
   21898 //	shapes: The shape of each component in a value. The length of this attr must
   21899 // be either 0 or the same as the length of component_types. If the length of
   21900 // this attr is 0, the shapes of queue elements are not constrained, and
   21901 // only one element may be dequeued at a time.
   21902 //
   21903 // Returns The handle to the queue.
   21904 func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output) {
   21905 	if scope.Err() != nil {
   21906 		return
   21907 	}
   21908 	attrs := map[string]interface{}{"shapes": shapes}
   21909 	for _, a := range optional {
   21910 		a(attrs)
   21911 	}
   21912 	opspec := tf.OpSpec{
   21913 		Type: "PriorityQueueV2",
   21914 
   21915 		Attrs: attrs,
   21916 	}
   21917 	op := scope.AddOperation(opspec)
   21918 	return op.Output(0)
   21919 }
   21920 
   21921 // UnstageAttr is an optional argument to Unstage.
   21922 type UnstageAttr func(optionalAttr)
   21923 
   21924 // UnstageCapacity sets the optional capacity attribute to value.
   21925 // If not specified, defaults to 0
   21926 //
   21927 // REQUIRES: value >= 0
   21928 func UnstageCapacity(value int64) UnstageAttr {
   21929 	return func(m optionalAttr) {
   21930 		m["capacity"] = value
   21931 	}
   21932 }
   21933 
   21934 // UnstageMemoryLimit sets the optional memory_limit attribute to value.
   21935 // If not specified, defaults to 0
   21936 //
   21937 // REQUIRES: value >= 0
   21938 func UnstageMemoryLimit(value int64) UnstageAttr {
   21939 	return func(m optionalAttr) {
   21940 		m["memory_limit"] = value
   21941 	}
   21942 }
   21943 
   21944 // UnstageContainer sets the optional container attribute to value.
   21945 // If not specified, defaults to ""
   21946 func UnstageContainer(value string) UnstageAttr {
   21947 	return func(m optionalAttr) {
   21948 		m["container"] = value
   21949 	}
   21950 }
   21951 
   21952 // UnstageSharedName sets the optional shared_name attribute to value.
   21953 // If not specified, defaults to ""
   21954 func UnstageSharedName(value string) UnstageAttr {
   21955 	return func(m optionalAttr) {
   21956 		m["shared_name"] = value
   21957 	}
   21958 }
   21959 
   21960 // Op is similar to a lightweight Dequeue.
   21961 //
   21962 // The basic functionality is similar to dequeue with many fewer
   21963 // capabilities and options.  This Op is optimized for performance.
   21964 func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output) {
   21965 	if scope.Err() != nil {
   21966 		return
   21967 	}
   21968 	attrs := map[string]interface{}{"dtypes": dtypes}
   21969 	for _, a := range optional {
   21970 		a(attrs)
   21971 	}
   21972 	opspec := tf.OpSpec{
   21973 		Type: "Unstage",
   21974 
   21975 		Attrs: attrs,
   21976 	}
   21977 	op := scope.AddOperation(opspec)
   21978 	if scope.Err() != nil {
   21979 		return
   21980 	}
   21981 	var idx int
   21982 	var err error
   21983 	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
   21984 		scope.UpdateErr("Unstage", err)
   21985 		return
   21986 	}
   21987 	return values
   21988 }
   21989 
   21990 // ArgMaxAttr is an optional argument to ArgMax.
   21991 type ArgMaxAttr func(optionalAttr)
   21992 
   21993 // ArgMaxOutputType sets the optional output_type attribute to value.
   21994 // If not specified, defaults to DT_INT64
   21995 func ArgMaxOutputType(value tf.DataType) ArgMaxAttr {
   21996 	return func(m optionalAttr) {
   21997 		m["output_type"] = value
   21998 	}
   21999 }
   22000 
   22001 // Returns the index with the largest value across dimensions of a tensor.
   22002 //
   22003 // Note that in case of ties the identity of the return value is not guaranteed.
   22004 //
   22005 // Arguments:
   22006 //
   22007 //	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
   22008 // Describes which dimension of the input Tensor to reduce across. For vectors,
   22009 // use dimension = 0.
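//
// For example, a sketch over a vector (dimension 0):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{3, 1, 4, 1, 5})
// dim := op.Const(s, int32(0))
// idx := op.ArgMax(s, x, dim) // 4, returned as int64 by default
// _ = idx
// ```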
   22010 func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output) {
   22011 	if scope.Err() != nil {
   22012 		return
   22013 	}
   22014 	attrs := map[string]interface{}{}
   22015 	for _, a := range optional {
   22016 		a(attrs)
   22017 	}
   22018 	opspec := tf.OpSpec{
   22019 		Type: "ArgMax",
   22020 		Input: []tf.Input{
   22021 			input, dimension,
   22022 		},
   22023 		Attrs: attrs,
   22024 	}
   22025 	op := scope.AddOperation(opspec)
   22026 	return op.Output(0)
   22027 }
   22028 
   22029 // ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
   22030 type ResourceStridedSliceAssignAttr func(optionalAttr)
   22031 
   22032 // ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value.
   22033 // If not specified, defaults to 0
   22034 func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr {
   22035 	return func(m optionalAttr) {
   22036 		m["begin_mask"] = value
   22037 	}
   22038 }
   22039 
   22040 // ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value.
   22041 // If not specified, defaults to 0
   22042 func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr {
   22043 	return func(m optionalAttr) {
   22044 		m["end_mask"] = value
   22045 	}
   22046 }
   22047 
   22048 // ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value.
   22049 // If not specified, defaults to 0
   22050 func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr {
   22051 	return func(m optionalAttr) {
   22052 		m["ellipsis_mask"] = value
   22053 	}
   22054 }
   22055 
   22056 // ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value.
   22057 // If not specified, defaults to 0
   22058 func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr {
   22059 	return func(m optionalAttr) {
   22060 		m["new_axis_mask"] = value
   22061 	}
   22062 }
   22063 
   22064 // ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
   22065 // If not specified, defaults to 0
   22066 func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr {
   22067 	return func(m optionalAttr) {
   22068 		m["shrink_axis_mask"] = value
   22069 	}
   22070 }
   22071 
   22072 // Assign `value` to the sliced l-value reference of `ref`.
   22073 //
   22074 // The values of `value` are assigned to the positions in the variable
   22075 // `ref` that are selected by the slice parameters. The slice parameters
// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
   22077 //
   22078 // NOTE this op currently does not support broadcasting and so `value`'s
   22079 // shape must be exactly the shape produced by the slice of `ref`.
   22080 //
   22081 // Returns the created operation.
   22082 func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation) {
   22083 	if scope.Err() != nil {
   22084 		return
   22085 	}
   22086 	attrs := map[string]interface{}{}
   22087 	for _, a := range optional {
   22088 		a(attrs)
   22089 	}
   22090 	opspec := tf.OpSpec{
   22091 		Type: "ResourceStridedSliceAssign",
   22092 		Input: []tf.Input{
   22093 			ref, begin, end, strides, value,
   22094 		},
   22095 		Attrs: attrs,
   22096 	}
   22097 	return scope.AddOperation(opspec)
   22098 }
   22099 
   22100 // QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
   22101 type QueueEnqueueV2Attr func(optionalAttr)
   22102 
   22103 // QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
   22104 //
   22105 // value: If the queue is full, this operation will block for up to
   22106 // timeout_ms milliseconds.
   22107 // Note: This option is not supported yet.
   22108 // If not specified, defaults to -1
   22109 func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
   22110 	return func(m optionalAttr) {
   22111 		m["timeout_ms"] = value
   22112 	}
   22113 }
   22114 
   22115 // Enqueues a tuple of one or more tensors in the given queue.
   22116 //
   22117 // The components input has k elements, which correspond to the components of
   22118 // tuples stored in the given queue.
   22119 //
   22120 // N.B. If the queue is full, this operation will block until the given
   22121 // element has been enqueued (or 'timeout_ms' elapses, if specified).
   22122 //
   22123 // Arguments:
   22124 //	handle: The handle to a queue.
   22125 //	components: One or more tensors from which the enqueued tensors should be taken.
   22126 //
   22127 // Returns the created operation.
   22128 func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
   22129 	if scope.Err() != nil {
   22130 		return
   22131 	}
   22132 	attrs := map[string]interface{}{}
   22133 	for _, a := range optional {
   22134 		a(attrs)
   22135 	}
   22136 	opspec := tf.OpSpec{
   22137 		Type: "QueueEnqueueV2",
   22138 		Input: []tf.Input{
   22139 			handle, tf.OutputList(components),
   22140 		},
   22141 		Attrs: attrs,
   22142 	}
   22143 	return scope.AddOperation(opspec)
   22144 }
   22145 
   22146 // QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
   22147 type QueueDequeueManyV2Attr func(optionalAttr)
   22148 
   22149 // QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
   22150 //
   22151 // value: If the queue has fewer than n elements, this operation
   22152 // will block for up to timeout_ms milliseconds.
   22153 // Note: This option is not supported yet.
   22154 // If not specified, defaults to -1
   22155 func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr {
   22156 	return func(m optionalAttr) {
   22157 		m["timeout_ms"] = value
   22158 	}
   22159 }
   22160 
   22161 // Dequeues `n` tuples of one or more tensors from the given queue.
   22162 //
   22163 // If the queue is closed and there are fewer than `n` elements, then an
   22164 // OutOfRange error is returned.
   22165 //
   22166 // This operation concatenates queue-element component tensors along the
   22167 // 0th dimension to make a single component tensor.  All of the components
   22168 // in the dequeued tuple will have size `n` in the 0th dimension.
   22169 //
   22170 // This operation has `k` outputs, where `k` is the number of components in
   22171 // the tuples stored in the given queue, and output `i` is the ith
   22172 // component of the dequeued tuple.
   22173 //
   22174 // N.B. If the queue is empty, this operation will block until `n` elements
   22175 // have been dequeued (or 'timeout_ms' elapses, if specified).
   22176 //
   22177 // Arguments:
   22178 //	handle: The handle to a queue.
   22179 //	n: The number of tuples to dequeue.
   22180 //	component_types: The type of each component in a tuple.
   22181 //
   22182 // Returns One or more tensors that were dequeued as a tuple.
   22183 func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output) {
   22184 	if scope.Err() != nil {
   22185 		return
   22186 	}
   22187 	attrs := map[string]interface{}{"component_types": component_types}
   22188 	for _, a := range optional {
   22189 		a(attrs)
   22190 	}
   22191 	opspec := tf.OpSpec{
   22192 		Type: "QueueDequeueManyV2",
   22193 		Input: []tf.Input{
   22194 			handle, n,
   22195 		},
   22196 		Attrs: attrs,
   22197 	}
   22198 	op := scope.AddOperation(opspec)
   22199 	if scope.Err() != nil {
   22200 		return
   22201 	}
   22202 	var idx int
   22203 	var err error
   22204 	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
   22205 		scope.UpdateErr("QueueDequeueManyV2", err)
   22206 		return
   22207 	}
   22208 	return components
   22209 }
   22210 
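// Example (hand-written, not part of the generated docs): a minimal sketch
// pairing QueueEnqueueV2 with QueueDequeueManyV2. The queue comes from
// RandomShuffleQueueV2, another wrapper later in this file; note that
// dequeuing many elements at a time requires the shapes attribute to be set.
// Names and values are illustrative.
//
// ```go
// s := op.NewScope()
// queue := op.RandomShuffleQueueV2(s, []tf.DataType{tf.Float},
// 	op.RandomShuffleQueueV2Shapes([]tf.Shape{tf.ScalarShape()}))
// enqueue := op.QueueEnqueueV2(s, queue, []tf.Output{op.Const(s, float32(1))})
// batch := op.QueueDequeueManyV2(s, queue, op.Const(s, int32(1)),
// 	[]tf.DataType{tf.Float})
// // Run `enqueue` as a target first, then fetch batch[0] (shape [1]).
// ```
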
   22211 // EncodeBase64Attr is an optional argument to EncodeBase64.
   22212 type EncodeBase64Attr func(optionalAttr)
   22213 
   22214 // EncodeBase64Pad sets the optional pad attribute to value.
   22215 //
   22216 // value: Bool whether padding is applied at the ends.
   22217 // If not specified, defaults to false
   22218 func EncodeBase64Pad(value bool) EncodeBase64Attr {
   22219 	return func(m optionalAttr) {
   22220 		m["pad"] = value
   22221 	}
   22222 }
   22223 
   22224 // Encode strings into web-safe base64 format.
   22225 //
   22226 // Refer to the following article for more information on base64 format:
   22227 // en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
22228 // end so that the encoded string has a length that is a multiple of 4. See the
22229 // Padding section of the link above.
   22230 //
   22231 // Web-safe means that the encoder uses - and _ instead of + and /.
   22232 //
   22233 // Arguments:
   22234 //	input: Strings to be encoded.
   22235 //
   22236 // Returns Input strings encoded in base64.
   22237 func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) {
   22238 	if scope.Err() != nil {
   22239 		return
   22240 	}
   22241 	attrs := map[string]interface{}{}
   22242 	for _, a := range optional {
   22243 		a(attrs)
   22244 	}
   22245 	opspec := tf.OpSpec{
   22246 		Type: "EncodeBase64",
   22247 		Input: []tf.Input{
   22248 			input,
   22249 		},
   22250 		Attrs: attrs,
   22251 	}
   22252 	op := scope.AddOperation(opspec)
   22253 	return op.Output(0)
   22254 }
   22255 
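// Example (hand-written, not part of the generated docs): a minimal sketch of
// encoding strings with padding enabled, including the standard session
// plumbing of the Go bindings. Assumes imports of this package as `op`, the
// core bindings as `tf`, and `fmt`; values are illustrative.
//
// ```go
// s := op.NewScope()
// encoded := op.EncodeBase64(s, op.Const(s, []string{"hello", "world"}),
// 	op.EncodeBase64Pad(true))
// graph, err := s.Finalize()
// if err != nil {
// 	panic(err)
// }
// sess, err := tf.NewSession(graph, nil)
// if err != nil {
// 	panic(err)
// }
// defer sess.Close()
// out, err := sess.Run(nil, []tf.Output{encoded}, nil)
// if err != nil {
// 	panic(err)
// }
// fmt.Println(out[0].Value()) // [aGVsbG8= d29ybGQ=]
// ```
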
   22256 // Deprecated. Use TensorArrayCloseV3
   22257 //
   22258 // DEPRECATED at GraphDef version 26: Use TensorArrayCloseV3
   22259 //
   22260 // Returns the created operation.
   22261 func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
   22262 	if scope.Err() != nil {
   22263 		return
   22264 	}
   22265 	opspec := tf.OpSpec{
   22266 		Type: "TensorArrayCloseV2",
   22267 		Input: []tf.Input{
   22268 			handle,
   22269 		},
   22270 	}
   22271 	return scope.AddOperation(opspec)
   22272 }
   22273 
   22274 // CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
   22275 type CropAndResizeGradImageAttr func(optionalAttr)
   22276 
   22277 // CropAndResizeGradImageMethod sets the optional method attribute to value.
   22278 //
   22279 // value: A string specifying the interpolation method. Only 'bilinear' is
   22280 // supported for now.
   22281 // If not specified, defaults to "bilinear"
   22282 func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr {
   22283 	return func(m optionalAttr) {
   22284 		m["method"] = value
   22285 	}
   22286 }
   22287 
   22288 // Computes the gradient of the crop_and_resize op wrt the input image tensor.
   22289 //
   22290 // Arguments:
   22291 //	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
   22292 //	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
   22293 // specifies the coordinates of a box in the `box_ind[i]` image and is specified
   22294 // in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
22295 // `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
   22296 // `[0, 1]` interval of normalized image height is mapped to
22297 // `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
   22298 // which case the sampled crop is an up-down flipped version of the original
   22299 // image. The width dimension is treated similarly. Normalized coordinates
   22300 // outside the `[0, 1]` range are allowed, in which case we use
   22301 // `extrapolation_value` to extrapolate the input image values.
   22302 //	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
   22303 // The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
   22304 //	image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
   22305 // containing the original image size. Both `image_height` and `image_width` need
   22306 // to be positive.
   22307 //
   22308 //
   22309 // Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
   22310 func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output) {
   22311 	if scope.Err() != nil {
   22312 		return
   22313 	}
   22314 	attrs := map[string]interface{}{"T": T}
   22315 	for _, a := range optional {
   22316 		a(attrs)
   22317 	}
   22318 	opspec := tf.OpSpec{
   22319 		Type: "CropAndResizeGradImage",
   22320 		Input: []tf.Input{
   22321 			grads, boxes, box_ind, image_size,
   22322 		},
   22323 		Attrs: attrs,
   22324 	}
   22325 	op := scope.AddOperation(opspec)
   22326 	return op.Output(0)
   22327 }
   22328 
   22329 // Reads and outputs the entire contents of the input filename.
   22330 func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output) {
   22331 	if scope.Err() != nil {
   22332 		return
   22333 	}
   22334 	opspec := tf.OpSpec{
   22335 		Type: "ReadFile",
   22336 		Input: []tf.Input{
   22337 			filename,
   22338 		},
   22339 	}
   22340 	op := scope.AddOperation(opspec)
   22341 	return op.Output(0)
   22342 }
   22343 
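// Example (hand-written, not part of the generated docs): a minimal sketch of
// reading a file's contents as a scalar string tensor; the path is
// illustrative and error handling is elided for brevity.
//
// ```go
// s := op.NewScope()
// contents := op.ReadFile(s, op.Const(s, "/tmp/input.txt"))
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// defer sess.Close()
// out, _ := sess.Run(nil, []tf.Output{contents}, nil)
// // out[0].Value() holds the file's bytes as a string.
// ```
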
   22344 // Concatenates tensors along one dimension.
   22345 //
   22346 // Arguments:
   22347 //	values: List of `N` Tensors to concatenate. Their ranks and types must match,
   22348 // and their sizes must match in all dimensions except `concat_dim`.
   22349 //	axis: 0-D.  The dimension along which to concatenate.  Must be in the
   22350 // range [-rank(values), rank(values)).
   22351 //
   22352 // Returns A `Tensor` with the concatenation of values stacked along the
   22353 // `concat_dim` dimension.  This tensor's shape matches that of `values` except
   22354 // in `concat_dim` where it has the sum of the sizes.
   22355 func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {
   22356 	if scope.Err() != nil {
   22357 		return
   22358 	}
   22359 	opspec := tf.OpSpec{
   22360 		Type: "ConcatV2",
   22361 		Input: []tf.Input{
   22362 			tf.OutputList(values), axis,
   22363 		},
   22364 	}
   22365 	op := scope.AddOperation(opspec)
   22366 	return op.Output(0)
   22367 }
   22368 
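// Example (hand-written, not part of the generated docs): a sketch of
// concatenating two matrices along dimension 0; values are illustrative.
//
// ```go
// s := op.NewScope()
// a := op.Const(s, [][]float32{{1, 2}})
// b := op.Const(s, [][]float32{{3, 4}, {5, 6}})
// out := op.ConcatV2(s, []tf.Output{a, b}, op.Const(s, int32(0)))
// // out has shape [3, 2]: ranks match and sizes agree except along axis 0.
// ```
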
   22369 // Forwards the value of an available tensor from `inputs` to `output`.
   22370 //
   22371 // `Merge` waits for at least one of the tensors in `inputs` to become available.
   22372 // It is usually combined with `Switch` to implement branching.
   22373 //
   22374 // `Merge` forwards the first tensor to become available to `output`, and sets
   22375 // `value_index` to its index in `inputs`.
   22376 //
   22377 // Arguments:
   22378 //	inputs: The input tensors, exactly one of which will become available.
   22379 //
22380 // Returns The available input tensor, and the index of the chosen input tensor in `inputs`.
   22381 func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
   22382 	if scope.Err() != nil {
   22383 		return
   22384 	}
   22385 	opspec := tf.OpSpec{
   22386 		Type: "Merge",
   22387 		Input: []tf.Input{
   22388 			tf.OutputList(inputs),
   22389 		},
   22390 	}
   22391 	op := scope.AddOperation(opspec)
   22392 	return op.Output(0), op.Output(1)
   22393 }
   22394 
   22395 // QueueCloseV2Attr is an optional argument to QueueCloseV2.
   22396 type QueueCloseV2Attr func(optionalAttr)
   22397 
   22398 // QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
   22399 //
   22400 // value: If true, all pending enqueue requests that are
   22401 // blocked on the given queue will be canceled.
   22402 // If not specified, defaults to false
   22403 func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
   22404 	return func(m optionalAttr) {
   22405 		m["cancel_pending_enqueues"] = value
   22406 	}
   22407 }
   22408 
   22409 // Closes the given queue.
   22410 //
   22411 // This operation signals that no more elements will be enqueued in the
   22412 // given queue. Subsequent Enqueue(Many) operations will fail.
   22413 // Subsequent Dequeue(Many) operations will continue to succeed if
   22414 // sufficient elements remain in the queue. Subsequent Dequeue(Many)
   22415 // operations that would block will fail immediately.
   22416 //
   22417 // Arguments:
   22418 //	handle: The handle to a queue.
   22419 //
   22420 // Returns the created operation.
   22421 func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
   22422 	if scope.Err() != nil {
   22423 		return
   22424 	}
   22425 	attrs := map[string]interface{}{}
   22426 	for _, a := range optional {
   22427 		a(attrs)
   22428 	}
   22429 	opspec := tf.OpSpec{
   22430 		Type: "QueueCloseV2",
   22431 		Input: []tf.Input{
   22432 			handle,
   22433 		},
   22434 		Attrs: attrs,
   22435 	}
   22436 	return scope.AddOperation(opspec)
   22437 }
   22438 
   22439 // Computes inverse hyperbolic tangent of x element-wise.
   22440 func Atanh(scope *Scope, x tf.Output) (y tf.Output) {
   22441 	if scope.Err() != nil {
   22442 		return
   22443 	}
   22444 	opspec := tf.OpSpec{
   22445 		Type: "Atanh",
   22446 		Input: []tf.Input{
   22447 			x,
   22448 		},
   22449 	}
   22450 	op := scope.AddOperation(opspec)
   22451 	return op.Output(0)
   22452 }
   22453 
   22454 // Returns true if queue is closed.
   22455 //
   22456 // This operation returns true if the queue is closed and false if the queue
   22457 // is open.
   22458 //
   22459 // Arguments:
   22460 //	handle: The handle to a queue.
   22461 func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output) {
   22462 	if scope.Err() != nil {
   22463 		return
   22464 	}
   22465 	opspec := tf.OpSpec{
   22466 		Type: "QueueIsClosedV2",
   22467 		Input: []tf.Input{
   22468 			handle,
   22469 		},
   22470 	}
   22471 	op := scope.AddOperation(opspec)
   22472 	return op.Output(0)
   22473 }
   22474 
   22475 // Returns the batched diagonal part of a batched tensor.
   22476 //
   22477 // This operation returns a tensor with the `diagonal` part
   22478 // of the batched `input`. The `diagonal` part is computed as follows:
   22479 //
   22480 // Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
   22481 // tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
   22482 //
   22483 // `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
   22484 //
   22485 // The input must be at least a matrix.
   22486 //
   22487 // For example:
   22488 //
   22489 // ```
   22490 // # 'input' is [[[1, 0, 0, 0]
   22491 //                [0, 2, 0, 0]
   22492 //                [0, 0, 3, 0]
   22493 //                [0, 0, 0, 4]],
   22494 //               [[5, 0, 0, 0]
   22495 //                [0, 6, 0, 0]
   22496 //                [0, 0, 7, 0]
   22497 //                [0, 0, 0, 8]]]
   22498 //
   22499 // and input.shape = (2, 4, 4)
   22500 //
   22501 // tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
   22502 //
   22503 // which has shape (2, 4)
   22504 // ```
   22505 //
   22506 // Arguments:
   22507 //	input: Rank `k` tensor where `k >= 2`.
   22508 //
   22509 // Returns The extracted diagonal(s) having shape
   22510 // `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
   22511 func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
   22512 	if scope.Err() != nil {
   22513 		return
   22514 	}
   22515 	opspec := tf.OpSpec{
   22516 		Type: "MatrixDiagPart",
   22517 		Input: []tf.Input{
   22518 			input,
   22519 		},
   22520 	}
   22521 	op := scope.AddOperation(opspec)
   22522 	return op.Output(0)
   22523 }
   22524 
   22525 // Computes the absolute value of a tensor.
   22526 //
   22527 // Given a tensor `x`, this operation returns a tensor containing the absolute
   22528 // value of each element in `x`. For example, if x is an input element and y is
   22529 // an output element, this operation computes \\(y = |x|\\).
   22530 func Abs(scope *Scope, x tf.Output) (y tf.Output) {
   22531 	if scope.Err() != nil {
   22532 		return
   22533 	}
   22534 	opspec := tf.OpSpec{
   22535 		Type: "Abs",
   22536 		Input: []tf.Input{
   22537 			x,
   22538 		},
   22539 	}
   22540 	op := scope.AddOperation(opspec)
   22541 	return op.Output(0)
   22542 }
   22543 
   22544 // Flushes and closes the summary writer.
   22545 //
   22546 // Also removes it from the resource manager. To reopen, use another
   22547 // CreateSummaryFileWriter op.
   22548 //
   22549 // Arguments:
   22550 //	writer: A handle to the summary writer resource.
   22551 //
   22552 // Returns the created operation.
   22553 func CloseSummaryWriter(scope *Scope, writer tf.Output) (o *tf.Operation) {
   22554 	if scope.Err() != nil {
   22555 		return
   22556 	}
   22557 	opspec := tf.OpSpec{
   22558 		Type: "CloseSummaryWriter",
   22559 		Input: []tf.Input{
   22560 			writer,
   22561 		},
   22562 	}
   22563 	return scope.AddOperation(opspec)
   22564 }
   22565 
   22566 // StackV2Attr is an optional argument to StackV2.
   22567 type StackV2Attr func(optionalAttr)
   22568 
   22569 // StackV2StackName sets the optional stack_name attribute to value.
   22570 //
   22571 // value: Overrides the name used for the temporary stack resource. Default
   22572 // value is the name of the 'Stack' op (which is guaranteed unique).
   22573 // If not specified, defaults to ""
   22574 func StackV2StackName(value string) StackV2Attr {
   22575 	return func(m optionalAttr) {
   22576 		m["stack_name"] = value
   22577 	}
   22578 }
   22579 
   22580 // A stack that produces elements in first-in last-out order.
   22581 //
   22582 // Arguments:
   22583 //	max_size: The maximum size of the stack if non-negative. If negative, the stack
   22584 // size is unlimited.
   22585 //	elem_type: The type of the elements on the stack.
   22586 //
   22587 // Returns The handle to the stack.
   22588 func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output) {
   22589 	if scope.Err() != nil {
   22590 		return
   22591 	}
   22592 	attrs := map[string]interface{}{"elem_type": elem_type}
   22593 	for _, a := range optional {
   22594 		a(attrs)
   22595 	}
   22596 	opspec := tf.OpSpec{
   22597 		Type: "StackV2",
   22598 		Input: []tf.Input{
   22599 			max_size,
   22600 		},
   22601 		Attrs: attrs,
   22602 	}
   22603 	op := scope.AddOperation(opspec)
   22604 	return op.Output(0)
   22605 }
   22606 
   22607 // OrderedMapStageAttr is an optional argument to OrderedMapStage.
   22608 type OrderedMapStageAttr func(optionalAttr)
   22609 
   22610 // OrderedMapStageCapacity sets the optional capacity attribute to value.
   22611 //
   22612 // value: Maximum number of elements in the Staging Area. If > 0, inserts
   22613 // on the container will block when the capacity is reached.
   22614 // If not specified, defaults to 0
   22615 //
   22616 // REQUIRES: value >= 0
   22617 func OrderedMapStageCapacity(value int64) OrderedMapStageAttr {
   22618 	return func(m optionalAttr) {
   22619 		m["capacity"] = value
   22620 	}
   22621 }
   22622 
   22623 // OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value.
   22624 // If not specified, defaults to 0
   22625 //
   22626 // REQUIRES: value >= 0
   22627 func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr {
   22628 	return func(m optionalAttr) {
   22629 		m["memory_limit"] = value
   22630 	}
   22631 }
   22632 
   22633 // OrderedMapStageContainer sets the optional container attribute to value.
   22634 //
   22635 // value: If non-empty, this queue is placed in the given container. Otherwise,
   22636 // a default container is used.
   22637 // If not specified, defaults to ""
   22638 func OrderedMapStageContainer(value string) OrderedMapStageAttr {
   22639 	return func(m optionalAttr) {
   22640 		m["container"] = value
   22641 	}
   22642 }
   22643 
   22644 // OrderedMapStageSharedName sets the optional shared_name attribute to value.
   22645 //
   22646 // value: It is necessary to match this name to the matching Unstage Op.
   22647 // If not specified, defaults to ""
   22648 func OrderedMapStageSharedName(value string) OrderedMapStageAttr {
   22649 	return func(m optionalAttr) {
   22650 		m["shared_name"] = value
   22651 	}
   22652 }
   22653 
22654 // Stage (key, values) in the underlying container which behaves like an ordered
   22655 //
   22656 // associative container.   Elements are ordered by key.
   22657 //
   22658 // Arguments:
   22659 //	key: int64
   22660 //
   22661 //	values: a list of tensors
22662 //	dtypes: A list of data types that inserted values should adhere to.
   22663 //
   22664 //
   22665 // Returns the created operation.
   22666 func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation) {
   22667 	if scope.Err() != nil {
   22668 		return
   22669 	}
   22670 	attrs := map[string]interface{}{"dtypes": dtypes}
   22671 	for _, a := range optional {
   22672 		a(attrs)
   22673 	}
   22674 	opspec := tf.OpSpec{
   22675 		Type: "OrderedMapStage",
   22676 		Input: []tf.Input{
   22677 			key, indices, tf.OutputList(values),
   22678 		},
   22679 		Attrs: attrs,
   22680 	}
   22681 	return scope.AddOperation(opspec)
   22682 }
   22683 
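// Example (hand-written, not part of the generated docs): a sketch staging a
// single float value under key 0, with a bounded capacity and a shared name
// for a matching unstage op elsewhere. Names and values are illustrative.
//
// ```go
// s := op.NewScope()
// stage := op.OrderedMapStage(s,
// 	op.Const(s, int64(0)),   // key
// 	op.Const(s, []int32{0}), // indices
// 	[]tf.Output{op.Const(s, float32(1.5))},
// 	[]tf.DataType{tf.Float},
// 	op.OrderedMapStageCapacity(16),
// 	op.OrderedMapStageSharedName("stage0"))
// // Run `stage` as a target to perform the insertion.
// ```
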
   22684 // StackPushV2Attr is an optional argument to StackPushV2.
   22685 type StackPushV2Attr func(optionalAttr)
   22686 
   22687 // StackPushV2SwapMemory sets the optional swap_memory attribute to value.
   22688 //
22689 // value: Swap `elem` to CPU. Defaults to false.
   22690 // If not specified, defaults to false
   22691 func StackPushV2SwapMemory(value bool) StackPushV2Attr {
   22692 	return func(m optionalAttr) {
   22693 		m["swap_memory"] = value
   22694 	}
   22695 }
   22696 
   22697 // Push an element onto the stack.
   22698 //
   22699 // Arguments:
   22700 //	handle: The handle to a stack.
   22701 //	elem: The tensor to be pushed onto the stack.
   22702 //
   22703 // Returns The same tensor as the input 'elem'.
   22704 func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output) {
   22705 	if scope.Err() != nil {
   22706 		return
   22707 	}
   22708 	attrs := map[string]interface{}{}
   22709 	for _, a := range optional {
   22710 		a(attrs)
   22711 	}
   22712 	opspec := tf.OpSpec{
   22713 		Type: "StackPushV2",
   22714 		Input: []tf.Input{
   22715 			handle, elem,
   22716 		},
   22717 		Attrs: attrs,
   22718 	}
   22719 	op := scope.AddOperation(opspec)
   22720 	return op.Output(0)
   22721 }
   22722 
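// Example (hand-written, not part of the generated docs): a sketch creating a
// stack and pushing one element onto it; values are illustrative.
//
// ```go
// s := op.NewScope()
// maxSize := op.Const(s, int32(-1)) // negative means unlimited size
// handle := op.StackV2(s, maxSize, tf.Float, op.StackV2StackName("my_stack"))
// pushed := op.StackPushV2(s, handle, op.Const(s, float32(3.14)))
// // `pushed` is the same tensor as the pushed elem; elements come back out
// // in last-in first-out order.
// ```
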
   22723 // FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
   22724 type FusedBatchNormGradV2Attr func(optionalAttr)
   22725 
   22726 // FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
   22727 //
   22728 // value: A small float number added to the variance of x.
   22729 // If not specified, defaults to 0.0001
   22730 func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr {
   22731 	return func(m optionalAttr) {
   22732 		m["epsilon"] = value
   22733 	}
   22734 }
   22735 
   22736 // FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
   22737 //
   22738 // value: The data format for y_backprop, x, x_backprop.
   22739 // Either "NHWC" (default) or "NCHW".
   22740 // If not specified, defaults to "NHWC"
   22741 func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr {
   22742 	return func(m optionalAttr) {
   22743 		m["data_format"] = value
   22744 	}
   22745 }
   22746 
   22747 // FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
   22748 //
   22749 // value: A bool value to indicate the operation is for training (default)
   22750 // or inference.
   22751 // If not specified, defaults to true
   22752 func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr {
   22753 	return func(m optionalAttr) {
   22754 		m["is_training"] = value
   22755 	}
   22756 }
   22757 
   22758 // Gradient for batch normalization.
   22759 //
   22760 // Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
   22761 // The size of 1D Tensors matches the dimension C of the 4D Tensors.
   22762 //
   22763 // Arguments:
   22764 //	y_backprop: A 4D Tensor for the gradient with respect to y.
   22765 //	x: A 4D Tensor for input data.
   22766 //	scale: A 1D Tensor for scaling factor, to scale the normalized x.
   22767 //	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
   22768 // mean to be reused in gradient computation. When is_training is
   22769 // False, a 1D Tensor for the population mean to be reused in both
   22770 // 1st and 2nd order gradient computation.
   22771 //	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
   22772 // variance (inverted variance in the cuDNN case) to be reused in
   22773 // gradient computation. When is_training is False, a 1D Tensor
   22774 // for the population variance to be reused in both 1st and 2nd
   22775 // order gradient computation.
   22776 //
22777 // Returns A 4D Tensor for the gradient with respect to x; a 1D Tensor for the gradient with respect to scale; a 1D Tensor for the gradient with respect to offset; an unused placeholder to match the mean input in FusedBatchNorm; and an unused placeholder to match the variance input
   22778 // in FusedBatchNorm.
   22779 func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
   22780 	if scope.Err() != nil {
   22781 		return
   22782 	}
   22783 	attrs := map[string]interface{}{}
   22784 	for _, a := range optional {
   22785 		a(attrs)
   22786 	}
   22787 	opspec := tf.OpSpec{
   22788 		Type: "FusedBatchNormGradV2",
   22789 		Input: []tf.Input{
   22790 			y_backprop, x, scale, reserve_space_1, reserve_space_2,
   22791 		},
   22792 		Attrs: attrs,
   22793 	}
   22794 	op := scope.AddOperation(opspec)
   22795 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
   22796 }
   22797 
   22798 // Creates a TensorArray for storing the gradients of values in the given handle.
   22799 //
   22800 // If the given TensorArray gradient already exists, returns a reference to it.
   22801 //
   22802 // Locks the size of the original TensorArray by disabling its dynamic size flag.
   22803 //
   22804 // **A note about the input flow_in:**
   22805 //
   22806 // The handle flow_in forces the execution of the gradient lookup to occur
   22807 // only after certain other operations have occurred.  For example, when
   22808 // the forward TensorArray is dynamically sized, writes to this TensorArray
   22809 // may resize the object.  The gradient TensorArray is statically sized based
   22810 // on the size of the forward TensorArray when this operation executes.
   22811 // Furthermore, the size of the forward TensorArray is frozen by this call.
   22812 // As a result, the flow is used to ensure that the call to generate the gradient
   22813 // TensorArray only happens after all writes are executed.
   22814 //
   22815 // In the case of dynamically sized TensorArrays, gradient computation should
   22816 // only be performed on read operations that have themselves been chained via
   22817 // flow to occur only after all writes have executed. That way the final size
   22818 // of the forward TensorArray is known when this operation is called.
   22819 //
   22820 // **A note about the source attribute:**
   22821 //
   22822 // TensorArray gradient calls use an accumulator TensorArray object.  If
   22823 // multiple gradients are calculated and run in the same session, the multiple
   22824 // gradient nodes may accidentally flow through the same accumulator TensorArray.
   22825 // This double counts and generally breaks the TensorArray gradient flow.
   22826 //
   22827 // The solution is to identify which gradient call this particular
   22828 // TensorArray gradient is being called in.  This is performed by identifying
   22829 // a unique string (e.g. "gradients", "gradients_1", ...) from the input
   22830 // gradient Tensor's name.  This string is used as a suffix when creating
   22831 // the TensorArray gradient object here (the attribute `source`).
   22832 //
   22833 // The attribute `source` is added as a suffix to the forward TensorArray's
   22834 // name when performing the creation / lookup, so that each separate gradient
   22835 // calculation gets its own TensorArray accumulator.
   22836 //
   22837 // Arguments:
   22838 //	handle: The handle to the forward TensorArray.
   22839 //	flow_in: A float scalar that enforces proper chaining of operations.
   22840 //	source: The gradient source string, used to decide which gradient TensorArray
   22841 // to return.
   22842 func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
   22843 	if scope.Err() != nil {
   22844 		return
   22845 	}
   22846 	attrs := map[string]interface{}{"source": source}
   22847 	opspec := tf.OpSpec{
   22848 		Type: "TensorArrayGradV3",
   22849 		Input: []tf.Input{
   22850 			handle, flow_in,
   22851 		},
   22852 		Attrs: attrs,
   22853 	}
   22854 	op := scope.AddOperation(opspec)
   22855 	return op.Output(0), op.Output(1)
   22856 }
   22857 
   22858 // Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
   22859 //
   22860 // Each comparison returns a boolean `true` (if `input_value > threshold`)
22861 // or `false` otherwise.
   22862 //
   22863 // This operation is useful for Locality-Sensitive-Hashing (LSH) and other
   22864 // algorithms that use hashing approximations of cosine and `L2` distances;
   22865 // codes can be generated from an input via:
   22866 //
   22867 // ```python
   22868 // codebook_size = 50
   22869 // codebook_bits = codebook_size * 32
   22870 // codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
   22871 //                            dtype=x.dtype,
   22872 //                            initializer=tf.orthogonal_initializer())
   22873 // codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
   22874 // codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
   22875 // # now codes has shape x.shape[:-1] + [codebook_size]
   22876 // ```
   22877 //
   22878 // **NOTE**: Currently, the innermost dimension of the tensor must be divisible
   22879 // by 8.
   22880 //
   22881 // Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
   22882 // a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
   22883 //
   22884 // Arguments:
   22885 //	input: Values to compare against `threshold` and bitpack.
   22886 //	threshold: Threshold to compare against.
   22887 //
   22888 // Returns The bitpacked comparisons.
   22889 func CompareAndBitpack(scope *Scope, input tf.Output, threshold tf.Output) (output tf.Output) {
   22890 	if scope.Err() != nil {
   22891 		return
   22892 	}
   22893 	opspec := tf.OpSpec{
   22894 		Type: "CompareAndBitpack",
   22895 		Input: []tf.Input{
   22896 			input, threshold,
   22897 		},
   22898 	}
   22899 	op := scope.AddOperation(opspec)
   22900 	return op.Output(0)
   22901 }
   22902 
   22903 // Push an element onto the tensor_array.
   22904 //
   22905 // Arguments:
   22906 //	handle: The handle to a TensorArray.
   22907 //	index: The position to write to inside the TensorArray.
   22908 //	value: The tensor to write to the TensorArray.
   22909 //	flow_in: A float scalar that enforces proper chaining of operations.
   22910 //
   22911 // Returns A float scalar that enforces proper chaining of operations.
   22912 func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
   22913 	if scope.Err() != nil {
   22914 		return
   22915 	}
   22916 	opspec := tf.OpSpec{
   22917 		Type: "TensorArrayWriteV3",
   22918 		Input: []tf.Input{
   22919 			handle, index, value, flow_in,
   22920 		},
   22921 	}
   22922 	op := scope.AddOperation(opspec)
   22923 	return op.Output(0)
   22924 }
   22925 
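// Example (hand-written, not part of the generated docs): a sketch writing one
// element and chaining the returned flow into a size query, so the size is
// computed after the write. TensorArrayV3, the constructor, is another wrapper
// in this package; values are illustrative.
//
// ```go
// s := op.NewScope()
// handle, flow := op.TensorArrayV3(s, op.Const(s, int32(2)), tf.Float)
// flowOut := op.TensorArrayWriteV3(s, handle, op.Const(s, int32(0)),
// 	op.Const(s, []float32{1, 2}), flow)
// // Using flowOut (not flow) guarantees the size is computed after the write.
// size := op.TensorArraySizeV3(s, handle, flowOut)
// ```
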
   22926 // Scatter the data from the input value into specific TensorArray elements.
   22927 //
22928 // `indices` must be a vector, and its length must match the first dim of `value`.
   22929 //
   22930 // Arguments:
   22931 //	handle: The handle to a TensorArray.
   22932 //	indices: The locations at which to write the tensor elements.
   22933 //	value: The concatenated tensor to write to the TensorArray.
   22934 //	flow_in: A float scalar that enforces proper chaining of operations.
   22935 //
   22936 // Returns A float scalar that enforces proper chaining of operations.
   22937 func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
   22938 	if scope.Err() != nil {
   22939 		return
   22940 	}
   22941 	opspec := tf.OpSpec{
   22942 		Type: "TensorArrayScatterV3",
   22943 		Input: []tf.Input{
   22944 			handle, indices, value, flow_in,
   22945 		},
   22946 	}
   22947 	op := scope.AddOperation(opspec)
   22948 	return op.Output(0)
   22949 }
   22950 
   22951 // TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
   22952 type TensorArrayConcatV3Attr func(optionalAttr)
   22953 
   22954 // TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
   22955 //
   22956 // value: The expected shape of an element, if known,
   22957 // excluding the first dimension. Used to validate the shapes of
   22958 // TensorArray elements. If this shape is not fully specified, concatenating
   22959 // zero-size TensorArrays is an error.
22960 // If not specified, defaults to <unknown_rank: true>
   22961 func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr {
   22962 	return func(m optionalAttr) {
   22963 		m["element_shape_except0"] = value
   22964 	}
   22965 }
   22966 
   22967 // Concat the elements from the TensorArray into value `value`.
   22968 //
   22969 // Takes `T` elements of shapes
   22970 //
   22971 //   ```
   22972 //   (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
   22973 //   ```
   22974 //
   22975 // and concatenates them into a Tensor of shape:
   22976 //
   22977 //   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
   22978 //
   22979 // All elements must have the same shape (excepting the first dimension).
   22980 //
   22981 // Arguments:
   22982 //	handle: The handle to a TensorArray.
   22983 //	flow_in: A float scalar that enforces proper chaining of operations.
22984 //	dtype: The type of the elements that are returned.
   22985 //
22986 // Returns All of the elements in the TensorArray, concatenated along the first
22987 // axis, and a vector of the row sizes of the original T elements in the
22988 // value output.  In the example above, this would be the values:
22989 // `(n0, n1, ..., n(T-1))`.
   22990 func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output) {
   22991 	if scope.Err() != nil {
   22992 		return
   22993 	}
   22994 	attrs := map[string]interface{}{"dtype": dtype}
   22995 	for _, a := range optional {
   22996 		a(attrs)
   22997 	}
   22998 	opspec := tf.OpSpec{
   22999 		Type: "TensorArrayConcatV3",
   23000 		Input: []tf.Input{
   23001 			handle, flow_in,
   23002 		},
   23003 		Attrs: attrs,
   23004 	}
   23005 	op := scope.AddOperation(opspec)
   23006 	return op.Output(0), op.Output(1)
   23007 }
   23008 
   23009 // ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
   23010 type ParameterizedTruncatedNormalAttr func(optionalAttr)
   23011 
   23012 // ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
   23013 //
   23014 // value: If either `seed` or `seed2` are set to be non-zero, the random number
   23015 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   23016 // random seed.
   23017 // If not specified, defaults to 0
   23018 func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr {
   23019 	return func(m optionalAttr) {
   23020 		m["seed"] = value
   23021 	}
   23022 }
   23023 
   23024 // ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
   23025 //
   23026 // value: A second seed to avoid seed collision.
   23027 // If not specified, defaults to 0
   23028 func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr {
   23029 	return func(m optionalAttr) {
   23030 		m["seed2"] = value
   23031 	}
   23032 }
   23033 
23034 // Outputs random values from a truncated normal distribution. The parameters may each be a
   23035 //
   23036 // scalar which applies to the entire output, or a vector of length shape[0] which
   23037 // stores the parameters for each batch.
   23038 //
   23039 // Arguments:
   23040 //	shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
   23041 //	means: The mean parameter of each batch.
   23042 //	stdevs: The standard deviation parameter of each batch. Must be greater than 0.
   23043 //	minvals: The minimum cutoff. May be -infinity.
   23044 //	maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
   23045 // for each batch.
   23046 //
   23047 // Returns A matrix of shape num_batches x samples_per_batch, filled with random
   23048 // truncated normal values using the parameters for each row.
   23049 func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output) {
   23050 	if scope.Err() != nil {
   23051 		return
   23052 	}
   23053 	attrs := map[string]interface{}{}
   23054 	for _, a := range optional {
   23055 		a(attrs)
   23056 	}
   23057 	opspec := tf.OpSpec{
   23058 		Type: "ParameterizedTruncatedNormal",
   23059 		Input: []tf.Input{
   23060 			shape, means, stdevs, minvals, maxvals,
   23061 		},
   23062 		Attrs: attrs,
   23063 	}
   23064 	op := scope.AddOperation(opspec)
   23065 	return op.Output(0)
   23066 }
   23067 
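// Example (hand-written, not part of the generated docs): a sketch drawing two
// batches of five truncated-normal samples each, with per-batch parameters;
// numbers are illustrative.
//
// ```go
// s := op.NewScope()
// samples := op.ParameterizedTruncatedNormal(s,
// 	op.Const(s, []int32{2, 5}),    // shape: 2 batches, 5 samples each
// 	op.Const(s, []float32{0, 10}), // means
// 	op.Const(s, []float32{1, 2}),  // stdevs
// 	op.Const(s, []float32{-1, 8}), // minvals
// 	op.Const(s, []float32{1, 12}), // maxvals
// 	op.ParameterizedTruncatedNormalSeed(42))
// ```
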
23068 // Returns a diagonal tensor with given diagonal values.
   23069 //
   23070 // Given a `diagonal`, this operation returns a tensor with the `diagonal` and
   23071 // everything else padded with zeros. The diagonal is computed as follows:
   23072 //
   23073 // Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
   23074 // rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
   23075 //
   23076 // `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
   23077 //
   23078 // For example:
   23079 //
   23080 // ```
   23081 // # 'diagonal' is [1, 2, 3, 4]
   23082 // tf.diag(diagonal) ==> [[1, 0, 0, 0]
   23083 //                        [0, 2, 0, 0]
   23084 //                        [0, 0, 3, 0]
   23085 //                        [0, 0, 0, 4]]
   23086 // ```
   23087 //
   23088 // Arguments:
   23089 //	diagonal: Rank k tensor where k is at most 1.
   23090 func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
   23091 	if scope.Err() != nil {
   23092 		return
   23093 	}
   23094 	opspec := tf.OpSpec{
   23095 		Type: "Diag",
   23096 		Input: []tf.Input{
   23097 			diagonal,
   23098 		},
   23099 	}
   23100 	op := scope.AddOperation(opspec)
   23101 	return op.Output(0)
   23102 }
   23103 
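// Example (hand-written, not part of the generated docs): the comment's
// example expressed in Go; values are illustrative.
//
// ```go
// s := op.NewScope()
// out := op.Diag(s, op.Const(s, []int32{1, 2, 3, 4}))
// // out has shape [4, 4] with 1, 2, 3, 4 on the diagonal and zeros elsewhere.
// ```
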
   23104 // Split the data from the input value into TensorArray elements.
   23105 //
   23106 // Assuming that `lengths` takes on values
   23107 //
   23108 //   ```(n0, n1, ..., n(T-1))```
   23109 //
   23110 // and that `value` has shape
   23111 //
   23112 //   ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
   23113 //
   23114 // this splits values into a TensorArray with T tensors.
   23115 //
   23116 // TensorArray index t will be the subtensor of values with starting position
   23117 //
   23118 //   ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
   23119 //
   23120 // and having size
   23121 //
   23122 //   ```nt x d0 x d1 x ...```
   23123 //
   23124 // Arguments:
   23125 //	handle: The handle to a TensorArray.
   23126 //	value: The concatenated tensor to write to the TensorArray.
   23127 //	lengths: The vector of lengths, how to split the rows of value into the
   23128 // TensorArray.
   23129 //	flow_in: A float scalar that enforces proper chaining of operations.
   23130 //
   23131 // Returns A float scalar that enforces proper chaining of operations.
   23132 func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
   23133 	if scope.Err() != nil {
   23134 		return
   23135 	}
   23136 	opspec := tf.OpSpec{
   23137 		Type: "TensorArraySplitV3",
   23138 		Input: []tf.Input{
   23139 			handle, value, lengths, flow_in,
   23140 		},
   23141 	}
   23142 	op := scope.AddOperation(opspec)
   23143 	return op.Output(0)
   23144 }
   23145 
   23146 // SerializeSparseAttr is an optional argument to SerializeSparse.
   23147 type SerializeSparseAttr func(optionalAttr)
   23148 
   23149 // SerializeSparseOutType sets the optional out_type attribute to value.
   23150 //
   23151 // value: The `dtype` to use for serialization; the supported types are `string`
   23152 // (default) and `variant`.
   23153 // If not specified, defaults to DT_STRING
   23154 func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr {
   23155 	return func(m optionalAttr) {
   23156 		m["out_type"] = value
   23157 	}
   23158 }
   23159 
   23160 // Serialize a `SparseTensor` into a `[3]` `Tensor` object.
   23161 //
   23162 // Arguments:
   23163 //	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
   23164 //	sparse_values: 1-D.  The `values` of the `SparseTensor`.
   23165 //	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
   23166 func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output) {
   23167 	if scope.Err() != nil {
   23168 		return
   23169 	}
   23170 	attrs := map[string]interface{}{}
   23171 	for _, a := range optional {
   23172 		a(attrs)
   23173 	}
   23174 	opspec := tf.OpSpec{
   23175 		Type: "SerializeSparse",
   23176 		Input: []tf.Input{
   23177 			sparse_indices, sparse_values, sparse_shape,
   23178 		},
   23179 		Attrs: attrs,
   23180 	}
   23181 	op := scope.AddOperation(opspec)
   23182 	return op.Output(0)
   23183 }
   23184 
   23185 // RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
   23186 type RandomShuffleQueueV2Attr func(optionalAttr)
   23187 
   23188 // RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
   23189 //
   23190 // value: The shape of each component in a value. The length of this attr must
   23191 // be either 0 or the same as the length of component_types. If the length of
   23192 // this attr is 0, the shapes of queue elements are not constrained, and
   23193 // only one element may be dequeued at a time.
   23194 // If not specified, defaults to <>
   23195 //
   23196 // REQUIRES: len(value) >= 0
   23197 func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr {
   23198 	return func(m optionalAttr) {
   23199 		m["shapes"] = value
   23200 	}
   23201 }
   23202 
   23203 // RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
   23204 //
   23205 // value: The upper bound on the number of elements in this queue.
   23206 // Negative numbers mean no limit.
   23207 // If not specified, defaults to -1
   23208 func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr {
   23209 	return func(m optionalAttr) {
   23210 		m["capacity"] = value
   23211 	}
   23212 }
   23213 
   23214 // RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
   23215 //
   23216 // value: Dequeue will block unless there would be this
   23217 // many elements after the dequeue or the queue is closed. This
   23218 // ensures a minimum level of mixing of elements.
   23219 // If not specified, defaults to 0
   23220 func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr {
   23221 	return func(m optionalAttr) {
   23222 		m["min_after_dequeue"] = value
   23223 	}
   23224 }
   23225 
   23226 // RandomShuffleQueueV2Seed sets the optional seed attribute to value.
   23227 //
   23228 // value: If either seed or seed2 is set to be non-zero, the random number
   23229 // generator is seeded by the given seed.  Otherwise, a random seed is used.
   23230 // If not specified, defaults to 0
   23231 func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr {
   23232 	return func(m optionalAttr) {
   23233 		m["seed"] = value
   23234 	}
   23235 }
   23236 
   23237 // RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
   23238 //
   23239 // value: A second seed to avoid seed collision.
   23240 // If not specified, defaults to 0
   23241 func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr {
   23242 	return func(m optionalAttr) {
   23243 		m["seed2"] = value
   23244 	}
   23245 }
   23246 
   23247 // RandomShuffleQueueV2Container sets the optional container attribute to value.
   23248 //
   23249 // value: If non-empty, this queue is placed in the given container.
   23250 // Otherwise, a default container is used.
   23251 // If not specified, defaults to ""
   23252 func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr {
   23253 	return func(m optionalAttr) {
   23254 		m["container"] = value
   23255 	}
   23256 }
   23257 
   23258 // RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
   23259 //
   23260 // value: If non-empty, this queue will be shared under the given name
   23261 // across multiple sessions.
   23262 // If not specified, defaults to ""
   23263 func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr {
   23264 	return func(m optionalAttr) {
   23265 		m["shared_name"] = value
   23266 	}
   23267 }
   23268 
   23269 // A queue that randomizes the order of elements.
   23270 //
   23271 // Arguments:
   23272 //	component_types: The type of each component in a value.
   23273 //
   23274 // Returns The handle to the queue.
   23275 func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output) {
   23276 	if scope.Err() != nil {
   23277 		return
   23278 	}
   23279 	attrs := map[string]interface{}{"component_types": component_types}
   23280 	for _, a := range optional {
   23281 		a(attrs)
   23282 	}
   23283 	opspec := tf.OpSpec{
   23284 		Type: "RandomShuffleQueueV2",
   23285 
   23286 		Attrs: attrs,
   23287 	}
   23288 	op := scope.AddOperation(opspec)
   23289 	return op.Output(0)
   23290 }
   23291 
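// Example (hand-written, not part of the generated docs): a sketch combining
// several of the optional attributes above, then closing the queue with
// QueueCloseV2; values are illustrative.
//
// ```go
// s := op.NewScope()
// queue := op.RandomShuffleQueueV2(s, []tf.DataType{tf.Float},
// 	op.RandomShuffleQueueV2Capacity(100),
// 	op.RandomShuffleQueueV2MinAfterDequeue(10),
// 	op.RandomShuffleQueueV2Seed(7))
// closeOp := op.QueueCloseV2(s, queue,
// 	op.QueueCloseV2CancelPendingEnqueues(true))
// // Run `closeOp` as a target once no more elements will be enqueued.
// ```
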
   23292 // Draw bounding boxes on a batch of images.
   23293 //
   23294 // Outputs a copy of `images` but draws on top of the pixels zero or more bounding
23295 // boxes specified by the locations in `boxes`. The coordinates of each
   23296 // bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
   23297 // bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
   23298 // height of the underlying image.
   23299 //
   23300 // For example, if an image is 100 x 200 pixels (height x width) and the bounding
   23301 // box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
23302 // the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
   23303 //
   23304 // Parts of the bounding box may fall outside the image.
   23305 //
   23306 // Arguments:
   23307 //	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
   23308 //	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
   23309 // boxes.
   23310 //
   23311 // Returns 4-D with the same shape as `images`. The batch of input images with
   23312 // bounding boxes drawn on the images.
   23313 func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output) {
   23314 	if scope.Err() != nil {
   23315 		return
   23316 	}
   23317 	opspec := tf.OpSpec{
   23318 		Type: "DrawBoundingBoxes",
   23319 		Input: []tf.Input{
   23320 			images, boxes,
   23321 		},
   23322 	}
   23323 	op := scope.AddOperation(opspec)
   23324 	return op.Output(0)
   23325 }
   23326 
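// Example (hand-written, not part of the generated docs): a sketch drawing the
// box from the comment above on a batch of one 100x200 image; shapes are
// illustrative.
//
// ```go
// s := op.NewScope()
// images := op.Placeholder(s, tf.Float,
// 	op.PlaceholderShape(tf.MakeShape(1, 100, 200, 3)))
// boxes := op.Const(s, [][][]float32{{{0.1, 0.2, 0.5, 0.9}}})
// drawn := op.DrawBoundingBoxes(s, images, boxes)
// ```
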
   23327 // LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
   23328 type LearnedUnigramCandidateSamplerAttr func(optionalAttr)
   23329 
   23330 // LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
   23331 //
   23332 // value: If either seed or seed2 are set to be non-zero, the random number
   23333 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   23334 // random seed.
   23335 // If not specified, defaults to 0
   23336 func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr {
   23337 	return func(m optionalAttr) {
   23338 		m["seed"] = value
   23339 	}
   23340 }
   23341 
   23342 // LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
   23343 //
23344 // value: A second seed to avoid seed collision.
   23345 // If not specified, defaults to 0
   23346 func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr {
   23347 	return func(m optionalAttr) {
   23348 		m["seed2"] = value
   23349 	}
   23350 }
   23351 
   23352 // Generates labels for candidate sampling with a learned unigram distribution.
   23353 //
   23354 // See explanations of candidate sampling and the data formats at
   23355 // go/candidate-sampling.
   23356 //
   23357 // For each batch, this op picks a single set of sampled candidate labels.
   23358 //
   23359 // The advantages of sampling candidates per-batch are simplicity and the
   23360 // possibility of efficient dense matrix multiplication. The disadvantage is that
   23361 // the sampled candidates must be chosen independently of the context and of the
   23362 // true labels.
   23363 //
   23364 // Arguments:
   23365 //	true_classes: A batch_size * num_true matrix, in which each row contains the
   23366 // IDs of the num_true target_classes in the corresponding original label.
   23367 //	num_true: Number of true labels per context.
   23368 //	num_sampled: Number of candidates to randomly sample.
   23369 //	unique: If unique is true, we sample with rejection, so that all sampled
   23370 // candidates in a batch are unique. This requires some approximation to
   23371 // estimate the post-rejection sampling probabilities.
   23372 //	range_max: The sampler will sample integers from the interval [0, range_max).
   23373 //
23374 // Returns A vector of length num_sampled, in which each element is
23375 // the ID of a sampled candidate; a batch_size * num_true matrix, representing
23376 // the number of times each candidate is expected to occur in a batch
23377 // of sampled candidates (if unique=true, then this is a probability); and
23378 // a vector of length num_sampled, for each sampled candidate representing
23379 // the number of times the candidate is expected to occur in a batch of
23380 // sampled candidates.  If unique=true, then this is a probability.
   23381 func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
   23382 	if scope.Err() != nil {
   23383 		return
   23384 	}
   23385 	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
   23386 	for _, a := range optional {
   23387 		a(attrs)
   23388 	}
   23389 	opspec := tf.OpSpec{
   23390 		Type: "LearnedUnigramCandidateSampler",
   23391 		Input: []tf.Input{
   23392 			true_classes,
   23393 		},
   23394 		Attrs: attrs,
   23395 	}
   23396 	op := scope.AddOperation(opspec)
   23397 	return op.Output(0), op.Output(1), op.Output(2)
   23398 }
   23399 
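// Example (hand-written, not part of the generated docs): a sketch sampling
// 10 unique candidate IDs from [0, 1000) for a batch of two examples with one
// true label each; numbers are illustrative.
//
// ```go
// s := op.NewScope()
// trueClasses := op.Const(s, [][]int64{{17}, {23}})
// sampled, trueExpected, sampledExpected := op.LearnedUnigramCandidateSampler(
// 	s, trueClasses, 1, 10, true, 1000,
// 	op.LearnedUnigramCandidateSamplerSeed(1))
// // Fetch all three outputs via Session.Run; sampled has length 10.
// ```
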
   23400 // Computes gradients for the scaled exponential linear (Selu) operation.
   23401 //
   23402 // Arguments:
   23403 //	gradients: The backpropagated gradients to the corresponding Selu operation.
   23404 //	outputs: The outputs of the corresponding Selu operation.
   23405 //
   23406 // Returns The gradients: `gradients * (outputs + scale * alpha)`
   23407 // if outputs < 0, `scale * gradients` otherwise.
   23408 func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
   23409 	if scope.Err() != nil {
   23410 		return
   23411 	}
   23412 	opspec := tf.OpSpec{
   23413 		Type: "SeluGrad",
   23414 		Input: []tf.Input{
   23415 			gradients, outputs,
   23416 		},
   23417 	}
   23418 	op := scope.AddOperation(opspec)
   23419 	return op.Output(0)
   23420 }
   23421 
   23422 // Get the current size of the TensorArray.
   23423 //
   23424 // Arguments:
   23425 //	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
   23426 //	flow_in: A float scalar that enforces proper chaining of operations.
   23427 //
   23428 // Returns The current size of the TensorArray.
   23429 func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
   23430 	if scope.Err() != nil {
   23431 		return
   23432 	}
   23433 	opspec := tf.OpSpec{
   23434 		Type: "TensorArraySizeV3",
   23435 		Input: []tf.Input{
   23436 			handle, flow_in,
   23437 		},
   23438 	}
   23439 	op := scope.AddOperation(opspec)
   23440 	return op.Output(0)
   23441 }
   23442 
23443 // Deprecated. Use TensorArrayWriteV3
   23444 //
   23445 // DEPRECATED at GraphDef version 26: Use TensorArrayWriteV3
   23446 func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
   23447 	if scope.Err() != nil {
   23448 		return
   23449 	}
   23450 	opspec := tf.OpSpec{
   23451 		Type: "TensorArrayWriteV2",
   23452 		Input: []tf.Input{
   23453 			handle, index, value, flow_in,
   23454 		},
   23455 	}
   23456 	op := scope.AddOperation(opspec)
   23457 	return op.Output(0)
   23458 }
   23459 
   23460 // SparseReduceMaxAttr is an optional argument to SparseReduceMax.
   23461 type SparseReduceMaxAttr func(optionalAttr)
   23462 
   23463 // SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
   23464 //
   23465 // value: If true, retain reduced dimensions with length 1.
   23466 // If not specified, defaults to false
   23467 func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr {
   23468 	return func(m optionalAttr) {
   23469 		m["keep_dims"] = value
   23470 	}
   23471 }
   23472 
   23473 // Computes the max of elements across dimensions of a SparseTensor.
   23474 //
   23475 // This Op takes a SparseTensor and is the sparse counterpart to
   23476 // `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
   23477 // instead of a sparse one.
   23478 //
   23479 // Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
   23480 // `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
   23481 // `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
   23482 // with length 1.
   23483 //
   23484 // If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
   23485 // with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to the indexing rules in Python.
   23487 //
   23488 // Arguments:
   23489 //	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   23490 // SparseTensor, possibly not in canonical ordering.
   23491 //	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
   23492 //	input_shape: 1-D.  Shape of the input SparseTensor.
   23493 //	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
   23494 //
   23495 // Returns `R-K`-D.  The reduced Tensor.
   23496 func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output) {
   23497 	if scope.Err() != nil {
   23498 		return
   23499 	}
   23500 	attrs := map[string]interface{}{}
   23501 	for _, a := range optional {
   23502 		a(attrs)
   23503 	}
   23504 	opspec := tf.OpSpec{
   23505 		Type: "SparseReduceMax",
   23506 		Input: []tf.Input{
   23507 			input_indices, input_values, input_shape, reduction_axes,
   23508 		},
   23509 		Attrs: attrs,
   23510 	}
   23511 	op := scope.AddOperation(opspec)
   23512 	return op.Output(0)
   23513 }
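
// Example usage (a minimal sketch, assuming a Scope s, e.g. from NewScope;
// the SparseTensor components below are illustrative, not part of this API):
//
//	// A 2x3 SparseTensor with entries {(0,0): 1, (0,2): 3, (1,1): 5}.
//	indices := Const(s, [][]int64{{0, 0}, {0, 2}, {1, 1}})
//	values := Const(s, []int32{1, 3, 5})
//	shape := Const(s, []int64{2, 3})
//	axes := Const(s, []int32{1}) // reduce along the column axis
//	// Dense result: [[3], [5]], since KeepDims retains the reduced axis.
//	reduced := SparseReduceMax(s, indices, values, shape, axes,
//		SparseReduceMaxKeepDims(true))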
   23514 
   23515 // AsStringAttr is an optional argument to AsString.
   23516 type AsStringAttr func(optionalAttr)
   23517 
   23518 // AsStringPrecision sets the optional precision attribute to value.
   23519 //
   23520 // value: The post-decimal precision to use for floating point numbers.
   23521 // Only used if precision > -1.
   23522 // If not specified, defaults to -1
   23523 func AsStringPrecision(value int64) AsStringAttr {
   23524 	return func(m optionalAttr) {
   23525 		m["precision"] = value
   23526 	}
   23527 }
   23528 
   23529 // AsStringScientific sets the optional scientific attribute to value.
   23530 //
   23531 // value: Use scientific notation for floating point numbers.
   23532 // If not specified, defaults to false
   23533 func AsStringScientific(value bool) AsStringAttr {
   23534 	return func(m optionalAttr) {
   23535 		m["scientific"] = value
   23536 	}
   23537 }
   23538 
   23539 // AsStringShortest sets the optional shortest attribute to value.
   23540 //
   23541 // value: Use shortest representation (either scientific or standard) for
   23542 // floating point numbers.
   23543 // If not specified, defaults to false
   23544 func AsStringShortest(value bool) AsStringAttr {
   23545 	return func(m optionalAttr) {
   23546 		m["shortest"] = value
   23547 	}
   23548 }
   23549 
   23550 // AsStringWidth sets the optional width attribute to value.
   23551 //
   23552 // value: Pad pre-decimal numbers to this width.
   23553 // Applies to both floating point and integer numbers.
   23554 // Only used if width > -1.
   23555 // If not specified, defaults to -1
   23556 func AsStringWidth(value int64) AsStringAttr {
   23557 	return func(m optionalAttr) {
   23558 		m["width"] = value
   23559 	}
   23560 }
   23561 
   23562 // AsStringFill sets the optional fill attribute to value.
   23563 //
   23564 // value: The value to pad if width > -1.  If empty, pads with spaces.
// Another typical value is '0'.  The string cannot be longer than 1 character.
   23566 // If not specified, defaults to ""
   23567 func AsStringFill(value string) AsStringAttr {
   23568 	return func(m optionalAttr) {
   23569 		m["fill"] = value
   23570 	}
   23571 }
   23572 
// Converts each entry in the given tensor to strings.
//
// Supports many numeric types and boolean.
   23576 func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output) {
   23577 	if scope.Err() != nil {
   23578 		return
   23579 	}
   23580 	attrs := map[string]interface{}{}
   23581 	for _, a := range optional {
   23582 		a(attrs)
   23583 	}
   23584 	opspec := tf.OpSpec{
   23585 		Type: "AsString",
   23586 		Input: []tf.Input{
   23587 			input,
   23588 		},
   23589 		Attrs: attrs,
   23590 	}
   23591 	op := scope.AddOperation(opspec)
   23592 	return op.Output(0)
   23593 }
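
// Example usage (a minimal sketch, assuming a Scope s; the attribute values
// are illustrative):
//
//	x := Const(s, []float32{3.14159, 2.5})
//	// With precision 2, width 6, and fill '0', the entries should come out
//	// as "003.14" and "002.50".
//	strs := AsString(s, x,
//		AsStringPrecision(2),
//		AsStringWidth(6),
//		AsStringFill("0"))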
   23594 
   23595 // Deprecated. Use TensorArrayScatterV3
   23596 //
   23597 // DEPRECATED at GraphDef version 26: Use TensorArrayScatterV3
   23598 func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
   23599 	if scope.Err() != nil {
   23600 		return
   23601 	}
   23602 	opspec := tf.OpSpec{
   23603 		Type: "TensorArrayScatterV2",
   23604 		Input: []tf.Input{
   23605 			handle, indices, value, flow_in,
   23606 		},
   23607 	}
   23608 	op := scope.AddOperation(opspec)
   23609 	return op.Output(0)
   23610 }
   23611 
   23612 // Applies sparse addition to `input` using individual values or slices
   23613 //
// from `updates` according to `indices`.  The updates are non-aliasing:
   23615 // `input` is only modified in-place if no other operations will use it.
   23616 // Otherwise, a copy of `input` is made.  This operation has a gradient with
   23617 // respect to both `input` and `updates`.
   23618 //
   23619 // `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
   23620 //
// `indices` must be an integer tensor, containing indices into `input`.
   23622 // It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
   23623 //
   23624 // The innermost dimension of `indices` (with length `K`) corresponds to
   23625 // indices into elements (if `K = P`) or `(P-K)`-dimensional slices
   23626 // (if `K < P`) along the `K`th dimension of `input`.
   23627 //
// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
   23629 //
   23630 // ```
   23631 // [d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
   23632 // ```
   23633 //
// For example, say we want to add 4 scattered elements to a rank-1 tensor
// with 8 elements. In Python, that addition would look like this:
   23636 //
   23637 //     input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
   23638 //     indices = tf.constant([[4], [3], [1], [7]])
   23639 //     updates = tf.constant([9, 10, 11, 12])
   23640 //     output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
   23641 //     with tf.Session() as sess:
   23642 //       print(sess.run(output))
   23643 //
   23644 // The resulting value `output` would look like this:
   23645 //
   23646 //     [1, 13, 3, 14, 14, 6, 7, 20]
   23647 //
// See `tf.scatter_nd` for more details about how to make updates to slices.
   23649 //
   23650 // Arguments:
   23651 //	input: A Tensor.
   23652 //	indices: A Tensor. Must be one of the following types: `int32`, `int64`.
   23653 // A tensor of indices into `input`.
//	updates: A Tensor. Must have the same type as `input`. A tensor of updated
// values to add to `input`.
   23656 //
   23657 // Returns A `Tensor` with the same shape as `input`, containing values of `input`
   23658 // updated with `updates`.
   23659 func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
   23660 	if scope.Err() != nil {
   23661 		return
   23662 	}
   23663 	opspec := tf.OpSpec{
   23664 		Type: "ScatterNdNonAliasingAdd",
   23665 		Input: []tf.Input{
   23666 			input, indices, updates,
   23667 		},
   23668 	}
   23669 	op := scope.AddOperation(opspec)
   23670 	return op.Output(0)
   23671 }
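
// The Python example above, transcribed as a sketch in Go (assuming a
// Scope s):
//
//	input := Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8})
//	indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
//	updates := Const(s, []int32{9, 10, 11, 12})
//	// output holds [1, 13, 3, 14, 14, 6, 7, 20] once the graph is run.
//	output := ScatterNdNonAliasingAdd(s, input, indices, updates)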
   23672 
   23673 // FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
   23674 type FractionalMaxPoolAttr func(optionalAttr)
   23675 
   23676 // FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
   23677 //
   23678 // value: When set to True, generates the pooling sequence in a
// pseudorandom fashion, otherwise, in a random fashion. See the paper [Benjamin
// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
// difference between pseudorandom and random.
   23682 // If not specified, defaults to false
   23683 func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
   23684 	return func(m optionalAttr) {
   23685 		m["pseudo_random"] = value
   23686 	}
   23687 }
   23688 
   23689 // FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
   23690 //
   23691 // value: When set to True, it means when pooling, the values at the boundary
   23692 // of adjacent pooling cells are used by both cells. For example:
   23693 //
   23694 // `index  0  1  2  3  4`
   23695 //
   23696 // `value  20 5  16 3  7`
   23697 //
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
   23699 // The result would be [20, 16] for fractional max pooling.
   23700 // If not specified, defaults to false
   23701 func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
   23702 	return func(m optionalAttr) {
   23703 		m["overlapping"] = value
   23704 	}
   23705 }
   23706 
   23707 // FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
   23708 //
   23709 // value: When set to True, a fixed pooling region will be used when
   23710 // iterating over a FractionalMaxPool node in the computation graph. Mainly used
// in unit tests to make FractionalMaxPool deterministic.
   23712 // If not specified, defaults to false
   23713 func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
   23714 	return func(m optionalAttr) {
   23715 		m["deterministic"] = value
   23716 	}
   23717 }
   23718 
   23719 // FractionalMaxPoolSeed sets the optional seed attribute to value.
   23720 //
   23721 // value: If either seed or seed2 are set to be non-zero, the random number
   23722 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   23723 // random seed.
   23724 // If not specified, defaults to 0
   23725 func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
   23726 	return func(m optionalAttr) {
   23727 		m["seed"] = value
   23728 	}
   23729 }
   23730 
   23731 // FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
   23732 //
// value: A second seed to avoid seed collision.
   23734 // If not specified, defaults to 0
   23735 func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
   23736 	return func(m optionalAttr) {
   23737 		m["seed2"] = value
   23738 	}
   23739 }
   23740 
   23741 // Performs fractional max pooling on the input.
   23742 //
// Fractional max pooling is slightly different from regular max pooling.  In
   23744 // regular max pooling, you downsize an input set by taking the maximum value of
   23745 // smaller N x N subsections of the set (often 2x2), and try to reduce the set by
   23746 // a factor of N, where N is an integer.  Fractional max pooling, as you might
   23747 // expect from the word "fractional", means that the overall reduction ratio N
   23748 // does not have to be an integer.
   23749 //
   23750 // The sizes of the pooling regions are generated randomly but are fairly uniform.
   23751 // For example, let's look at the height dimension, and the constraints on the
   23752 // list of rows that will be pool boundaries.
   23753 //
   23754 // First we define the following:
   23755 //
   23756 // 1.  input_row_length : the number of rows from the input set
// 2.  output_row_length : the number of rows in the output, which will be smaller than the input
   23758 // 3.  alpha = input_row_length / output_row_length : our reduction ratio
   23759 // 4.  K = floor(alpha)
   23760 // 5.  row_pooling_sequence : this is the result list of pool boundary rows
   23761 //
   23762 // Then, row_pooling_sequence should satisfy:
   23763 //
   23764 // 1.  a[0] = 0 : the first value of the sequence is 0
   23765 // 2.  a[end] = input_row_length : the last value of the sequence is the size
   23766 // 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
   23767 // 4.  length(row_pooling_sequence) = output_row_length+1
   23768 //
   23769 // For more details on fractional max pooling, see this paper:
   23770 // [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
   23771 //
   23772 // Arguments:
   23773 //	value: 4-D with shape `[batch, height, width, channels]`.
//	pooling_ratio: Pooling ratio for each dimension of `value`. Currently only
// supports the row and col dimensions and should be >= 1.0. For example, a
// valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last
// elements must be 1.0 because we don't allow pooling on the batch and
// channels dimensions. 1.44 and 1.73 are the pooling ratios on the height and
// width dimensions respectively.
//
// Returns The output tensor after fractional max pooling, the row pooling
// sequence (needed to calculate the gradient), and the column pooling
// sequence (needed to calculate the gradient).
   23782 func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
   23783 	if scope.Err() != nil {
   23784 		return
   23785 	}
   23786 	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
   23787 	for _, a := range optional {
   23788 		a(attrs)
   23789 	}
   23790 	opspec := tf.OpSpec{
   23791 		Type: "FractionalMaxPool",
   23792 		Input: []tf.Input{
   23793 			value,
   23794 		},
   23795 		Attrs: attrs,
   23796 	}
   23797 	op := scope.AddOperation(opspec)
   23798 	return op.Output(0), op.Output(1), op.Output(2)
   23799 }
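
// Example usage (a minimal sketch, assuming a Scope s and a 4-D image batch
// images of shape [batch, height, width, channels] already in the graph):
//
//	// Shrink height by ~1.44x and width by ~1.73x; the batch and channel
//	// ratios must stay 1.0.
//	pooled, rowSeq, colSeq := FractionalMaxPool(s, images,
//		[]float32{1.0, 1.44, 1.73, 1.0},
//		FractionalMaxPoolOverlapping(true))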
   23800 
   23801 // Deprecated. Use TensorArraySizeV3
   23802 //
   23803 // DEPRECATED at GraphDef version 26: Use TensorArraySizeV3
   23804 func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
   23805 	if scope.Err() != nil {
   23806 		return
   23807 	}
   23808 	opspec := tf.OpSpec{
   23809 		Type: "TensorArraySizeV2",
   23810 		Input: []tf.Input{
   23811 			handle, flow_in,
   23812 		},
   23813 	}
   23814 	op := scope.AddOperation(opspec)
   23815 	return op.Output(0)
   23816 }
   23817 
   23818 // Conv2DAttr is an optional argument to Conv2D.
   23819 type Conv2DAttr func(optionalAttr)
   23820 
   23821 // Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
   23822 // If not specified, defaults to true
   23823 func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr {
   23824 	return func(m optionalAttr) {
   23825 		m["use_cudnn_on_gpu"] = value
   23826 	}
   23827 }
   23828 
   23829 // Conv2DDataFormat sets the optional data_format attribute to value.
   23830 //
   23831 // value: Specify the data format of the input and output data. With the
   23832 // default format "NHWC", the data is stored in the order of:
   23833 //     [batch, height, width, channels].
   23834 // Alternatively, the format could be "NCHW", the data storage order of:
   23835 //     [batch, channels, height, width].
   23836 // If not specified, defaults to "NHWC"
   23837 func Conv2DDataFormat(value string) Conv2DAttr {
   23838 	return func(m optionalAttr) {
   23839 		m["data_format"] = value
   23840 	}
   23841 }
   23842 
   23843 // Conv2DDilations sets the optional dilations attribute to value.
   23844 //
   23845 // value: 1-D tensor of length 4.  The dilation factor for each dimension of
   23846 // `input`. If set to k > 1, there will be k-1 skipped cells between each
   23847 // filter element on that dimension. The dimension order is determined by the
   23848 // value of `data_format`, see above for details. Dilations in the batch and
   23849 // depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1]
   23851 func Conv2DDilations(value []int64) Conv2DAttr {
   23852 	return func(m optionalAttr) {
   23853 		m["dilations"] = value
   23854 	}
   23855 }
   23856 
   23857 // Computes a 2-D convolution given 4-D `input` and `filter` tensors.
   23858 //
   23859 // Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
   23860 // and a filter / kernel tensor of shape
   23861 // `[filter_height, filter_width, in_channels, out_channels]`, this op
   23862 // performs the following:
   23863 //
   23864 // 1. Flattens the filter to a 2-D matrix with shape
   23865 //    `[filter_height * filter_width * in_channels, output_channels]`.
   23866 // 2. Extracts image patches from the input tensor to form a *virtual*
   23867 //    tensor of shape `[batch, out_height, out_width,
   23868 //    filter_height * filter_width * in_channels]`.
   23869 // 3. For each patch, right-multiplies the filter matrix and the image patch
   23870 //    vector.
   23871 //
   23872 // In detail, with the default NHWC format,
   23873 //
   23874 //     output[b, i, j, k] =
   23875 //         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
   23876 //                         filter[di, dj, q, k]
   23877 //
   23878 // Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
   23880 //
   23881 // Arguments:
   23882 //	input: A 4-D tensor. The dimension order is interpreted according to the value
   23883 // of `data_format`, see below for details.
   23884 //	filter: A 4-D tensor of shape
   23885 // `[filter_height, filter_width, in_channels, out_channels]`
   23886 //	strides: 1-D tensor of length 4.  The stride of the sliding window for each
   23887 // dimension of `input`. The dimension order is determined by the value of
   23888 // `data_format`, see below for details.
   23889 //	padding: The type of padding algorithm to use.
   23890 //
   23891 // Returns A 4-D tensor. The dimension order is determined by the value of
   23892 // `data_format`, see below for details.
   23893 func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output) {
   23894 	if scope.Err() != nil {
   23895 		return
   23896 	}
   23897 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   23898 	for _, a := range optional {
   23899 		a(attrs)
   23900 	}
   23901 	opspec := tf.OpSpec{
   23902 		Type: "Conv2D",
   23903 		Input: []tf.Input{
   23904 			input, filter,
   23905 		},
   23906 		Attrs: attrs,
   23907 	}
   23908 	op := scope.AddOperation(opspec)
   23909 	return op.Output(0)
   23910 }
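
// Example usage (a minimal sketch, assuming a Scope s, an NHWC input images
// of shape [batch, 28, 28, 1], and a filter tensor kernel of shape
// [3, 3, 1, 32]; all names are illustrative):
//
//	conv := Conv2D(s, images, kernel,
//		[]int64{1, 1, 1, 1}, // stride 1 on every dimension
//		"SAME",              // pad so the spatial size is preserved
//		Conv2DDataFormat("NHWC"))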
   23911 
   23912 // FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
   23913 type FakeQuantWithMinMaxArgsAttr func(optionalAttr)
   23914 
   23915 // FakeQuantWithMinMaxArgsMin sets the optional min attribute to value.
   23916 // If not specified, defaults to -6
   23917 func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr {
   23918 	return func(m optionalAttr) {
   23919 		m["min"] = value
   23920 	}
   23921 }
   23922 
   23923 // FakeQuantWithMinMaxArgsMax sets the optional max attribute to value.
   23924 // If not specified, defaults to 6
   23925 func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr {
   23926 	return func(m optionalAttr) {
   23927 		m["max"] = value
   23928 	}
   23929 }
   23930 
   23931 // FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value.
   23932 // If not specified, defaults to 8
   23933 func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
   23934 	return func(m optionalAttr) {
   23935 		m["num_bits"] = value
   23936 	}
   23937 }
   23938 
   23939 // FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
   23940 // If not specified, defaults to false
   23941 func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
   23942 	return func(m optionalAttr) {
   23943 		m["narrow_range"] = value
   23944 	}
   23945 }
   23946 
// Fake-quantize the 'inputs' tensor of type float to an 'outputs' tensor of the same type.
   23948 //
   23949 // Attributes `[min; max]` define the clamping range for the `inputs` data.
   23950 // `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
   23951 // when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
   23952 // then de-quantized and output as floats in `[min; max]` interval.
   23953 // `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
   23954 //
   23955 // Quantization is called fake since the output is still in floating point.
   23956 func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
   23957 	if scope.Err() != nil {
   23958 		return
   23959 	}
   23960 	attrs := map[string]interface{}{}
   23961 	for _, a := range optional {
   23962 		a(attrs)
   23963 	}
   23964 	opspec := tf.OpSpec{
   23965 		Type: "FakeQuantWithMinMaxArgs",
   23966 		Input: []tf.Input{
   23967 			inputs,
   23968 		},
   23969 		Attrs: attrs,
   23970 	}
   23971 	op := scope.AddOperation(opspec)
   23972 	return op.Output(0)
   23973 }
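
// Example usage (a minimal sketch, assuming a Scope s):
//
//	x := Const(s, []float32{-7.0, 0.0, 2.5, 9.0})
//	// Clamps to [-6, 6] and simulates 8-bit quantization; the output is
//	// still float32.
//	q := FakeQuantWithMinMaxArgs(s, x,
//		FakeQuantWithMinMaxArgsMin(-6),
//		FakeQuantWithMinMaxArgsMax(6),
//		FakeQuantWithMinMaxArgsNumBits(8))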
   23974 
   23975 // StageAttr is an optional argument to Stage.
   23976 type StageAttr func(optionalAttr)
   23977 
   23978 // StageCapacity sets the optional capacity attribute to value.
   23979 //
   23980 // value: Maximum number of elements in the Staging Area. If > 0, inserts
   23981 // on the container will block when the capacity is reached.
   23982 // If not specified, defaults to 0
   23983 //
   23984 // REQUIRES: value >= 0
   23985 func StageCapacity(value int64) StageAttr {
   23986 	return func(m optionalAttr) {
   23987 		m["capacity"] = value
   23988 	}
   23989 }
   23990 
   23991 // StageMemoryLimit sets the optional memory_limit attribute to value.
   23992 //
   23993 // value: The maximum number of bytes allowed for Tensors in the Staging Area.
   23994 // If > 0, inserts will block until sufficient space is available.
   23995 // If not specified, defaults to 0
   23996 //
   23997 // REQUIRES: value >= 0
   23998 func StageMemoryLimit(value int64) StageAttr {
   23999 	return func(m optionalAttr) {
   24000 		m["memory_limit"] = value
   24001 	}
   24002 }
   24003 
   24004 // StageContainer sets the optional container attribute to value.
   24005 //
   24006 // value: If non-empty, this queue is placed in the given container. Otherwise,
   24007 // a default container is used.
   24008 // If not specified, defaults to ""
   24009 func StageContainer(value string) StageAttr {
   24010 	return func(m optionalAttr) {
   24011 		m["container"] = value
   24012 	}
   24013 }
   24014 
   24015 // StageSharedName sets the optional shared_name attribute to value.
   24016 //
// value: This name must match the name of the corresponding Unstage Op.
   24018 // If not specified, defaults to ""
   24019 func StageSharedName(value string) StageAttr {
   24020 	return func(m optionalAttr) {
   24021 		m["shared_name"] = value
   24022 	}
   24023 }
   24024 
   24025 // Stage values similar to a lightweight Enqueue.
   24026 //
   24027 // The basic functionality of this Op is similar to a queue with many
   24028 // fewer capabilities and options.  This Op is optimized for performance.
   24029 //
   24030 // Arguments:
//	values: a list of tensors
//	dtypes: a list of data types that inserted values should adhere to.
   24033 //
   24034 // Returns the created operation.
   24035 func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
   24036 	if scope.Err() != nil {
   24037 		return
   24038 	}
   24039 	attrs := map[string]interface{}{}
   24040 	for _, a := range optional {
   24041 		a(attrs)
   24042 	}
   24043 	opspec := tf.OpSpec{
   24044 		Type: "Stage",
   24045 		Input: []tf.Input{
   24046 			tf.OutputList(values),
   24047 		},
   24048 		Attrs: attrs,
   24049 	}
   24050 	return scope.AddOperation(opspec)
   24051 }
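
// Example usage (a minimal sketch, assuming a Scope s; the shared_name is
// illustrative and would be reused by the matching Unstage op):
//
//	batch := Const(s, []float32{1, 2, 3})
//	stage := Stage(s, []tf.Output{batch},
//		StageCapacity(16),
//		StageSharedName("prefetch_area"))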
   24052 
   24053 // StagePeekAttr is an optional argument to StagePeek.
   24054 type StagePeekAttr func(optionalAttr)
   24055 
   24056 // StagePeekCapacity sets the optional capacity attribute to value.
   24057 // If not specified, defaults to 0
   24058 //
   24059 // REQUIRES: value >= 0
   24060 func StagePeekCapacity(value int64) StagePeekAttr {
   24061 	return func(m optionalAttr) {
   24062 		m["capacity"] = value
   24063 	}
   24064 }
   24065 
   24066 // StagePeekMemoryLimit sets the optional memory_limit attribute to value.
   24067 // If not specified, defaults to 0
   24068 //
   24069 // REQUIRES: value >= 0
   24070 func StagePeekMemoryLimit(value int64) StagePeekAttr {
   24071 	return func(m optionalAttr) {
   24072 		m["memory_limit"] = value
   24073 	}
   24074 }
   24075 
   24076 // StagePeekContainer sets the optional container attribute to value.
   24077 // If not specified, defaults to ""
   24078 func StagePeekContainer(value string) StagePeekAttr {
   24079 	return func(m optionalAttr) {
   24080 		m["container"] = value
   24081 	}
   24082 }
   24083 
   24084 // StagePeekSharedName sets the optional shared_name attribute to value.
   24085 // If not specified, defaults to ""
   24086 func StagePeekSharedName(value string) StagePeekAttr {
   24087 	return func(m optionalAttr) {
   24088 		m["shared_name"] = value
   24089 	}
   24090 }
   24091 
// Op peeks at the values at the specified index.
//
// If the underlying container does not contain sufficient elements,
// this op will block until it does.  This Op is optimized for
// performance.
   24097 func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output) {
   24098 	if scope.Err() != nil {
   24099 		return
   24100 	}
   24101 	attrs := map[string]interface{}{"dtypes": dtypes}
   24102 	for _, a := range optional {
   24103 		a(attrs)
   24104 	}
   24105 	opspec := tf.OpSpec{
   24106 		Type: "StagePeek",
   24107 		Input: []tf.Input{
   24108 			index,
   24109 		},
   24110 		Attrs: attrs,
   24111 	}
   24112 	op := scope.AddOperation(opspec)
   24113 	if scope.Err() != nil {
   24114 		return
   24115 	}
   24116 	var idx int
   24117 	var err error
   24118 	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
   24119 		scope.UpdateErr("StagePeek", err)
   24120 		return
   24121 	}
   24122 	return values
   24123 }
   24124 
   24125 // Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
   24126 type Conv3DBackpropInputV2Attr func(optionalAttr)
   24127 
   24128 // Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
   24129 //
   24130 // value: The data format of the input and output data. With the
   24131 // default format "NDHWC", the data is stored in the order of:
   24132 //     [batch, in_depth, in_height, in_width, in_channels].
   24133 // Alternatively, the format could be "NCDHW", the data storage order is:
   24134 //     [batch, in_channels, in_depth, in_height, in_width].
   24135 // If not specified, defaults to "NDHWC"
   24136 func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr {
   24137 	return func(m optionalAttr) {
   24138 		m["data_format"] = value
   24139 	}
   24140 }
   24141 
   24142 // Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
   24143 //
   24144 // value: 1-D tensor of length 5.  The dilation factor for each dimension of
   24145 // `input`. If set to k > 1, there will be k-1 skipped cells between each
   24146 // filter element on that dimension. The dimension order is determined by the
   24147 // value of `data_format`, see above for details. Dilations in the batch and
   24148 // depth dimensions must be 1.
// If not specified, defaults to [1, 1, 1, 1, 1]
   24150 func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr {
   24151 	return func(m optionalAttr) {
   24152 		m["dilations"] = value
   24153 	}
   24154 }
   24155 
   24156 // Computes the gradients of 3-D convolution with respect to the input.
   24157 //
   24158 // Arguments:
   24159 //	input_sizes: An integer vector representing the tensor shape of `input`,
   24160 // where `input` is a 5-D
   24161 // `[batch, depth, rows, cols, in_channels]` tensor.
   24162 //	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
   24163 // `in_channels` must match between `input` and `filter`.
   24164 //	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
   24165 // out_channels]`.
   24166 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   24167 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   24168 //	padding: The type of padding algorithm to use.
   24169 func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) {
   24170 	if scope.Err() != nil {
   24171 		return
   24172 	}
   24173 	attrs := map[string]interface{}{"strides": strides, "padding": padding}
   24174 	for _, a := range optional {
   24175 		a(attrs)
   24176 	}
   24177 	opspec := tf.OpSpec{
   24178 		Type: "Conv3DBackpropInputV2",
   24179 		Input: []tf.Input{
   24180 			input_sizes, filter, out_backprop,
   24181 		},
   24182 		Attrs: attrs,
   24183 	}
   24184 	op := scope.AddOperation(opspec)
   24185 	return op.Output(0)
   24186 }
   24187 
   24188 // DepthToSpaceAttr is an optional argument to DepthToSpace.
   24189 type DepthToSpaceAttr func(optionalAttr)
   24190 
   24191 // DepthToSpaceDataFormat sets the optional data_format attribute to value.
   24192 // If not specified, defaults to "NHWC"
   24193 func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
   24194 	return func(m optionalAttr) {
   24195 		m["data_format"] = value
   24196 	}
   24197 }
   24198 
   24199 // DepthToSpace for tensors of type T.
   24200 //
   24201 // Rearranges data from depth into blocks of spatial data.
   24202 // This is the reverse transformation of SpaceToDepth. More specifically,
   24203 // this op outputs a copy of the input tensor where values from the `depth`
   24204 // dimension are moved in spatial blocks to the `height` and `width` dimensions.
   24205 // The attr `block_size` indicates the input block size and how the data is moved.
   24206 //
   24207 //   * Chunks of data of size `block_size * block_size` from depth are rearranged
   24208 //     into non-overlapping blocks of size `block_size x block_size`
//   * The width of the output tensor is `input_width * block_size`, whereas the
//     height is `input_height * block_size`.
   24211 //   * The Y, X coordinates within each block of the output image are determined
   24212 //     by the high order component of the input channel index.
   24213 //   * The depth of the input tensor must be divisible by
   24214 //     `block_size * block_size`.
   24215 //
   24216 // The `data_format` attr specifies the layout of the input and output tensors
   24217 // with the following options:
   24218 //   "NHWC": `[ batch, height, width, channels ]`
   24219 //   "NCHW": `[ batch, channels, height, width ]`
   24220 //   "NCHW_VECT_C":
   24221 //       `qint8 [ batch, channels / 4, height, width, 4 ]`
   24222 //
   24223 // It is useful to consider the operation as transforming a 6-D Tensor.
   24224 // e.g. for data_format = NHWC,
   24225 //      Each element in the input tensor can be specified via 6 coordinates,
   24226 //      ordered by decreasing memory layout significance as:
   24227 //      n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
   24228 //                         within the input image, bX, bY means coordinates
   24229 //                         within the output block, oC means output channels).
   24230 //      The output would be the input transposed to the following layout:
   24231 //      n,iY,bY,iX,bX,oC
   24232 //
   24233 // This operation is useful for resizing the activations between convolutions
   24234 // (but keeping all data), e.g. instead of pooling. It is also useful for training
   24235 // purely convolutional models.
   24236 //
   24237 // For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
   24238 // block_size = 2:
   24239 //
   24240 // ```
   24241 // x = [[[[1, 2, 3, 4]]]]
   24242 //
   24243 // ```
   24244 //
   24245 // This operation will output a tensor of shape `[1, 2, 2, 1]`:
   24246 //
   24247 // ```
   24248 //    [[[[1], [2]],
   24249 //      [[3], [4]]]]
   24250 // ```
   24251 //
   24252 // Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
   24253 // the corresponding output will have 2x2 elements and will have a depth of
   24254 // 1 channel (1 = `4 / (block_size * block_size)`).
   24255 // The output element shape is `[2, 2, 1]`.
   24256 //
   24257 // For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
   24258 //
   24259 // ```
   24260 // x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
   24261 // ```
   24262 //
   24263 // This operation, for block size of 2, will return the following tensor of shape
   24264 // `[1, 2, 2, 3]`
   24265 //
   24266 // ```
   24267 //    [[[[1, 2, 3], [4, 5, 6]],
   24268 //      [[7, 8, 9], [10, 11, 12]]]]
   24269 //
   24270 // ```
   24271 //
// Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:
   24273 //
   24274 // ```
   24275 // x =  [[[[1, 2, 3, 4],
   24276 //        [5, 6, 7, 8]],
   24277 //       [[9, 10, 11, 12],
   24278 //        [13, 14, 15, 16]]]]
   24279 // ```
   24280 //
// the operator will return the following tensor of shape `[1, 4, 4, 1]`:
   24282 //
   24283 // ```
   24284 // x = [[[ [1],   [2],  [5],  [6]],
   24285 //       [ [3],   [4],  [7],  [8]],
   24286 //       [ [9],  [10], [13],  [14]],
   24287 //       [ [11], [12], [15],  [16]]]]
   24288 //
   24289 // ```
   24290 //
   24291 // Arguments:
   24292 //
   24293 //	block_size: The size of the spatial block, same as in Space2Depth.
   24294 func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
   24295 	if scope.Err() != nil {
   24296 		return
   24297 	}
   24298 	attrs := map[string]interface{}{"block_size": block_size}
   24299 	for _, a := range optional {
   24300 		a(attrs)
   24301 	}
   24302 	opspec := tf.OpSpec{
   24303 		Type: "DepthToSpace",
   24304 		Input: []tf.Input{
   24305 			input,
   24306 		},
   24307 		Attrs: attrs,
   24308 	}
   24309 	op := scope.AddOperation(opspec)
   24310 	return op.Output(0)
   24311 }
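
// The first example above, transcribed as a sketch in Go (assuming a
// Scope s):
//
//	x := Const(s, [][][][]int32{{{{1, 2, 3, 4}}}}) // shape [1, 1, 1, 4]
//	// output has shape [1, 2, 2, 1]: [[[[1], [2]], [[3], [4]]]].
//	output := DepthToSpace(s, x, 2)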
   24312 
   24313 // MapStageAttr is an optional argument to MapStage.
   24314 type MapStageAttr func(optionalAttr)
   24315 
   24316 // MapStageCapacity sets the optional capacity attribute to value.
   24317 //
   24318 // value: Maximum number of elements in the Staging Area. If > 0, inserts
   24319 // on the container will block when the capacity is reached.
   24320 // If not specified, defaults to 0
   24321 //
   24322 // REQUIRES: value >= 0
   24323 func MapStageCapacity(value int64) MapStageAttr {
   24324 	return func(m optionalAttr) {
   24325 		m["capacity"] = value
   24326 	}
   24327 }
   24328 
   24329 // MapStageMemoryLimit sets the optional memory_limit attribute to value.
   24330 // If not specified, defaults to 0
   24331 //
   24332 // REQUIRES: value >= 0
   24333 func MapStageMemoryLimit(value int64) MapStageAttr {
   24334 	return func(m optionalAttr) {
   24335 		m["memory_limit"] = value
   24336 	}
   24337 }
   24338 
   24339 // MapStageContainer sets the optional container attribute to value.
   24340 //
   24341 // value: If non-empty, this queue is placed in the given container. Otherwise,
   24342 // a default container is used.
   24343 // If not specified, defaults to ""
   24344 func MapStageContainer(value string) MapStageAttr {
   24345 	return func(m optionalAttr) {
   24346 		m["container"] = value
   24347 	}
   24348 }
   24349 
   24350 // MapStageSharedName sets the optional shared_name attribute to value.
   24351 //
// value: This name must match the name of the corresponding Unstage Op.
   24353 // If not specified, defaults to ""
   24354 func MapStageSharedName(value string) MapStageAttr {
   24355 	return func(m optionalAttr) {
   24356 		m["shared_name"] = value
   24357 	}
   24358 }
   24359 
   24360 // Stage (key, values) in the underlying container which behaves like a hashtable.
   24361 //
   24362 // Arguments:
//	key: int64
//	values: a list of tensors
//	dtypes: a list of data types that inserted values should adhere to.
//
   24369 // Returns the created operation.
   24370 func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) {
   24371 	if scope.Err() != nil {
   24372 		return
   24373 	}
   24374 	attrs := map[string]interface{}{"dtypes": dtypes}
   24375 	for _, a := range optional {
   24376 		a(attrs)
   24377 	}
   24378 	opspec := tf.OpSpec{
   24379 		Type: "MapStage",
   24380 		Input: []tf.Input{
   24381 			key, indices, tf.OutputList(values),
   24382 		},
   24383 		Attrs: attrs,
   24384 	}
   24385 	return scope.AddOperation(opspec)
   24386 }
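
// Example usage (a minimal sketch, assuming a Scope s; key, indices, and
// shared_name are illustrative, and the same shared_name would be used by a
// matching MapUnstage op):
//
//	key := Const(s, int64(42))
//	indices := Const(s, []int32{0})
//	val := Const(s, []float32{1, 2, 3})
//	stage := MapStage(s, key, indices, []tf.Output{val},
//		[]tf.DataType{tf.Float},
//		MapStageSharedName("kv_area"))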
   24387 
   24388 // MapUnstageAttr is an optional argument to MapUnstage.
   24389 type MapUnstageAttr func(optionalAttr)
   24390 
   24391 // MapUnstageCapacity sets the optional capacity attribute to value.
   24392 // If not specified, defaults to 0
   24393 //
   24394 // REQUIRES: value >= 0
   24395 func MapUnstageCapacity(value int64) MapUnstageAttr {
   24396 	return func(m optionalAttr) {
   24397 		m["capacity"] = value
   24398 	}
   24399 }
   24400 
   24401 // MapUnstageMemoryLimit sets the optional memory_limit attribute to value.
   24402 // If not specified, defaults to 0
   24403 //
   24404 // REQUIRES: value >= 0
   24405 func MapUnstageMemoryLimit(value int64) MapUnstageAttr {
   24406 	return func(m optionalAttr) {
   24407 		m["memory_limit"] = value
   24408 	}
   24409 }
   24410 
   24411 // MapUnstageContainer sets the optional container attribute to value.
   24412 // If not specified, defaults to ""
   24413 func MapUnstageContainer(value string) MapUnstageAttr {
   24414 	return func(m optionalAttr) {
   24415 		m["container"] = value
   24416 	}
   24417 }
   24418 
   24419 // MapUnstageSharedName sets the optional shared_name attribute to value.
   24420 // If not specified, defaults to ""
   24421 func MapUnstageSharedName(value string) MapUnstageAttr {
   24422 	return func(m optionalAttr) {
   24423 		m["shared_name"] = value
   24424 	}
   24425 }
   24426 
// Op removes and returns the values associated with the key
// from the underlying container.
//
// If the underlying container does not contain this key, the op will block
// until it does.
   24431 func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {
   24432 	if scope.Err() != nil {
   24433 		return
   24434 	}
   24435 	attrs := map[string]interface{}{"dtypes": dtypes}
   24436 	for _, a := range optional {
   24437 		a(attrs)
   24438 	}
   24439 	opspec := tf.OpSpec{
   24440 		Type: "MapUnstage",
   24441 		Input: []tf.Input{
   24442 			key, indices,
   24443 		},
   24444 		Attrs: attrs,
   24445 	}
   24446 	op := scope.AddOperation(opspec)
   24447 	if scope.Err() != nil {
   24448 		return
   24449 	}
   24450 	var idx int
   24451 	var err error
   24452 	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
   24453 		scope.UpdateErr("MapUnstage", err)
   24454 		return
   24455 	}
   24456 	return values
   24457 }
   24458 
   24459 // MapSizeAttr is an optional argument to MapSize.
   24460 type MapSizeAttr func(optionalAttr)
   24461 
   24462 // MapSizeCapacity sets the optional capacity attribute to value.
   24463 // If not specified, defaults to 0
   24464 //
   24465 // REQUIRES: value >= 0
   24466 func MapSizeCapacity(value int64) MapSizeAttr {
   24467 	return func(m optionalAttr) {
   24468 		m["capacity"] = value
   24469 	}
   24470 }
   24471 
   24472 // MapSizeMemoryLimit sets the optional memory_limit attribute to value.
   24473 // If not specified, defaults to 0
   24474 //
   24475 // REQUIRES: value >= 0
   24476 func MapSizeMemoryLimit(value int64) MapSizeAttr {
   24477 	return func(m optionalAttr) {
   24478 		m["memory_limit"] = value
   24479 	}
   24480 }
   24481 
   24482 // MapSizeContainer sets the optional container attribute to value.
   24483 // If not specified, defaults to ""
   24484 func MapSizeContainer(value string) MapSizeAttr {
   24485 	return func(m optionalAttr) {
   24486 		m["container"] = value
   24487 	}
   24488 }
   24489 
   24490 // MapSizeSharedName sets the optional shared_name attribute to value.
   24491 // If not specified, defaults to ""
   24492 func MapSizeSharedName(value string) MapSizeAttr {
   24493 	return func(m optionalAttr) {
   24494 		m["shared_name"] = value
   24495 	}
   24496 }
   24497 
   24498 // Op returns the number of elements in the underlying container.
   24499 func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output) {
   24500 	if scope.Err() != nil {
   24501 		return
   24502 	}
   24503 	attrs := map[string]interface{}{"dtypes": dtypes}
   24504 	for _, a := range optional {
   24505 		a(attrs)
   24506 	}
   24507 	opspec := tf.OpSpec{
   24508 		Type: "MapSize",
   24509 
   24510 		Attrs: attrs,
   24511 	}
   24512 	op := scope.AddOperation(opspec)
   24513 	return op.Output(0)
   24514 }
   24515 
   24516 // MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
   24517 type MapIncompleteSizeAttr func(optionalAttr)
   24518 
   24519 // MapIncompleteSizeCapacity sets the optional capacity attribute to value.
   24520 // If not specified, defaults to 0
   24521 //
   24522 // REQUIRES: value >= 0
   24523 func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr {
   24524 	return func(m optionalAttr) {
   24525 		m["capacity"] = value
   24526 	}
   24527 }
   24528 
   24529 // MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
   24530 // If not specified, defaults to 0
   24531 //
   24532 // REQUIRES: value >= 0
   24533 func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr {
   24534 	return func(m optionalAttr) {
   24535 		m["memory_limit"] = value
   24536 	}
   24537 }
   24538 
   24539 // MapIncompleteSizeContainer sets the optional container attribute to value.
   24540 // If not specified, defaults to ""
   24541 func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr {
   24542 	return func(m optionalAttr) {
   24543 		m["container"] = value
   24544 	}
   24545 }
   24546 
   24547 // MapIncompleteSizeSharedName sets the optional shared_name attribute to value.
   24548 // If not specified, defaults to ""
   24549 func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr {
   24550 	return func(m optionalAttr) {
   24551 		m["shared_name"] = value
   24552 	}
   24553 }
   24554 
   24555 // Op returns the number of incomplete elements in the underlying container.
   24556 func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output) {
   24557 	if scope.Err() != nil {
   24558 		return
   24559 	}
   24560 	attrs := map[string]interface{}{"dtypes": dtypes}
   24561 	for _, a := range optional {
   24562 		a(attrs)
   24563 	}
   24564 	opspec := tf.OpSpec{
   24565 		Type: "MapIncompleteSize",
   24566 
   24567 		Attrs: attrs,
   24568 	}
   24569 	op := scope.AddOperation(opspec)
   24570 	return op.Output(0)
   24571 }
   24572 
   24573 // OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
   24574 type OrderedMapUnstageAttr func(optionalAttr)
   24575 
   24576 // OrderedMapUnstageCapacity sets the optional capacity attribute to value.
   24577 // If not specified, defaults to 0
   24578 //
   24579 // REQUIRES: value >= 0
   24580 func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr {
   24581 	return func(m optionalAttr) {
   24582 		m["capacity"] = value
   24583 	}
   24584 }
   24585 
   24586 // OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value.
   24587 // If not specified, defaults to 0
   24588 //
   24589 // REQUIRES: value >= 0
   24590 func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr {
   24591 	return func(m optionalAttr) {
   24592 		m["memory_limit"] = value
   24593 	}
   24594 }
   24595 
   24596 // OrderedMapUnstageContainer sets the optional container attribute to value.
   24597 // If not specified, defaults to ""
   24598 func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr {
   24599 	return func(m optionalAttr) {
   24600 		m["container"] = value
   24601 	}
   24602 }
   24603 
   24604 // OrderedMapUnstageSharedName sets the optional shared_name attribute to value.
   24605 // If not specified, defaults to ""
   24606 func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr {
   24607 	return func(m optionalAttr) {
   24608 		m["shared_name"] = value
   24609 	}
   24610 }
   24611 
// Op removes and returns the values associated with the key
// from the underlying container.
//
// If the underlying container does not contain this key, the op will block
// until it does.
   24616 func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output) {
   24617 	if scope.Err() != nil {
   24618 		return
   24619 	}
   24620 	attrs := map[string]interface{}{"dtypes": dtypes}
   24621 	for _, a := range optional {
   24622 		a(attrs)
   24623 	}
   24624 	opspec := tf.OpSpec{
   24625 		Type: "OrderedMapUnstage",
   24626 		Input: []tf.Input{
   24627 			key, indices,
   24628 		},
   24629 		Attrs: attrs,
   24630 	}
   24631 	op := scope.AddOperation(opspec)
   24632 	if scope.Err() != nil {
   24633 		return
   24634 	}
   24635 	var idx int
   24636 	var err error
   24637 	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
   24638 		scope.UpdateErr("OrderedMapUnstage", err)
   24639 		return
   24640 	}
   24641 	return values
   24642 }
   24643 
   24644 // OrderedMapSizeAttr is an optional argument to OrderedMapSize.
   24645 type OrderedMapSizeAttr func(optionalAttr)
   24646 
   24647 // OrderedMapSizeCapacity sets the optional capacity attribute to value.
   24648 // If not specified, defaults to 0
   24649 //
   24650 // REQUIRES: value >= 0
   24651 func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr {
   24652 	return func(m optionalAttr) {
   24653 		m["capacity"] = value
   24654 	}
   24655 }
   24656 
   24657 // OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value.
   24658 // If not specified, defaults to 0
   24659 //
   24660 // REQUIRES: value >= 0
   24661 func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr {
   24662 	return func(m optionalAttr) {
   24663 		m["memory_limit"] = value
   24664 	}
   24665 }
   24666 
   24667 // OrderedMapSizeContainer sets the optional container attribute to value.
   24668 // If not specified, defaults to ""
   24669 func OrderedMapSizeContainer(value string) OrderedMapSizeAttr {
   24670 	return func(m optionalAttr) {
   24671 		m["container"] = value
   24672 	}
   24673 }
   24674 
   24675 // OrderedMapSizeSharedName sets the optional shared_name attribute to value.
   24676 // If not specified, defaults to ""
   24677 func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr {
   24678 	return func(m optionalAttr) {
   24679 		m["shared_name"] = value
   24680 	}
   24681 }
   24682 
   24683 // Op returns the number of elements in the underlying container.
   24684 func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output) {
   24685 	if scope.Err() != nil {
   24686 		return
   24687 	}
   24688 	attrs := map[string]interface{}{"dtypes": dtypes}
   24689 	for _, a := range optional {
   24690 		a(attrs)
   24691 	}
   24692 	opspec := tf.OpSpec{
   24693 		Type: "OrderedMapSize",
   24694 
   24695 		Attrs: attrs,
   24696 	}
   24697 	op := scope.AddOperation(opspec)
   24698 	return op.Output(0)
   24699 }
   24700 
   24701 // CTCLossAttr is an optional argument to CTCLoss.
   24702 type CTCLossAttr func(optionalAttr)
   24703 
   24704 // CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
   24705 //
   24706 // value: Scalar, if true then repeated labels are
   24707 // collapsed prior to the CTC calculation.
   24708 // If not specified, defaults to false
   24709 func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr {
   24710 	return func(m optionalAttr) {
   24711 		m["preprocess_collapse_repeated"] = value
   24712 	}
   24713 }
   24714 
   24715 // CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
   24716 //
   24717 // value: Scalar.  If set to false, *during* CTC calculation
   24718 // repeated non-blank labels will not be merged and are interpreted as
   24719 // individual labels.  This is a simplified version of CTC.
   24720 // If not specified, defaults to true
   24721 func CTCLossCtcMergeRepeated(value bool) CTCLossAttr {
   24722 	return func(m optionalAttr) {
   24723 		m["ctc_merge_repeated"] = value
   24724 	}
   24725 }
   24726 
   24727 // CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
   24728 //
   24729 // value: Scalar. If set to true, during CTC
   24730 // calculation, items that have longer output sequences than input sequences
   24731 // are skipped: they don't contribute to the loss term and have zero-gradient.
   24732 // If not specified, defaults to false
   24733 func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr {
   24734 	return func(m optionalAttr) {
   24735 		m["ignore_longer_outputs_than_inputs"] = value
   24736 	}
   24737 }
   24738 
// Calculates the CTC Loss (log probability) for each batch entry.
//
// Also calculates the gradient.  This op performs the softmax operation for
// you, so inputs should be, e.g., linear projections of outputs by an LSTM.
   24743 //
   24744 // Arguments:
   24745 //	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
   24746 //	labels_indices: The indices of a `SparseTensor<int32, 2>`.
   24747 // `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
   24748 // `(batch b, time t)`.
   24749 //	labels_values: The values (labels) associated with the given batch and time.
   24750 //	sequence_length: A vector containing sequence lengths (batch).
   24751 //
// Returns A vector (batch) containing log-probabilities, and the gradient of
// `loss`: 3-D, shape `(max_time x batch_size x num_classes)`.
   24754 func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output) {
   24755 	if scope.Err() != nil {
   24756 		return
   24757 	}
   24758 	attrs := map[string]interface{}{}
   24759 	for _, a := range optional {
   24760 		a(attrs)
   24761 	}
   24762 	opspec := tf.OpSpec{
   24763 		Type: "CTCLoss",
   24764 		Input: []tf.Input{
   24765 			inputs, labels_indices, labels_values, sequence_length,
   24766 		},
   24767 		Attrs: attrs,
   24768 	}
   24769 	op := scope.AddOperation(opspec)
   24770 	return op.Output(0), op.Output(1)
   24771 }
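
// Example usage (a minimal sketch, assuming a Scope s and graph tensors
// logits of shape (max_time x batch_size x num_classes), SparseTensor label
// components labelIndices/labelValues, and seqLens of shape (batch_size);
// the names are illustrative):
//
//	loss, grad := CTCLoss(s, logits, labelIndices, labelValues, seqLens,
//		CTCLossCtcMergeRepeated(true))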
   24772 
   24773 // CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
   24774 type CTCGreedyDecoderAttr func(optionalAttr)
   24775 
   24776 // CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
   24777 //
   24778 // value: If True, merge repeated classes in output.
   24779 // If not specified, defaults to false
   24780 func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr {
   24781 	return func(m optionalAttr) {
   24782 		m["merge_repeated"] = value
   24783 	}
   24784 }
   24785 
   24786 // Performs greedy decoding on the logits given in inputs.
   24787 //
   24788 // A note about the attribute merge_repeated: if enabled, when
   24789 // consecutive logits' maximum indices are the same, only the first of
   24790 // these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
   24791 // becomes "A B B" if merge_repeated = True and "A B B B B" if
   24792 // merge_repeated = False.
   24793 //
   24794 // Regardless of the value of merge_repeated, if the maximum index of a given
   24795 // time and batch corresponds to the blank, index `(num_classes - 1)`, no new
   24796 // element is emitted.
   24797 //
   24798 // Arguments:
   24799 //	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
   24800 //	sequence_length: A vector containing sequence lengths, size `(batch_size)`.
   24801 //
// Returns:
//	decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, of a
// `SparseTensor<int64, 2>`.  The rows store: [batch, time].
//	decoded_values: Values vector, size `(total_decoded_outputs)`, of a
// `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
//	decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor.
// Values are: `[batch_size, max_decoded_length]`.
//	log_probability: Matrix, size `(batch_size x 1)`, containing sequence
// log-probabilities.
   24807 func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output) {
   24808 	if scope.Err() != nil {
   24809 		return
   24810 	}
   24811 	attrs := map[string]interface{}{}
   24812 	for _, a := range optional {
   24813 		a(attrs)
   24814 	}
   24815 	opspec := tf.OpSpec{
   24816 		Type: "CTCGreedyDecoder",
   24817 		Input: []tf.Input{
   24818 			inputs, sequence_length,
   24819 		},
   24820 		Attrs: attrs,
   24821 	}
   24822 	op := scope.AddOperation(opspec)
   24823 	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
   24824 }
   24825 
   24826 // Forwards `data` to the output port determined by `pred`.
   24827 //
   24828 // If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
   24829 // the data goes to `output_false`.
   24830 //
   24831 // See also `RefSwitch` and `Merge`.
   24832 //
   24833 // Arguments:
   24834 //	data: The tensor to be forwarded to the appropriate output.
   24835 //	pred: A scalar that specifies which output port will receive data.
   24836 //
// Returns `output_false` (receives `data` if `pred` is false) and
// `output_true` (receives `data` if `pred` is true).
   24838 func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output) {
   24839 	if scope.Err() != nil {
   24840 		return
   24841 	}
   24842 	opspec := tf.OpSpec{
   24843 		Type: "Switch",
   24844 		Input: []tf.Input{
   24845 			data, pred,
   24846 		},
   24847 	}
   24848 	op := scope.AddOperation(opspec)
   24849 	return op.Output(0), op.Output(1)
   24850 }
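
// Example usage (a minimal sketch, assuming a Scope s; Switch is a low-level
// control-flow primitive, normally driven by higher-level loop/cond helpers):
//
//	pred := Const(s, true)
//	data := Const(s, int32(7))
//	// At run time only outputTrue carries the value; outputFalse is dead.
//	outputFalse, outputTrue := Switch(s, data, pred)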
   24851 
// Adds all input tensors element-wise.
   24853 //
   24854 // Arguments:
   24855 //	inputs: Must all be the same size and shape.
   24856 func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output) {
   24857 	if scope.Err() != nil {
   24858 		return
   24859 	}
   24860 	opspec := tf.OpSpec{
   24861 		Type: "AddN",
   24862 		Input: []tf.Input{
   24863 			tf.OutputList(inputs),
   24864 		},
   24865 	}
   24866 	op := scope.AddOperation(opspec)
   24867 	return op.Output(0)
   24868 }
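
// Example usage (a minimal sketch, assuming a Scope s):
//
//	a := Const(s, []int32{1, 2})
//	b := Const(s, []int32{3, 4})
//	c := Const(s, []int32{5, 6})
//	sum := AddN(s, []tf.Output{a, b, c}) // evaluates to [9, 12]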
   24869 
   24870 // EnterAttr is an optional argument to Enter.
   24871 type EnterAttr func(optionalAttr)
   24872 
   24873 // EnterIsConstant sets the optional is_constant attribute to value.
   24874 //
   24875 // value: If true, the output is constant within the child frame.
   24876 // If not specified, defaults to false
   24877 func EnterIsConstant(value bool) EnterAttr {
   24878 	return func(m optionalAttr) {
   24879 		m["is_constant"] = value
   24880 	}
   24881 }
   24882 
   24883 // EnterParallelIterations sets the optional parallel_iterations attribute to value.
   24884 //
   24885 // value: The number of iterations allowed to run in parallel.
   24886 // If not specified, defaults to 10
   24887 func EnterParallelIterations(value int64) EnterAttr {
   24888 	return func(m optionalAttr) {
   24889 		m["parallel_iterations"] = value
   24890 	}
   24891 }
   24892 
   24893 // Creates or finds a child frame, and makes `data` available to the child frame.
   24894 //
   24895 // This op is used together with `Exit` to create loops in the graph.
   24896 // The unique `frame_name` is used by the `Executor` to identify frames. If
   24897 // `is_constant` is true, `output` is a constant in the child frame; otherwise
   24898 // it may be changed in the child frame. At most `parallel_iterations` iterations
   24899 // are run in parallel in the child frame.
   24900 //
   24901 // Arguments:
   24902 //	data: The tensor to be made available to the child frame.
   24903 //	frame_name: The name of the child frame.
   24904 //
   24905 // Returns The same tensor as `data`.
   24906 func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output) {
   24907 	if scope.Err() != nil {
   24908 		return
   24909 	}
   24910 	attrs := map[string]interface{}{"frame_name": frame_name}
   24911 	for _, a := range optional {
   24912 		a(attrs)
   24913 	}
   24914 	opspec := tf.OpSpec{
   24915 		Type: "Enter",
   24916 		Input: []tf.Input{
   24917 			data,
   24918 		},
   24919 		Attrs: attrs,
   24920 	}
   24921 	op := scope.AddOperation(opspec)
   24922 	return op.Output(0)
   24923 }
   24924 
   24925 // Produce a string tensor that encodes the state of a Reader.
   24926 //
   24927 // Not all Readers support being serialized, so this can produce an
   24928 // Unimplemented error.
   24929 //
   24930 // Arguments:
   24931 //	reader_handle: Handle to a Reader.
   24932 func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output) {
   24933 	if scope.Err() != nil {
   24934 		return
   24935 	}
   24936 	opspec := tf.OpSpec{
   24937 		Type: "ReaderSerializeStateV2",
   24938 		Input: []tf.Input{
   24939 			reader_handle,
   24940 		},
   24941 	}
   24942 	op := scope.AddOperation(opspec)
   24943 	return op.Output(0)
   24944 }
   24945 
   24946 // Exits the current frame to its parent frame.
   24947 //
   24948 // Exit makes its input `data` available to the parent frame.
   24949 //
   24950 // Arguments:
   24951 //	data: The tensor to be made available to the parent frame.
   24952 //
   24953 // Returns The same tensor as `data`.
   24954 func Exit(scope *Scope, data tf.Output) (output tf.Output) {
   24955 	if scope.Err() != nil {
   24956 		return
   24957 	}
   24958 	opspec := tf.OpSpec{
   24959 		Type: "Exit",
   24960 		Input: []tf.Input{
   24961 			data,
   24962 		},
   24963 	}
   24964 	op := scope.AddOperation(opspec)
   24965 	return op.Output(0)
   24966 }
   24967 
   24968 // Returns a copy of the input tensor.
   24969 func Snapshot(scope *Scope, input tf.Output) (output tf.Output) {
   24970 	if scope.Err() != nil {
   24971 		return
   24972 	}
   24973 	opspec := tf.OpSpec{
   24974 		Type: "Snapshot",
   24975 		Input: []tf.Input{
   24976 			input,
   24977 		},
   24978 	}
   24979 	op := scope.AddOperation(opspec)
   24980 	return op.Output(0)
   24981 }
   24982 
   24983 // Scatter `updates` into a new (initially zero) tensor according to `indices`.
   24984 //
   24985 // Creates a new tensor by applying sparse `updates` to individual
   24986 // values or slices within a zero tensor of the given `shape` according to
// indices.  This operator is the inverse of the `tf.gather_nd` operator, which
   24988 // extracts values or slices from a given tensor.
   24989 //
   24990 // **WARNING**: The order in which updates are applied is nondeterministic, so the
   24991 // output will be nondeterministic if `indices` contains duplicates.
   24992 //
   24993 // `indices` is an integer tensor containing indices into a new tensor of shape
   24994 // `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
   24995 //
   24996 //     indices.shape[-1] <= shape.rank
   24997 //
   24998 // The last dimension of `indices` corresponds to indices into elements
   24999 // (if `indices.shape[-1] = shape.rank`) or slices
   25000 // (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
   25001 // `shape`.  `updates` is a tensor with shape
   25002 //
   25003 //     indices.shape[:-1] + shape[indices.shape[-1]:]
   25004 //
   25005 // The simplest form of scatter is to insert individual elements in a tensor by
   25006 // index. For example, say we want to insert 4 scattered elements in a rank-1
   25007 // tensor with 8 elements.
   25008 //
   25009 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   25010 // <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
   25011 // </div>
   25012 //
   25013 // In Python, this scatter operation would look like this:
   25014 //
   25015 // ```python
   25016 //     indices = tf.constant([[4], [3], [1], [7]])
   25017 //     updates = tf.constant([9, 10, 11, 12])
   25018 //     shape = tf.constant([8])
   25019 //     scatter = tf.scatter_nd(indices, updates, shape)
   25020 //     with tf.Session() as sess:
   25021 //       print(sess.run(scatter))
   25022 // ```
   25023 //
   25024 // The resulting tensor would look like this:
   25025 //
   25026 //     [0, 11, 0, 10, 9, 0, 0, 12]
   25027 //
// We can also insert entire slices of a higher-rank tensor all at once. For
// example, we can insert two slices in the first dimension of a rank-3 tensor
// with two matrices of new values.
   25031 //
   25032 // <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
   25033 // <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
   25034 // </div>
   25035 //
   25036 // In Python, this scatter operation would look like this:
   25037 //
   25038 // ```python
   25039 //     indices = tf.constant([[0], [2]])
   25040 //     updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
   25041 //                             [7, 7, 7, 7], [8, 8, 8, 8]],
   25042 //                            [[5, 5, 5, 5], [6, 6, 6, 6],
   25043 //                             [7, 7, 7, 7], [8, 8, 8, 8]]])
   25044 //     shape = tf.constant([4, 4, 4])
   25045 //     scatter = tf.scatter_nd(indices, updates, shape)
   25046 //     with tf.Session() as sess:
   25047 //       print(sess.run(scatter))
   25048 // ```
   25049 //
   25050 // The resulting tensor would look like this:
   25051 //
   25052 //     [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
   25053 //      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
   25054 //      [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
   25055 //      [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
   25056 //
   25057 // Arguments:
   25058 //	indices: Index tensor.
   25059 //	updates: Updates to scatter into output.
   25060 //	shape: 1-D. The shape of the resulting tensor.
   25061 //
   25062 // Returns A new tensor with the given shape and updates applied according
   25063 // to the indices.
   25064 func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output) (output tf.Output) {
   25065 	if scope.Err() != nil {
   25066 		return
   25067 	}
   25068 	opspec := tf.OpSpec{
   25069 		Type: "ScatterNd",
   25070 		Input: []tf.Input{
   25071 			indices, updates, shape,
   25072 		},
   25073 	}
   25074 	op := scope.AddOperation(opspec)
   25075 	return op.Output(0)
   25076 }
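
// Example (editorial sketch, not machine generated): the Go equivalent of the
// first Python example above, scattering four values into a length-8 vector.
// Assumes `NewScope` and `Const` from this package.
//
// ```go
// s := NewScope()
// indices := Const(s, [][]int32{{4}, {3}, {1}, {7}})
// updates := Const(s, []int32{9, 10, 11, 12})
// shape := Const(s, []int32{8})
// scatter := ScatterNd(s, indices, updates, shape)
// // When executed, scatter evaluates to [0, 11, 0, 10, 9, 0, 0, 12].
// ```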
   25077 
   25078 // SpaceToDepthAttr is an optional argument to SpaceToDepth.
   25079 type SpaceToDepthAttr func(optionalAttr)
   25080 
   25081 // SpaceToDepthDataFormat sets the optional data_format attribute to value.
   25082 // If not specified, defaults to "NHWC"
   25083 func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
   25084 	return func(m optionalAttr) {
   25085 		m["data_format"] = value
   25086 	}
   25087 }
   25088 
   25089 // SpaceToDepth for tensors of type T.
   25090 //
// Rearranges blocks of spatial data into depth. More specifically,
   25092 // this op outputs a copy of the input tensor where values from the `height`
   25093 // and `width` dimensions are moved to the `depth` dimension.
   25094 // The attr `block_size` indicates the input block size.
   25095 //
//   * Non-overlapping blocks of size `block_size x block_size` are rearranged
   25097 //     into depth at each location.
   25098 //   * The depth of the output tensor is `block_size * block_size * input_depth`.
   25099 //   * The Y, X coordinates within each block of the input become the high order
   25100 //     component of the output channel index.
   25101 //   * The input tensor's height and width must be divisible by block_size.
   25102 //
   25103 // The `data_format` attr specifies the layout of the input and output tensors
   25104 // with the following options:
   25105 //   "NHWC": `[ batch, height, width, channels ]`
   25106 //   "NCHW": `[ batch, channels, height, width ]`
   25107 //   "NCHW_VECT_C":
   25108 //       `qint8 [ batch, channels / 4, height, width, 4 ]`
   25109 //
   25110 // It is useful to consider the operation as transforming a 6-D Tensor.
   25111 // e.g. for data_format = NHWC,
   25112 //      Each element in the input tensor can be specified via 6 coordinates,
   25113 //      ordered by decreasing memory layout significance as:
   25114 //      n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
   25115 //                         within the output image, bX, bY means coordinates
   25116 //                         within the input block, iC means input channels).
   25117 //      The output would be a transpose to the following layout:
   25118 //      n,oY,oX,bY,bX,iC
   25119 //
   25120 // This operation is useful for resizing the activations between convolutions
   25121 // (but keeping all data), e.g. instead of pooling. It is also useful for training
   25122 // purely convolutional models.
   25123 //
   25124 // For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
   25125 // block_size = 2:
   25126 //
   25127 // ```
   25128 // x = [[[[1], [2]],
   25129 //       [[3], [4]]]]
   25130 // ```
   25131 //
   25132 // This operation will output a tensor of shape `[1, 1, 1, 4]`:
   25133 //
   25134 // ```
   25135 // [[[[1, 2, 3, 4]]]]
   25136 // ```
   25137 //
// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`;
// the corresponding output will have a single element (i.e. width and height are
// both 1) and a depth of 4 channels (1 * block_size * block_size).
   25141 // The output element shape is `[1, 1, 4]`.
   25142 //
   25143 // For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
   25144 //
   25145 // ```
   25146 // x = [[[[1, 2, 3], [4, 5, 6]],
   25147 //       [[7, 8, 9], [10, 11, 12]]]]
   25148 // ```
   25149 //
   25150 // This operation, for block_size of 2, will return the following tensor of shape
   25151 // `[1, 1, 1, 12]`
   25152 //
   25153 // ```
   25154 // [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
   25155 // ```
   25156 //
// Similarly, for the following input of shape `[1, 4, 4, 1]` and a block size of 2:
   25158 //
   25159 // ```
   25160 // x = [[[[1],   [2],  [5],  [6]],
   25161 //       [[3],   [4],  [7],  [8]],
   25162 //       [[9],  [10], [13],  [14]],
   25163 //       [[11], [12], [15],  [16]]]]
   25164 // ```
   25165 //
// the operator will return the following tensor of shape `[1, 2, 2, 4]`:
   25167 //
   25168 // ```
   25169 // x = [[[[1, 2, 3, 4],
   25170 //        [5, 6, 7, 8]],
   25171 //       [[9, 10, 11, 12],
   25172 //        [13, 14, 15, 16]]]]
   25173 // ```
   25174 //
   25175 // Arguments:
   25176 //
   25177 //	block_size: The size of the spatial block.
   25178 func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
   25179 	if scope.Err() != nil {
   25180 		return
   25181 	}
   25182 	attrs := map[string]interface{}{"block_size": block_size}
   25183 	for _, a := range optional {
   25184 		a(attrs)
   25185 	}
   25186 	opspec := tf.OpSpec{
   25187 		Type: "SpaceToDepth",
   25188 		Input: []tf.Input{
   25189 			input,
   25190 		},
   25191 		Attrs: attrs,
   25192 	}
   25193 	op := scope.AddOperation(opspec)
   25194 	return op.Output(0)
   25195 }
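
// Example (editorial sketch, not machine generated): the `[1, 2, 2, 1]` case
// from the documentation above, with block_size = 2. Assumes `NewScope` and
// `Const` from this package.
//
// ```go
// s := NewScope()
// x := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // shape [1, 2, 2, 1]
// y := SpaceToDepth(s, x, 2)
// // When executed, y has shape [1, 1, 1, 4] and value [[[[1, 2, 3, 4]]]].
// ```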
   25196 
   25197 // AbortAttr is an optional argument to Abort.
   25198 type AbortAttr func(optionalAttr)
   25199 
   25200 // AbortErrorMsg sets the optional error_msg attribute to value.
   25201 //
   25202 // value: A string which is the message associated with the exception.
   25203 // If not specified, defaults to ""
   25204 func AbortErrorMsg(value string) AbortAttr {
   25205 	return func(m optionalAttr) {
   25206 		m["error_msg"] = value
   25207 	}
   25208 }
   25209 
   25210 // AbortExitWithoutError sets the optional exit_without_error attribute to value.
   25211 // If not specified, defaults to false
   25212 func AbortExitWithoutError(value bool) AbortAttr {
   25213 	return func(m optionalAttr) {
   25214 		m["exit_without_error"] = value
   25215 	}
   25216 }
   25217 
// Raises an exception to abort the process when called.
//
// If exit_without_error is true, the process will exit normally;
// otherwise it will exit with a SIGABRT signal.
   25222 //
   25223 // Returns nothing but an exception.
   25224 //
   25225 // Returns the created operation.
   25226 func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
   25227 	if scope.Err() != nil {
   25228 		return
   25229 	}
   25230 	attrs := map[string]interface{}{}
   25231 	for _, a := range optional {
   25232 		a(attrs)
   25233 	}
   25234 	opspec := tf.OpSpec{
   25235 		Type: "Abort",
   25236 
   25237 		Attrs: attrs,
   25238 	}
   25239 	return scope.AddOperation(opspec)
   25240 }
   25241 
   25242 // UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
   25243 type UniformCandidateSamplerAttr func(optionalAttr)
   25244 
   25245 // UniformCandidateSamplerSeed sets the optional seed attribute to value.
   25246 //
   25247 // value: If either seed or seed2 are set to be non-zero, the random number
   25248 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   25249 // random seed.
   25250 // If not specified, defaults to 0
   25251 func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr {
   25252 	return func(m optionalAttr) {
   25253 		m["seed"] = value
   25254 	}
   25255 }
   25256 
   25257 // UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
   25258 //
// value: A second seed to avoid seed collision.
   25260 // If not specified, defaults to 0
   25261 func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr {
   25262 	return func(m optionalAttr) {
   25263 		m["seed2"] = value
   25264 	}
   25265 }
   25266 
   25267 // Generates labels for candidate sampling with a uniform distribution.
   25268 //
   25269 // See explanations of candidate sampling and the data formats at
   25270 // go/candidate-sampling.
   25271 //
   25272 // For each batch, this op picks a single set of sampled candidate labels.
   25273 //
   25274 // The advantages of sampling candidates per-batch are simplicity and the
   25275 // possibility of efficient dense matrix multiplication. The disadvantage is that
   25276 // the sampled candidates must be chosen independently of the context and of the
   25277 // true labels.
   25278 //
   25279 // Arguments:
   25280 //	true_classes: A batch_size * num_true matrix, in which each row contains the
   25281 // IDs of the num_true target_classes in the corresponding original label.
   25282 //	num_true: Number of true labels per context.
   25283 //	num_sampled: Number of candidates to randomly sample.
   25284 //	unique: If unique is true, we sample with rejection, so that all sampled
   25285 // candidates in a batch are unique. This requires some approximation to
   25286 // estimate the post-rejection sampling probabilities.
   25287 //	range_max: The sampler will sample integers from the interval [0, range_max).
   25288 //
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
   25296 func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
   25297 	if scope.Err() != nil {
   25298 		return
   25299 	}
   25300 	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
   25301 	for _, a := range optional {
   25302 		a(attrs)
   25303 	}
   25304 	opspec := tf.OpSpec{
   25305 		Type: "UniformCandidateSampler",
   25306 		Input: []tf.Input{
   25307 			true_classes,
   25308 		},
   25309 		Attrs: attrs,
   25310 	}
   25311 	op := scope.AddOperation(opspec)
   25312 	return op.Output(0), op.Output(1), op.Output(2)
   25313 }
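
// Example (editorial sketch, not machine generated): sampling 4 unique
// candidate IDs from [0, 10) for a batch of one context with two true labels.
// Assumes `NewScope` and `Const` from this package; the values are illustrative.
//
// ```go
// s := NewScope()
// trueClasses := Const(s, [][]int64{{1, 7}}) // batch_size=1, num_true=2
// sampled, trueExpected, sampledExpected := UniformCandidateSampler(
// 	s, trueClasses, 2, 4, true, 10,
// 	UniformCandidateSamplerSeed(42), // fixed seed for reproducibility
// )
// _, _, _ = sampled, trueExpected, sampledExpected
// ```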
   25314 
   25315 // FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
   25316 type FixedUnigramCandidateSamplerAttr func(optionalAttr)
   25317 
   25318 // FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
   25319 //
   25320 // value: Each valid line in this file (which should have a CSV-like format)
   25321 // corresponds to a valid word ID. IDs are in sequential order, starting from
   25322 // num_reserved_ids. The last entry in each line is expected to be a value
   25323 // corresponding to the count or relative probability. Exactly one of vocab_file
   25324 // and unigrams needs to be passed to this op.
   25325 // If not specified, defaults to ""
   25326 func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr {
   25327 	return func(m optionalAttr) {
   25328 		m["vocab_file"] = value
   25329 	}
   25330 }
   25331 
   25332 // FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
   25333 //
   25334 // value: The distortion is used to skew the unigram probability distribution.
   25335 // Each weight is first raised to the distortion's power before adding to the
   25336 // internal unigram distribution. As a result, distortion = 1.0 gives regular
   25337 // unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
   25338 // a uniform distribution.
   25339 // If not specified, defaults to 1
   25340 func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr {
   25341 	return func(m optionalAttr) {
   25342 		m["distortion"] = value
   25343 	}
   25344 }
   25345 
   25346 // FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
   25347 //
// value: Optionally, some reserved IDs can be added by the user in the range
// [0, num_reserved_ids). One use case is that a special unknown
// word token is used as ID 0. These IDs will have a sampling probability of 0.
   25351 // If not specified, defaults to 0
   25352 func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr {
   25353 	return func(m optionalAttr) {
   25354 		m["num_reserved_ids"] = value
   25355 	}
   25356 }
   25357 
   25358 // FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
   25359 //
   25360 // value: A sampler can be used to sample from a subset of the original range
   25361 // in order to speed up the whole computation through parallelism. This parameter
   25362 // (together with 'shard') indicates the number of partitions that are being
   25363 // used in the overall computation.
   25364 // If not specified, defaults to 1
   25365 //
   25366 // REQUIRES: value >= 1
   25367 func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr {
   25368 	return func(m optionalAttr) {
   25369 		m["num_shards"] = value
   25370 	}
   25371 }
   25372 
   25373 // FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
   25374 //
   25375 // value: A sampler can be used to sample from a subset of the original range
   25376 // in order to speed up the whole computation through parallelism. This parameter
   25377 // (together with 'num_shards') indicates the particular partition number of a
   25378 // sampler op, when partitioning is being used.
   25379 // If not specified, defaults to 0
   25380 //
   25381 // REQUIRES: value >= 0
   25382 func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr {
   25383 	return func(m optionalAttr) {
   25384 		m["shard"] = value
   25385 	}
   25386 }
   25387 
   25388 // FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
   25389 //
   25390 // value: A list of unigram counts or probabilities, one per ID in sequential
   25391 // order. Exactly one of vocab_file and unigrams should be passed to this op.
   25392 // If not specified, defaults to <>
   25393 func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr {
   25394 	return func(m optionalAttr) {
   25395 		m["unigrams"] = value
   25396 	}
   25397 }
   25398 
   25399 // FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
   25400 //
   25401 // value: If either seed or seed2 are set to be non-zero, the random number
   25402 // generator is seeded by the given seed.  Otherwise, it is seeded by a
   25403 // random seed.
   25404 // If not specified, defaults to 0
   25405 func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr {
   25406 	return func(m optionalAttr) {
   25407 		m["seed"] = value
   25408 	}
   25409 }
   25410 
   25411 // FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
   25412 //
// value: A second seed to avoid seed collision.
   25414 // If not specified, defaults to 0
   25415 func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr {
   25416 	return func(m optionalAttr) {
   25417 		m["seed2"] = value
   25418 	}
   25419 }
   25420 
   25421 // Generates labels for candidate sampling with a learned unigram distribution.
   25422 //
   25423 // A unigram sampler could use a fixed unigram distribution read from a
   25424 // file or passed in as an in-memory array instead of building up the distribution
   25425 // from data on the fly. There is also an option to skew the distribution by
   25426 // applying a distortion power to the weights.
   25427 //
   25428 // The vocabulary file should be in CSV-like format, with the last field
   25429 // being the weight associated with the word.
   25430 //
   25431 // For each batch, this op picks a single set of sampled candidate labels.
   25432 //
   25433 // The advantages of sampling candidates per-batch are simplicity and the
   25434 // possibility of efficient dense matrix multiplication. The disadvantage is that
   25435 // the sampled candidates must be chosen independently of the context and of the
   25436 // true labels.
   25437 //
   25438 // Arguments:
   25439 //	true_classes: A batch_size * num_true matrix, in which each row contains the
   25440 // IDs of the num_true target_classes in the corresponding original label.
   25441 //	num_true: Number of true labels per context.
   25442 //	num_sampled: Number of candidates to randomly sample.
   25443 //	unique: If unique is true, we sample with rejection, so that all sampled
   25444 // candidates in a batch are unique. This requires some approximation to
   25445 // estimate the post-rejection sampling probabilities.
   25446 //	range_max: The sampler will sample integers from the interval [0, range_max).
   25447 //
// Returns:
//	sampled_candidates: A vector of length num_sampled, in which each element is
// the ID of a sampled candidate.
//	true_expected_count: A batch_size * num_true matrix, representing
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//	sampled_expected_count: A vector of length num_sampled, for each sampled
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
   25455 func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
   25456 	if scope.Err() != nil {
   25457 		return
   25458 	}
   25459 	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
   25460 	for _, a := range optional {
   25461 		a(attrs)
   25462 	}
   25463 	opspec := tf.OpSpec{
   25464 		Type: "FixedUnigramCandidateSampler",
   25465 		Input: []tf.Input{
   25466 			true_classes,
   25467 		},
   25468 		Attrs: attrs,
   25469 	}
   25470 	op := scope.AddOperation(opspec)
   25471 	return op.Output(0), op.Output(1), op.Output(2)
   25472 }
   25473 
   25474 // Elementwise computes the bitwise AND of `x` and `y`.
   25475 //
// The result will have those bits set that are set in both `x` and `y`. The
// computation is performed on the underlying representations of `x` and `y`.
   25478 func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   25479 	if scope.Err() != nil {
   25480 		return
   25481 	}
   25482 	opspec := tf.OpSpec{
   25483 		Type: "BitwiseAnd",
   25484 		Input: []tf.Input{
   25485 			x, y,
   25486 		},
   25487 	}
   25488 	op := scope.AddOperation(opspec)
   25489 	return op.Output(0)
   25490 }
   25491 
   25492 // Elementwise computes the bitwise left-shift of `x` and `y`.
   25493 //
// If `y` is negative, or greater than or equal to the width of `x` in bits,
// the result is implementation defined.
   25496 func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   25497 	if scope.Err() != nil {
   25498 		return
   25499 	}
   25500 	opspec := tf.OpSpec{
   25501 		Type: "LeftShift",
   25502 		Input: []tf.Input{
   25503 			x, y,
   25504 		},
   25505 	}
   25506 	op := scope.AddOperation(opspec)
   25507 	return op.Output(0)
   25508 }
   25509 
   25510 // Elementwise computes the bitwise right-shift of `x` and `y`.
   25511 //
   25512 // Performs a logical shift for unsigned integer types, and an arithmetic shift
   25513 // for signed integer types.
   25514 //
// If `y` is negative, or greater than or equal to the width of `x` in bits,
// the result is implementation defined.
   25517 func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
   25518 	if scope.Err() != nil {
   25519 		return
   25520 	}
   25521 	opspec := tf.OpSpec{
   25522 		Type: "RightShift",
   25523 		Input: []tf.Input{
   25524 			x, y,
   25525 		},
   25526 	}
   25527 	op := scope.AddOperation(opspec)
   25528 	return op.Output(0)
   25529 }
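
// Example (editorial sketch, not machine generated): the three bitwise ops
// above applied element-wise to small int32 vectors. Assumes `NewScope` and
// `Const` from this package.
//
// ```go
// s := NewScope()
// x := Const(s, []int32{0, 5, 3, 14})
// y := Const(s, []int32{5, 0, 7, 3})
// and := BitwiseAnd(s, x, y) // [0, 0, 3, 2]
// shl := LeftShift(s, x, y)  // [0, 5, 384, 112]
// shr := RightShift(s, x, y) // [0, 5, 0, 1]
// _, _, _ = and, shl, shr
// ```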
   25530 
   25531 // Adjust the hue of one or more images.
   25532 //
// `images` is a tensor of at least 3 dimensions.  The last dimension is
// interpreted as channels, and must be three.
//
// The input image is considered in the RGB colorspace. Conceptually, the RGB
// colors are first mapped into HSV. A delta is then applied to all the hue
// values, and the result is mapped back to the RGB colorspace.
   25539 //
   25540 // Arguments:
   25541 //	images: Images to adjust.  At least 3-D.
   25542 //	delta: A float delta to add to the hue.
   25543 //
   25544 // Returns The hue-adjusted image or images.
   25545 func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) {
   25546 	if scope.Err() != nil {
   25547 		return
   25548 	}
   25549 	opspec := tf.OpSpec{
   25550 		Type: "AdjustHue",
   25551 		Input: []tf.Input{
   25552 			images, delta,
   25553 		},
   25554 	}
   25555 	op := scope.AddOperation(opspec)
   25556 	return op.Output(0)
   25557 }
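
// Example (editorial sketch, not machine generated): shifting the hue of a
// single RGB pixel by 0.25. Assumes `NewScope` and `Const` from this package;
// the pixel value is illustrative.
//
// ```go
// s := NewScope()
// img := Const(s, [][][]float32{{{0.9, 0.1, 0.1}}}) // shape [1, 1, 3], reddish
// delta := Const(s, float32(0.25))
// adjusted := AdjustHue(s, img, delta)
// _ = adjusted
// ```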
   25558 
   25559 // AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
   25560 type AvgPool3DGradAttr func(optionalAttr)
   25561 
   25562 // AvgPool3DGradDataFormat sets the optional data_format attribute to value.
   25563 //
   25564 // value: The data format of the input and output data. With the
   25565 // default format "NDHWC", the data is stored in the order of:
   25566 //     [batch, in_depth, in_height, in_width, in_channels].
   25567 // Alternatively, the format could be "NCDHW", the data storage order is:
   25568 //     [batch, in_channels, in_depth, in_height, in_width].
   25569 // If not specified, defaults to "NDHWC"
   25570 func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr {
   25571 	return func(m optionalAttr) {
   25572 		m["data_format"] = value
   25573 	}
   25574 }
   25575 
   25576 // Computes gradients of average pooling function.
   25577 //
   25578 // Arguments:
   25579 //	orig_input_shape: The original input dimensions.
   25580 //	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
   25581 //	ksize: 1-D tensor of length 5. The size of the window for each dimension of
   25582 // the input tensor. Must have `ksize[0] = ksize[4] = 1`.
   25583 //	strides: 1-D tensor of length 5. The stride of the sliding window for each
   25584 // dimension of `input`. Must have `strides[0] = strides[4] = 1`.
   25585 //	padding: The type of padding algorithm to use.
   25586 //
   25587 // Returns The backprop for input.
   25588 func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output) {
   25589 	if scope.Err() != nil {
   25590 		return
   25591 	}
   25592 	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
   25593 	for _, a := range optional {
   25594 		a(attrs)
   25595 	}
   25596 	opspec := tf.OpSpec{
   25597 		Type: "AvgPool3DGrad",
   25598 		Input: []tf.Input{
   25599 			orig_input_shape, grad,
   25600 		},
   25601 		Attrs: attrs,
   25602 	}
   25603 	op := scope.AddOperation(opspec)
   25604 	return op.Output(0)
   25605 }
   25606 
   25607 // ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
   25608 type ParseSingleSequenceExampleAttr func(optionalAttr)
   25609 
   25610 // ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
   25611 //
   25612 // value: A list of Ncontext_sparse types; the data types of data in
   25613 // each context Feature given in context_sparse_keys.
   25614 // Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
   25615 // DT_INT64 (Int64List), and DT_STRING (BytesList).
   25616 // If not specified, defaults to <>
   25617 //
   25618 // REQUIRES: len(value) >= 0
   25619 func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
   25620 	return func(m optionalAttr) {
   25621 		m["context_sparse_types"] = value
   25622 	}
   25623 }
   25624 
   25625 // ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
   25626 // If not specified, defaults to <>
   25627 //
   25628 // REQUIRES: len(value) >= 0
   25629 func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
   25630 	return func(m optionalAttr) {
   25631 		m["feature_list_dense_types"] = value
   25632 	}
   25633 }
   25634 
   25635 // ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
   25636 //
   25637 // value: A list of Ncontext_dense shapes; the shapes of data in
   25638 // each context Feature given in context_dense_keys.
   25639 // The number of elements in the Feature corresponding to context_dense_key[j]
   25640 // must always equal context_dense_shapes[j].NumEntries().
   25641 // The shape of context_dense_values[j] will match context_dense_shapes[j].
   25642 // If not specified, defaults to <>
   25643 //
   25644 // REQUIRES: len(value) >= 0
   25645 func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
   25646 	return func(m optionalAttr) {
   25647 		m["context_dense_shapes"] = value
   25648 	}
   25649 }
   25650 
   25651 // ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
   25652 //
   25653 // value: A list of Nfeature_list_sparse types; the data types
   25654 // of data in each FeatureList given in feature_list_sparse_keys.
   25655 // Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
   25656 // DT_INT64 (Int64List), and DT_STRING (BytesList).
   25657 // If not specified, defaults to <>
   25658 //
   25659 // REQUIRES: len(value) >= 0
   25660 func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
   25661 	return func(m optionalAttr) {
   25662 		m["feature_list_sparse_types"] = value
   25663 	}
   25664 }
   25665 
   25666 // ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
   25667 //
   25668 // value: A list of Nfeature_list_dense shapes; the shapes of
   25669 // data in each FeatureList given in feature_list_dense_keys.
   25670 // The shape of each Feature in the FeatureList corresponding to
   25671 // feature_list_dense_key[j] must always equal
   25672 // feature_list_dense_shapes[j].NumEntries().
   25673 // If not specified, defaults to <>
   25674 //
   25675 // REQUIRES: len(value) >= 0
   25676 func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
   25677 	return func(m optionalAttr) {
   25678 		m["feature_list_dense_shapes"] = value
   25679 	}
   25680 }
   25681 
   25682 // Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
   25683 //
   25684 // Arguments:
   25685 //	serialized: A scalar containing a binary serialized SequenceExample proto.
   25686 //	feature_list_dense_missing_assumed_empty: A vector listing the
   25687 // FeatureList keys which may be missing from the SequenceExample.  If the
   25688 // associated FeatureList is missing, it is treated as empty.  By default,
   25689 // any FeatureList not listed in this vector must exist in the SequenceExample.
   25690 //	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
   25691 // The keys expected in the Examples' features associated with context_sparse
   25692 // values.
   25693 //	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
   25694 // The keys expected in the SequenceExamples' context features associated with
   25695 // dense values.
   25696 //	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
   25697 // (scalars).  The keys expected in the FeatureLists associated with sparse
   25698 // values.
   25699 //	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
   25700 // The keys expected in the SequenceExamples' feature_lists associated
   25701 // with lists of dense values.
   25702 //	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
   25703 // context_dense_defaults[j] provides default values
   25704 // when the SequenceExample's context map lacks context_dense_key[j].
   25705 // If an empty Tensor is provided for context_dense_defaults[j],
   25706 // then the Feature context_dense_keys[j] is required.
   25707 // The input type is inferred from context_dense_defaults[j], even when it's
   25708 // empty.  If context_dense_defaults[j] is not empty, its shape must match
   25709 // context_dense_shapes[j].
   25710 //	debug_name: A scalar containing the name of the serialized proto.
   25711 // May contain, for example, table key (descriptive) name for the
   25712 // corresponding serialized proto.  This is purely useful for debugging
   25713 // purposes, and the presence of values here has no effect on the output.
   25714 // May also be an empty scalar if no name is available.
   25715 func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output) {
   25716 	if scope.Err() != nil {
   25717 		return
   25718 	}
   25719 	attrs := map[string]interface{}{}
   25720 	for _, a := range optional {
   25721 		a(attrs)
   25722 	}
   25723 	opspec := tf.OpSpec{
   25724 		Type: "ParseSingleSequenceExample",
   25725 		Input: []tf.Input{
   25726 			serialized, feature_list_dense_missing_assumed_empty, tf.OutputList(context_sparse_keys), tf.OutputList(context_dense_keys), tf.OutputList(feature_list_sparse_keys), tf.OutputList(feature_list_dense_keys), tf.OutputList(context_dense_defaults), debug_name,
   25727 		},
   25728 		Attrs: attrs,
   25729 	}
   25730 	op := scope.AddOperation(opspec)
   25731 	if scope.Err() != nil {
   25732 		return
   25733 	}
   25734 	var idx int
   25735 	var err error
   25736 	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
   25737 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25738 		return
   25739 	}
   25740 	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
   25741 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25742 		return
   25743 	}
   25744 	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
   25745 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25746 		return
   25747 	}
   25748 	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
   25749 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25750 		return
   25751 	}
   25752 	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
   25753 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25754 		return
   25755 	}
   25756 	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
   25757 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25758 		return
   25759 	}
   25760 	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
   25761 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25762 		return
   25763 	}
   25764 	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
   25765 		scope.UpdateErr("ParseSingleSequenceExample", err)
   25766 		return
   25767 	}
   25768 	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values
   25769 }
   25770 
   25771 // DecodeWavAttr is an optional argument to DecodeWav.
   25772 type DecodeWavAttr func(optionalAttr)
   25773 
   25774 // DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
   25775 //
   25776 // value: Number of sample channels wanted.
   25777 // If not specified, defaults to -1
   25778 func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
   25779 	return func(m optionalAttr) {
   25780 		m["desired_channels"] = value
   25781 	}
   25782 }
   25783 
   25784 // DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
   25785 //
   25786 // value: Length of audio requested.
   25787 // If not specified, defaults to -1
   25788 func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
   25789 	return func(m optionalAttr) {
   25790 		m["desired_samples"] = value
   25791 	}
   25792 }
   25793 
   25794 // Decode a 16-bit PCM WAV file to a float tensor.
   25795 //
   25796 // The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
   25797 //
// When desired_channels is set, if the input contains fewer channels than this
// then the last channel will be duplicated to give the requested number; if
// the input has more channels than requested, the additional channels will be
// ignored.
   25802 //
   25803 // If desired_samples is set, then the audio will be cropped or padded with zeroes
   25804 // to the requested length.
   25805 //
// The first output contains a Tensor with the content of the audio samples. The
// innermost (last) dimension is the number of channels, and the first is the
// number of samples. For example, a ten-sample-long stereo WAV file should give an
// output shape of [10, 2].
   25810 //
   25811 // Arguments:
   25812 //	contents: The WAV-encoded audio, usually from a file.
   25813 //
// Returns:
//	audio: 2-D with shape `[length, channels]`.
//	sample_rate: Scalar holding the sample rate found in the WAV header.
   25815 func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
   25816 	if scope.Err() != nil {
   25817 		return
   25818 	}
   25819 	attrs := map[string]interface{}{}
   25820 	for _, a := range optional {
   25821 		a(attrs)
   25822 	}
   25823 	opspec := tf.OpSpec{
   25824 		Type: "DecodeWav",
   25825 		Input: []tf.Input{
   25826 			contents,
   25827 		},
   25828 		Attrs: attrs,
   25829 	}
   25830 	op := scope.AddOperation(opspec)
   25831 	return op.Output(0), op.Output(1)
   25832 }
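
// Example (editorial sketch, not machine generated): decoding WAV bytes that
// were loaded outside the graph. Assumes `NewScope` and `Const` from this
// package and the standard-library "io/ioutil" import; "audio.wav" is a
// hypothetical path and error handling is elided.
//
// ```go
// wavBytes, _ := ioutil.ReadFile("audio.wav") // hypothetical input file
// s := NewScope()
// contents := Const(s, string(wavBytes))
// audio, sampleRate := DecodeWav(s, contents, DecodeWavDesiredChannels(1))
// _, _ = audio, sampleRate
// ```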
   25833 
   25834 // UniqueAttr is an optional argument to Unique.
   25835 type UniqueAttr func(optionalAttr)
   25836 
   25837 // UniqueOutIdx sets the optional out_idx attribute to value.
   25838 // If not specified, defaults to DT_INT32
   25839 func UniqueOutIdx(value tf.DataType) UniqueAttr {
   25840 	return func(m optionalAttr) {
   25841 		m["out_idx"] = value
   25842 	}
   25843 }
   25844 
   25845 // Finds unique elements in a 1-D tensor.
   25846 //
   25847 // This operation returns a tensor `y` containing all of the unique elements of `x`
   25848 // sorted in the same order that they occur in `x`. This operation also returns a
   25849 // tensor `idx` the same size as `x` that contains the index of each value of `x`
   25850 // in the unique output `y`. In other words:
   25851 //
// `y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
   25853 //
   25854 // For example:
   25855 //
   25856 // ```
   25857 // # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
   25858 // y, idx = unique(x)
   25859 // y ==> [1, 2, 4, 7, 8]
   25860 // idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
   25861 // ```
   25862 //
   25863 // Arguments:
   25864 //	x: 1-D.
   25865 //
// Returns:
//	y: 1-D.
//	idx: 1-D.
   25867 func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output) {
   25868 	if scope.Err() != nil {
   25869 		return
   25870 	}
   25871 	attrs := map[string]interface{}{}
   25872 	for _, a := range optional {
   25873 		a(attrs)
   25874 	}
   25875 	opspec := tf.OpSpec{
   25876 		Type: "Unique",
   25877 		Input: []tf.Input{
   25878 			x,
   25879 		},
   25880 		Attrs: attrs,
   25881 	}
   25882 	op := scope.AddOperation(opspec)
   25883 	return op.Output(0), op.Output(1)
   25884 }
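
// Example (editorial sketch, not machine generated): the Go equivalent of the
// example above. Assumes `NewScope` and `Const` from this package.
//
// ```go
// s := NewScope()
// x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
// y, idx := Unique(s, x)
// // When executed: y is [1, 2, 4, 7, 8], idx is [0, 0, 1, 2, 2, 2, 3, 4, 4].
// ```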
   25885 
   25886 // Concatenates a list of `N` tensors along the first dimension.
   25887 //
   25888 // The input tensors are all required to have size 1 in the first dimension.
   25889 //
   25890 // For example:
   25891 //
   25892 // ```
   25893 // # 'x' is [[1, 4]]
   25894 // # 'y' is [[2, 5]]
   25895 // # 'z' is [[3, 6]]
   25896 // parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
   25897 // ```
   25898 //
// The difference between concat and parallel_concat is that concat requires all
// of the inputs be computed before the operation will begin, but doesn't require
// that the input shapes be known during graph construction.  Parallel concat
// will copy pieces of the input into the output as they become available; in
// some situations this can provide a performance benefit.
   25904 //
   25905 // Arguments:
   25906 //	values: Tensors to be concatenated. All must have size 1 in the first dimension
   25907 // and same shape.
   25908 //	shape: the final shape of the result; should be equal to the shapes of any input
   25909 // but with the number of input values in the first dimension.
   25910 //
   25911 // Returns The concatenated tensor.
   25912 func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
   25913 	if scope.Err() != nil {
   25914 		return
   25915 	}
   25916 	attrs := map[string]interface{}{"shape": shape}
   25917 	opspec := tf.OpSpec{
   25918 		Type: "ParallelConcat",
   25919 		Input: []tf.Input{
   25920 			tf.OutputList(values),
   25921 		},
   25922 		Attrs: attrs,
   25923 	}
   25924 	op := scope.AddOperation(opspec)
   25925 	return op.Output(0)
   25926 }
   25927 
   25928 // Concatenates tensors along one dimension.
   25929 //
   25930 // Arguments:
   25931 //	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
   25932 // range [0, rank(values)).
   25933 //	values: The `N` Tensors to concatenate. Their ranks and types must match,
   25934 // and their sizes must match in all dimensions except `concat_dim`.
   25935 //
   25936 // Returns A `Tensor` with the concatenation of values stacked along the
   25937 // `concat_dim` dimension.  This tensor's shape matches that of `values` except
   25938 // in `concat_dim` where it has the sum of the sizes.
   25939 func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output) {
   25940 	if scope.Err() != nil {
   25941 		return
   25942 	}
   25943 	opspec := tf.OpSpec{
   25944 		Type: "Concat",
   25945 		Input: []tf.Input{
   25946 			concat_dim, tf.OutputList(values),
   25947 		},
   25948 	}
   25949 	op := scope.AddOperation(opspec)
   25950 	return op.Output(0)
   25951 }
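
// Example (editorial sketch, not machine generated): concatenating two 1x2
// matrices along dimension 0. Assumes `NewScope` and `Const` from this package.
//
// ```go
// s := NewScope()
// concatDim := Const(s, int32(0))
// a := Const(s, [][]int32{{1, 4}})
// b := Const(s, [][]int32{{2, 5}})
// out := Concat(s, concatDim, []tf.Output{a, b})
// // When executed, out is [[1, 4], [2, 5]] with shape [2, 2].
// ```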
   25952 
// Compute the lower regularized incomplete Gamma function `P(a, x)`.
   25954 //
   25955 // The lower regularized incomplete Gamma function is defined as:
   25956 //
   25957 //
   25958 // \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
   25959 //
   25960 // where
   25961 //
// \\(gamma(a, x) = \int_{0}^{x} t^{a-1} \exp(-t) \, dt\\)
   25963 //
   25964 // is the lower incomplete Gamma function.
   25965 //
// Note that `Q(a, x)` (`Igammac`) above is the upper regularized incomplete
// Gamma function.
   25968 func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
   25969 	if scope.Err() != nil {
   25970 		return
   25971 	}
   25972 	opspec := tf.OpSpec{
   25973 		Type: "Igamma",
   25974 		Input: []tf.Input{
   25975 			a, x,
   25976 		},
   25977 	}
   25978 	op := scope.AddOperation(opspec)
   25979 	return op.Output(0)
   25980 }
   25981 
   25982 // Computes offsets of concat inputs within its output.
   25983 //
   25984 // For example:
   25985 //
   25986 // ```
   25987 // # 'x' is [2, 2, 7]
   25988 // # 'y' is [2, 3, 7]
   25989 // # 'z' is [2, 5, 7]
   25990 // concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
   25991 // ```
   25992 //
   25993 // This is typically used by gradient computations for a concat operation.
   25994 //
   25995 // Arguments:
   25996 //	concat_dim: The dimension along which to concatenate.
   25997 //	shape: The `N` int32 vectors representing shape of tensors being concatenated.
   25998 //
   25999 // Returns The `N` int32 vectors representing the starting offset
   26000 // of input tensors within the concatenated output.
   26001 func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
   26002 	if scope.Err() != nil {
   26003 		return
   26004 	}
   26005 	opspec := tf.OpSpec{
   26006 		Type: "ConcatOffset",
   26007 		Input: []tf.Input{
   26008 			concat_dim, tf.OutputList(shape),
   26009 		},
   26010 	}
   26011 	op := scope.AddOperation(opspec)
   26012 	if scope.Err() != nil {
   26013 		return
   26014 	}
   26015 	var idx int
   26016 	var err error
   26017 	if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
   26018 		scope.UpdateErr("ConcatOffset", err)
   26019 		return
   26020 	}
   26021 	return offset
   26022 }
   26023 
   26024 // Splits a tensor into `num_split` tensors along one dimension.
   26025 //
   26026 // Arguments:
   26027 //	axis: 0-D.  The dimension along which to split.  Must be in the range
   26028 // `[-rank(value), rank(value))`.
   26029 //	value: The tensor to split.
   26030 //	num_split: The number of ways to split.  Must evenly divide
   26031 // `value.shape[split_dim]`.
   26032 //
// Returns Identically shaped tensors whose shape matches that of `value`
// except along `axis`, where their sizes are
// `value.shape[axis] / num_split`.
   26036 func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
   26037 	if scope.Err() != nil {
   26038 		return
   26039 	}
   26040 	attrs := map[string]interface{}{"num_split": num_split}
   26041 	opspec := tf.OpSpec{
   26042 		Type: "Split",
   26043 		Input: []tf.Input{
   26044 			axis, value,
   26045 		},
   26046 		Attrs: attrs,
   26047 	}
   26048 	op := scope.AddOperation(opspec)
   26049 	if scope.Err() != nil {
   26050 		return
   26051 	}
   26052 	var idx int
   26053 	var err error
   26054 	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
   26055 		scope.UpdateErr("Split", err)
   26056 		return
   26057 	}
   26058 	return output
   26059 }
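
// Example (editorial sketch, not machine generated): splitting a length-6
// vector into three equal parts along dimension 0. Assumes `NewScope` and
// `Const` from this package.
//
// ```go
// s := NewScope()
// axis := Const(s, int32(0))
// value := Const(s, []int32{1, 2, 3, 4, 5, 6})
// parts := Split(s, axis, value, 3)
// // When executed: parts[0] is [1, 2], parts[1] is [3, 4], parts[2] is [5, 6].
// ```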
   26060 
   26061 // Splits a tensor into `num_split` tensors along one dimension.
   26062 //
   26063 // Arguments:
   26064 //	value: The tensor to split.
   26065 //	size_splits: list containing the sizes of each output tensor along the split
   26066 // dimension. Must sum to the dimension of value along split_dim.
   26067 // Can contain one -1 indicating that dimension is to be inferred.
   26068 //	axis: 0-D.  The dimension along which to split.  Must be in the range
   26069 // `[-rank(value), rank(value))`.
   26070 //
   26071 //
   26072 // Returns Tensors whose shape matches that of `value`
   26073 // except along `axis`, where their sizes are
   26074 // `size_splits[i]`.
   26075 func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output) {
   26076 	if scope.Err() != nil {
   26077 		return
   26078 	}
   26079 	attrs := map[string]interface{}{"num_split": num_split}
   26080 	opspec := tf.OpSpec{
   26081 		Type: "SplitV",
   26082 		Input: []tf.Input{
   26083 			value, size_splits, axis,
   26084 		},
   26085 		Attrs: attrs,
   26086 	}
   26087 	op := scope.AddOperation(opspec)
   26088 	if scope.Err() != nil {
   26089 		return
   26090 	}
   26091 	var idx int
   26092 	var err error
   26093 	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
   26094 		scope.UpdateErr("SplitV", err)
   26095 		return
   26096 	}
   26097 	return output
   26098 }
   26099 
   26100 // Gives a guarantee to the TF runtime that the input tensor is a constant.
   26101 //
   26102 // The runtime is then free to make optimizations based on this.
   26103 //
// Only accepts value-typed tensors as inputs and rejects resource variable
// handles as input.
   26106 //
   26107 // Returns the input tensor without modification.
   26108 func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output) {
   26109 	if scope.Err() != nil {
   26110 		return
   26111 	}
   26112 	opspec := tf.OpSpec{
   26113 		Type: "GuaranteeConst",
   26114 		Input: []tf.Input{
   26115 			input,
   26116 		},
   26117 	}
   26118 	op := scope.AddOperation(opspec)
   26119 	return op.Output(0)
   26120 }
   26121 
   26122 // Returns a tensor of zeros with the same shape and type as x.
   26123 //
   26124 // Arguments:
   26125 //	x: a tensor of type T.
   26126 //
   26127 // Returns a tensor of the same shape and type as x but filled with zeros.
   26128 func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
   26129 	if scope.Err() != nil {
   26130 		return
   26131 	}
   26132 	opspec := tf.OpSpec{
   26133 		Type: "ZerosLike",
   26134 		Input: []tf.Input{
   26135 			x,
   26136 		},
   26137 	}
   26138 	op := scope.AddOperation(opspec)
   26139 	return op.Output(0)
   26140 }
   26141 
   26142 // Flips all bits elementwise.
   26143 //
// The result will have exactly those bits set that are not set in `x`. The
// computation is performed on the underlying representation of `x`.
   26146 func Invert(scope *Scope, x tf.Output) (y tf.Output) {
   26147 	if scope.Err() != nil {
   26148 		return
   26149 	}
   26150 	opspec := tf.OpSpec{
   26151 		Type: "Invert",
   26152 		Input: []tf.Input{
   26153 			x,
   26154 		},
   26155 	}
   26156 	op := scope.AddOperation(opspec)
   26157 	return op.Output(0)
   26158 }
   26159 
   26160 // DequantizeAttr is an optional argument to Dequantize.
   26161 type DequantizeAttr func(optionalAttr)
   26162 
   26163 // DequantizeMode sets the optional mode attribute to value.
   26164 // If not specified, defaults to "MIN_COMBINED"
   26165 func DequantizeMode(value string) DequantizeAttr {
   26166 	return func(m optionalAttr) {
   26167 		m["mode"] = value
   26168 	}
   26169 }
   26170 
   26171 // Dequantize the 'input' tensor into a float Tensor.
   26172 //
   26173 // [min_range, max_range] are scalar floats that specify the range for
   26174 // the 'input' data. The 'mode' attribute controls exactly which calculations are
   26175 // used to convert the float values to their quantized equivalents.
   26176 //
   26177 // In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
   26178 //
   26179 // ```
// if T == qint8, in[i] += (range(T) + 1) / 2.0
// out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
// ```
// where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`.
   26184 //
   26185 // *MIN_COMBINED Mode Example*
   26186 //
   26187 // If the input comes from a QuantizedRelu6, the output type is
   26188 // quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
   26189 // 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
   26190 // Dequantize on quint8 will take each value, cast to float, and multiply
   26191 // by 6 / 255.
// Note that if the quantized type is qint8, the operation will additionally
// add 128 to each value prior to casting.
   26194 //
   26195 // If the mode is 'MIN_FIRST', then this approach is used:
   26196 //
   26197 // ```c++
   26198 // num_discrete_values = 1 << (# of bits in T)
   26199 // range_adjust = num_discrete_values / (num_discrete_values - 1)
   26200 // range = (range_max - range_min) * range_adjust
   26201 // range_scale = range / num_discrete_values
// lowest_quantized = numeric_limits<T>::min()
// offset_input = static_cast<double>(input) - lowest_quantized
// result = range_min + (offset_input * range_scale)
   26204 // ```
   26205 //
   26206 // *SCALED mode Example*
   26207 //
   26208 // `SCALED` mode matches the quantization approach used in
   26209 // `QuantizeAndDequantize{V2|V3}`.
   26210 //
   26211 // If the mode is `SCALED`, we do not use the full range of the output type,
   26212 // choosing to elide the lowest possible value for symmetry (e.g., output range is
   26213 // -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
   26214 // 0.
   26215 //
   26216 // We first find the range of values in our tensor. The
   26217 // range we use is always centered on 0, so we find m such that
   26218 // ```c++
   26219 //   m = max(abs(input_min), abs(input_max))
   26220 // ```
   26221 //
   26222 // Our input tensor range is then `[-m, m]`.
   26223 //
   26224 // Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
   26225 // If T is signed, this is
   26226 // ```
   26227 //   num_bits = sizeof(T) * 8
   26228 //   [min_fixed, max_fixed] =
//       [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
   26230 // ```
   26231 //
   26232 // Otherwise, if T is unsigned, the fixed-point range is
   26233 // ```
   26234 //   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
   26235 // ```
   26236 //
   26237 // From this we compute our scaling factor, s:
   26238 // ```c++
   26239 //   s = (2 * m) / (max_fixed - min_fixed)
   26240 // ```
   26241 //
   26242 // Now we can dequantize the elements of our tensor:
   26243 // ```c++
   26244 // result = input * s
   26245 // ```
   26246 //
   26247 // Arguments:
   26248 //
   26249 //	min_range: The minimum scalar value possibly produced for the input.
   26250 //	max_range: The maximum scalar value possibly produced for the input.
   26251 func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output) {
   26252 	if scope.Err() != nil {
   26253 		return
   26254 	}
   26255 	attrs := map[string]interface{}{}
   26256 	for _, a := range optional {
   26257 		a(attrs)
   26258 	}
   26259 	opspec := tf.OpSpec{
   26260 		Type: "Dequantize",
   26261 		Input: []tf.Input{
   26262 			input, min_range, max_range,
   26263 		},
   26264 		Attrs: attrs,
   26265 	}
   26266 	op := scope.AddOperation(opspec)
   26267 	return op.Output(0)
   26268 }
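
// Illustrative, hand-written sketch (not machine generated): dequantizes a
// quint8 tensor `q` that is assumed to be produced elsewhere in the graph
// (e.g. by a quantization op); the range values mirror the QuantizedRelu6
// example above.
//
// ```go
// s := op.NewScope()
// minRange := op.Const(s, float32(0.0))
// maxRange := op.Const(s, float32(6.0))
// f := op.Dequantize(s, q, minRange, maxRange, op.DequantizeMode("MIN_COMBINED"))
// ```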
   26269 
   26270 // Returns the element-wise max of two SparseTensors.
   26271 //
   26272 // Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
   26273 //
   26274 // Arguments:
   26275 //	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
   26276 // SparseTensor, in the canonical lexicographic ordering.
   26277 //	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
   26278 //	a_shape: 1-D.  Shape of the input SparseTensor.
   26279 //	b_indices: counterpart to `a_indices` for the other operand.
   26280 //	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
   26281 //	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
   26282 //
// Returns:
//	output_indices: 2-D.  The indices of the output SparseTensor.
//	output_values: 1-D.  The values of the output SparseTensor.
   26284 func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
   26285 	if scope.Err() != nil {
   26286 		return
   26287 	}
   26288 	opspec := tf.OpSpec{
   26289 		Type: "SparseSparseMaximum",
   26290 		Input: []tf.Input{
   26291 			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
   26292 		},
   26293 	}
   26294 	op := scope.AddOperation(opspec)
   26295 	return op.Output(0), op.Output(1)
   26296 }
   26297 
   26298 // Returns a batched matrix tensor with new batched diagonal values.
   26299 //
   26300 // Given `input` and `diagonal`, this operation returns a tensor with the
   26301 // same shape and values as `input`, except for the main diagonal of the
   26302 // innermost matrices.  These will be overwritten by the values in `diagonal`.
   26303 //
   26304 // The output is computed as follows:
   26305 //
   26306 // Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
   26307 // `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
   26308 // tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
   26309 //
   26310 //   * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
   26311 //   * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
   26312 //
   26313 // Arguments:
   26314 //	input: Rank `k+1`, where `k >= 1`.
   26315 //	diagonal: Rank `k`, where `k >= 1`.
   26316 //
   26317 // Returns Rank `k+1`, with `output.shape = input.shape`.
   26318 func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output) {
   26319 	if scope.Err() != nil {
   26320 		return
   26321 	}
   26322 	opspec := tf.OpSpec{
   26323 		Type: "MatrixSetDiag",
   26324 		Input: []tf.Input{
   26325 			input, diagonal,
   26326 		},
   26327 	}
   26328 	op := scope.AddOperation(opspec)
   26329 	return op.Output(0)
   26330 }
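
// Illustrative, hand-written sketch (not machine generated): overwrites the
// main diagonal of a 2x2 constant matrix; Const is assumed to be available
// from this package.
//
// ```go
// s := op.NewScope()
// m := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// d := op.Const(s, []float32{9, 8})
// out := op.MatrixSetDiag(s, m, d)
// // out evaluates to [[9, 2], [3, 8]].
// ```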
   26331 
   26332 // EditDistanceAttr is an optional argument to EditDistance.
   26333 type EditDistanceAttr func(optionalAttr)
   26334 
   26335 // EditDistanceNormalize sets the optional normalize attribute to value.
   26336 //
   26337 // value: boolean (if true, edit distances are normalized by length of truth).
   26338 //
   26340 // If not specified, defaults to true
   26341 func EditDistanceNormalize(value bool) EditDistanceAttr {
   26342 	return func(m optionalAttr) {
   26343 		m["normalize"] = value
   26344 	}
   26345 }
   26346 
   26347 // Computes the (possibly normalized) Levenshtein Edit Distance.
   26348 //
   26349 // The inputs are variable-length sequences provided by SparseTensors
   26350 //   (hypothesis_indices, hypothesis_values, hypothesis_shape)
   26351 // and
   26352 //   (truth_indices, truth_values, truth_shape).
   26353 //
   26356 // Arguments:
   26357 //	hypothesis_indices: The indices of the hypothesis list SparseTensor.
   26358 // This is an N x R int64 matrix.
   26359 //	hypothesis_values: The values of the hypothesis list SparseTensor.
   26360 // This is an N-length vector.
   26361 //	hypothesis_shape: The shape of the hypothesis list SparseTensor.
   26362 // This is an R-length vector.
   26363 //	truth_indices: The indices of the truth list SparseTensor.
   26364 // This is an M x R int64 matrix.
   26365 //	truth_values: The values of the truth list SparseTensor.
   26366 // This is an M-length vector.
//	truth_shape: The shape of the truth list SparseTensor.
// This is an R-length vector.
   26368 //
   26369 // Returns A dense float tensor with rank R - 1.
   26370 //
   26371 // For the example input:
   26372 //
   26373 //     // hypothesis represents a 2x1 matrix with variable-length values:
   26374 //     //   (0,0) = ["a"]
   26375 //     //   (1,0) = ["b"]
   26376 //     hypothesis_indices = [[0, 0, 0],
   26377 //                           [1, 0, 0]]
   26378 //     hypothesis_values = ["a", "b"]
   26379 //     hypothesis_shape = [2, 1, 1]
   26380 //
   26381 //     // truth represents a 2x2 matrix with variable-length values:
   26382 //     //   (0,0) = []
   26383 //     //   (0,1) = ["a"]
   26384 //     //   (1,0) = ["b", "c"]
   26385 //     //   (1,1) = ["a"]
   26386 //     truth_indices = [[0, 1, 0],
   26387 //                      [1, 0, 0],
   26388 //                      [1, 0, 1],
   26389 //                      [1, 1, 0]]
   26390 //     truth_values = ["a", "b", "c", "a"]
   26391 //     truth_shape = [2, 2, 2]
   26392 //     normalize = true
   26393 //
   26394 // The output will be:
   26395 //
   26396 //     // output is a 2x2 matrix with edit distances normalized by truth lengths.
   26397 //     output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
   26398 //               [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
   26399 func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) {
   26400 	if scope.Err() != nil {
   26401 		return
   26402 	}
   26403 	attrs := map[string]interface{}{}
   26404 	for _, a := range optional {
   26405 		a(attrs)
   26406 	}
   26407 	opspec := tf.OpSpec{
   26408 		Type: "EditDistance",
   26409 		Input: []tf.Input{
   26410 			hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape,
   26411 		},
   26412 		Attrs: attrs,
   26413 	}
   26414 	op := scope.AddOperation(opspec)
   26415 	return op.Output(0)
   26416 }
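
// Illustrative, hand-written sketch (not machine generated): encodes the
// hypothesis/truth example from the comment above as constant SparseTensor
// components and requests normalized distances; Const is assumed available.
//
// ```go
// s := op.NewScope()
// hypIndices := op.Const(s, [][]int64{{0, 0, 0}, {1, 0, 0}})
// hypValues := op.Const(s, []string{"a", "b"})
// hypShape := op.Const(s, []int64{2, 1, 1})
// truthIndices := op.Const(s, [][]int64{{0, 1, 0}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}})
// truthValues := op.Const(s, []string{"a", "b", "c", "a"})
// truthShape := op.Const(s, []int64{2, 2, 2})
// dist := op.EditDistance(s, hypIndices, hypValues, hypShape,
// 	truthIndices, truthValues, truthShape, op.EditDistanceNormalize(true))
// // dist evaluates to [[inf, 1.0], [0.5, 1.0]].
// ```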
   26417 
   26418 // Gather slices from `params` into a Tensor with shape specified by `indices`.
   26419 //
// `indices` is a K-dimensional integer tensor, best thought of as a
   26421 // (K-1)-dimensional tensor of indices into `params`, where each element defines a
   26422 // slice of `params`:
   26423 //
//     output[i_0, ..., i_{K-2}] = params[indices[i_0, ..., i_{K-2}]]
   26425 //
// Whereas in `tf.gather` `indices` defines slices into the first
   26427 // dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
   26428 // first `N` dimensions of `params`, where `N = indices.shape[-1]`.
   26429 //
   26430 // The last dimension of `indices` can be at most the rank of
   26431 // `params`:
   26432 //
   26433 //     indices.shape[-1] <= params.rank
   26434 //
   26435 // The last dimension of `indices` corresponds to elements
   26436 // (if `indices.shape[-1] == params.rank`) or slices
   26437 // (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
   26438 // of `params`.  The output tensor has shape
   26439 //
   26440 //     indices.shape[:-1] + params.shape[indices.shape[-1]:]
   26441 //
// Some examples follow.
   26443 //
   26444 // Simple indexing into a matrix:
   26445 //
   26446 // ```python
   26447 //     indices = [[0, 0], [1, 1]]
   26448 //     params = [['a', 'b'], ['c', 'd']]
   26449 //     output = ['a', 'd']
   26450 // ```
   26451 //
   26452 // Slice indexing into a matrix:
   26453 //
   26454 // ```python
   26455 //     indices = [[1], [0]]
   26456 //     params = [['a', 'b'], ['c', 'd']]
   26457 //     output = [['c', 'd'], ['a', 'b']]
   26458 // ```
   26459 //
   26460 // Indexing into a 3-tensor:
   26461 //
   26462 // ```python
   26463 //     indices = [[1]]
   26464 //     params = [[['a0', 'b0'], ['c0', 'd0']],
   26465 //               [['a1', 'b1'], ['c1', 'd1']]]
   26466 //     output = [[['a1', 'b1'], ['c1', 'd1']]]
   26467 //
   26468 //
   26469 //     indices = [[0, 1], [1, 0]]
   26470 //     params = [[['a0', 'b0'], ['c0', 'd0']],
   26471 //               [['a1', 'b1'], ['c1', 'd1']]]
   26472 //     output = [['c0', 'd0'], ['a1', 'b1']]
   26473 //
   26474 //
   26475 //     indices = [[0, 0, 1], [1, 0, 1]]
   26476 //     params = [[['a0', 'b0'], ['c0', 'd0']],
   26477 //               [['a1', 'b1'], ['c1', 'd1']]]
   26478 //     output = ['b0', 'b1']
   26479 // ```
   26480 //
   26481 // Batched indexing into a matrix:
   26482 //
   26483 // ```python
   26484 //     indices = [[[0, 0]], [[0, 1]]]
   26485 //     params = [['a', 'b'], ['c', 'd']]
   26486 //     output = [['a'], ['b']]
   26487 // ```
   26488 //
   26489 // Batched slice indexing into a matrix:
   26490 //
   26491 // ```python
   26492 //     indices = [[[1]], [[0]]]
   26493 //     params = [['a', 'b'], ['c', 'd']]
   26494 //     output = [[['c', 'd']], [['a', 'b']]]
   26495 // ```
   26496 //
   26497 // Batched indexing into a 3-tensor:
   26498 //
   26499 // ```python
   26500 //     indices = [[[1]], [[0]]]
   26501 //     params = [[['a0', 'b0'], ['c0', 'd0']],
   26502 //               [['a1', 'b1'], ['c1', 'd1']]]
   26503 //     output = [[[['a1', 'b1'], ['c1', 'd1']]],
   26504 //               [[['a0', 'b0'], ['c0', 'd0']]]]
   26505 //
   26506 //     indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
   26507 //     params = [[['a0', 'b0'], ['c0', 'd0']],
   26508 //               [['a1', 'b1'], ['c1', 'd1']]]
   26509 //     output = [[['c0', 'd0'], ['a1', 'b1']],
   26510 //               [['a0', 'b0'], ['c1', 'd1']]]
   26511 //
   26512 //
   26513 //     indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
   26514 //     params = [[['a0', 'b0'], ['c0', 'd0']],
   26515 //               [['a1', 'b1'], ['c1', 'd1']]]
   26516 //     output = [['b0', 'b1'], ['d0', 'c1']]
   26517 // ```
   26518 //
   26519 // Arguments:
   26520 //	params: The tensor from which to gather values.
   26521 //	indices: Index tensor.
   26522 //
   26523 // Returns Values from `params` gathered from indices given by `indices`, with
   26524 // shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
   26525 func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
   26526 	if scope.Err() != nil {
   26527 		return
   26528 	}
   26529 	opspec := tf.OpSpec{
   26530 		Type: "GatherNd",
   26531 		Input: []tf.Input{
   26532 			params, indices,
   26533 		},
   26534 	}
   26535 	op := scope.AddOperation(opspec)
   26536 	return op.Output(0)
   26537 }
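
// Illustrative, hand-written sketch (not machine generated): the "simple
// indexing into a matrix" example above, expressed with Const inputs.
//
// ```go
// s := op.NewScope()
// params := op.Const(s, [][]string{{"a", "b"}, {"c", "d"}})
// indices := op.Const(s, [][]int32{{0, 0}, {1, 1}})
// out := op.GatherNd(s, params, indices)
// // out evaluates to ["a", "d"].
// ```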
   26538 
// Eagerly executes a Python function to compute func(input)->output.
//
// The semantics of the input, output, and attributes are the same as those
// for PyFunc.
   26543 func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType) (output []tf.Output) {
   26544 	if scope.Err() != nil {
   26545 		return
   26546 	}
   26547 	attrs := map[string]interface{}{"token": token, "Tout": Tout}
   26548 	opspec := tf.OpSpec{
   26549 		Type: "EagerPyFunc",
   26550 		Input: []tf.Input{
   26551 			tf.OutputList(input),
   26552 		},
   26553 		Attrs: attrs,
   26554 	}
   26555 	op := scope.AddOperation(opspec)
   26556 	if scope.Err() != nil {
   26557 		return
   26558 	}
   26559 	var idx int
   26560 	var err error
   26561 	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
   26562 		scope.UpdateErr("EagerPyFunc", err)
   26563 		return
   26564 	}
   26565 	return output
   26566 }
   26567 
   26568 // Stops gradient computation.
   26569 //
   26570 // When executed in a graph, this op outputs its input tensor as-is.
   26571 //
// When building ops to compute gradients, this op prevents the contribution
// of its inputs from being taken into account.  Normally, the gradient
// generator adds ops to a graph to compute the derivatives of a specified
// 'loss' by recursively finding the inputs that contributed to its
// computation.  If you insert this op in the graph, its inputs are masked
// from the gradient generator and are not taken into account for computing
// gradients.
   26578 //
   26579 // This is useful any time you want to compute a value with TensorFlow but need
   26580 // to pretend that the value was a constant. Some examples include:
   26581 //
   26582 // *  The *EM* algorithm where the *M-step* should not involve backpropagation
   26583 //    through the output of the *E-step*.
   26584 // *  Contrastive divergence training of Boltzmann machines where, when
   26585 //    differentiating the energy function, the training must not backpropagate
   26586 //    through the graph that generated the samples from the model.
   26587 // *  Adversarial training, where no backprop should happen through the adversarial
   26588 //    example generation process.
   26589 func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
   26590 	if scope.Err() != nil {
   26591 		return
   26592 	}
   26593 	opspec := tf.OpSpec{
   26594 		Type: "StopGradient",
   26595 		Input: []tf.Input{
   26596 			input,
   26597 		},
   26598 	}
   26599 	op := scope.AddOperation(opspec)
   26600 	return op.Output(0)
   26601 }
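
// Illustrative, hand-written sketch (not machine generated): treats a
// computed value as a constant for gradient purposes. Square is assumed to
// be another generated wrapper in this package.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1, 2, 3})
// sq := op.Square(s, x)
// frozen := op.StopGradient(s, sq) // gradients will not flow back into sq
// ```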
   26602 
   26603 // Computes asin of x element-wise.
   26604 func Asin(scope *Scope, x tf.Output) (y tf.Output) {
   26605 	if scope.Err() != nil {
   26606 		return
   26607 	}
   26608 	opspec := tf.OpSpec{
   26609 		Type: "Asin",
   26610 		Input: []tf.Input{
   26611 			x,
   26612 		},
   26613 	}
   26614 	op := scope.AddOperation(opspec)
   26615 	return op.Output(0)
   26616 }
   26617 
   26618 // PreventGradientAttr is an optional argument to PreventGradient.
   26619 type PreventGradientAttr func(optionalAttr)
   26620 
   26621 // PreventGradientMessage sets the optional message attribute to value.
   26622 //
   26623 // value: Will be printed in the error when anyone tries to differentiate
   26624 // this operation.
   26625 // If not specified, defaults to ""
   26626 func PreventGradientMessage(value string) PreventGradientAttr {
   26627 	return func(m optionalAttr) {
   26628 		m["message"] = value
   26629 	}
   26630 }
   26631 
   26632 // An identity op that triggers an error if a gradient is requested.
   26633 //
   26634 // When executed in a graph, this op outputs its input tensor as-is.
   26635 //
   26636 // When building ops to compute gradients, the TensorFlow gradient system
// will return an error when trying to look up the gradient of this op,
   26638 // because no gradient must ever be registered for this function.  This
   26639 // op exists to prevent subtle bugs from silently returning unimplemented
   26640 // gradients in some corner cases.
   26641 //
   26642 // Arguments:
   26643 //	input: any tensor.
   26644 //
   26645 // Returns the same input tensor.
   26646 func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
   26647 	if scope.Err() != nil {
   26648 		return
   26649 	}
   26650 	attrs := map[string]interface{}{}
   26651 	for _, a := range optional {
   26652 		a(attrs)
   26653 	}
   26654 	opspec := tf.OpSpec{
   26655 		Type: "PreventGradient",
   26656 		Input: []tf.Input{
   26657 			input,
   26658 		},
   26659 		Attrs: attrs,
   26660 	}
   26661 	op := scope.AddOperation(opspec)
   26662 	return op.Output(0)
   26663 }
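
// Illustrative, hand-written sketch (not machine generated): attaches a
// custom error message that surfaces if anything tries to differentiate
// through y; Const is assumed available.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{1, 2, 3})
// y := op.PreventGradient(s, x, op.PreventGradientMessage("gradient intentionally unimplemented"))
// ```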
   26664 
   26665 // Checks a tensor for NaN and Inf values.
   26666 //
   26667 // When run, reports an `InvalidArgument` error if `tensor` has any values
   26668 // that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
   26669 //
   26670 // Arguments:
   26671 //
   26672 //	message: Prefix of the error message.
   26673 func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
   26674 	if scope.Err() != nil {
   26675 		return
   26676 	}
   26677 	attrs := map[string]interface{}{"message": message}
   26678 	opspec := tf.OpSpec{
   26679 		Type: "CheckNumerics",
   26680 		Input: []tf.Input{
   26681 			tensor,
   26682 		},
   26683 		Attrs: attrs,
   26684 	}
   26685 	op := scope.AddOperation(opspec)
   26686 	return op.Output(0)
   26687 }
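
// Illustrative, hand-written sketch (not machine generated): passes a tensor
// through unchanged, but the graph fails at run time if the tensor contains
// NaN or Inf values.
//
// ```go
// s := op.NewScope()
// t := op.Const(s, []float32{1, 2, 3})
// checked := op.CheckNumerics(s, t, "bad value in t: ")
// ```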
   26688 
   26689 // Shuffle dimensions of x according to a permutation and conjugate the result.
   26690 //
   26691 // The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
   26692 //   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
   26693 //   `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
   26694 func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
   26695 	if scope.Err() != nil {
   26696 		return
   26697 	}
   26698 	opspec := tf.OpSpec{
   26699 		Type: "ConjugateTranspose",
   26700 		Input: []tf.Input{
   26701 			x, perm,
   26702 		},
   26703 	}
   26704 	op := scope.AddOperation(opspec)
   26705 	return op.Output(0)
   26706 }
   26707 
   26708 // UniqueV2Attr is an optional argument to UniqueV2.
   26709 type UniqueV2Attr func(optionalAttr)
   26710 
   26711 // UniqueV2OutIdx sets the optional out_idx attribute to value.
   26712 // If not specified, defaults to DT_INT32
   26713 func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr {
   26714 	return func(m optionalAttr) {
   26715 		m["out_idx"] = value
   26716 	}
   26717 }
   26718 
   26719 // Finds unique elements in a 1-D tensor.
   26720 //
   26721 // This operation returns a tensor `y` containing all of the unique elements of `x`
   26722 // sorted in the same order that they occur in `x`. This operation also returns a
   26723 // tensor `idx` the same size as `x` that contains the index of each value of `x`
   26724 // in the unique output `y`. In other words:
   26725 //
   26726 // `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
   26727 //
   26728 // For example:
   26729 //
   26730 // ```
   26731 // # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
   26732 // y, idx = unique(x)
   26733 // y ==> [1, 2, 4, 7, 8]
   26734 // idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
   26735 // ```
   26736 //
   26737 // Arguments:
   26738 //	x: A `Tensor`.
   26739 //	axis: A `Tensor` of type `int64` (default: 0). The axis of the Tensor to
   26740 // find the unique elements.
   26741 //
// Returns:
//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
//	idx: A 1-D Tensor that contains the index of each value of x in the
// output y.
   26744 func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output) {
   26745 	if scope.Err() != nil {
   26746 		return
   26747 	}
   26748 	attrs := map[string]interface{}{}
   26749 	for _, a := range optional {
   26750 		a(attrs)
   26751 	}
   26752 	opspec := tf.OpSpec{
   26753 		Type: "UniqueV2",
   26754 		Input: []tf.Input{
   26755 			x, axis,
   26756 		},
   26757 		Attrs: attrs,
   26758 	}
   26759 	op := scope.AddOperation(opspec)
   26760 	return op.Output(0), op.Output(1)
   26761 }
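
// Illustrative, hand-written sketch (not machine generated): the example
// from the comment above; note the two results and the 1-D int64 axis input.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
// axis := op.Const(s, []int64{0})
// y, idx := op.UniqueV2(s, x, axis)
// // y evaluates to [1, 2, 4, 7, 8]; idx to [0, 0, 1, 2, 2, 2, 3, 4, 4].
// ```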
   26762 
   26763 // Return a slice from 'input'.
   26764 //
   26765 // The output tensor is a tensor with dimensions described by 'size'
   26766 // whose values are extracted from 'input' starting at the offsets in
   26767 // 'begin'.
   26768 //
   26769 // *Requirements*:
   26770 //   0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
   26771 //
   26772 // Arguments:
   26773 //
   26774 //	begin: begin[i] specifies the offset into the 'i'th dimension of
   26775 // 'input' to slice from.
   26776 //	size: size[i] specifies the number of elements of the 'i'th dimension
   26777 // of 'input' to slice. If size[i] is -1, all remaining elements in dimension
   26778 // i are included in the slice (i.e. this is equivalent to setting
   26779 // size[i] = input.dim_size(i) - begin[i]).
   26780 func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output) {
   26781 	if scope.Err() != nil {
   26782 		return
   26783 	}
   26784 	opspec := tf.OpSpec{
   26785 		Type: "Slice",
   26786 		Input: []tf.Input{
   26787 			input, begin, size,
   26788 		},
   26789 	}
   26790 	op := scope.AddOperation(opspec)
   26791 	return op.Output(0)
   26792 }
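
// Illustrative, hand-written sketch (not machine generated): takes a 2x2
// window starting at row 0, column 1 of a 2x3 constant matrix.
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// begin := op.Const(s, []int32{0, 1})
// size := op.Const(s, []int32{2, 2})
// out := op.Slice(s, t, begin, size)
// // out evaluates to [[2, 3], [5, 6]].
// ```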
   26793 
   26794 // StridedSliceGradAttr is an optional argument to StridedSliceGrad.
   26795 type StridedSliceGradAttr func(optionalAttr)
   26796 
   26797 // StridedSliceGradBeginMask sets the optional begin_mask attribute to value.
   26798 // If not specified, defaults to 0
   26799 func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr {
   26800 	return func(m optionalAttr) {
   26801 		m["begin_mask"] = value
   26802 	}
   26803 }
   26804 
   26805 // StridedSliceGradEndMask sets the optional end_mask attribute to value.
   26806 // If not specified, defaults to 0
   26807 func StridedSliceGradEndMask(value int64) StridedSliceGradAttr {
   26808 	return func(m optionalAttr) {
   26809 		m["end_mask"] = value
   26810 	}
   26811 }
   26812 
   26813 // StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value.
   26814 // If not specified, defaults to 0
   26815 func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr {
   26816 	return func(m optionalAttr) {
   26817 		m["ellipsis_mask"] = value
   26818 	}
   26819 }
   26820 
   26821 // StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value.
   26822 // If not specified, defaults to 0
   26823 func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr {
   26824 	return func(m optionalAttr) {
   26825 		m["new_axis_mask"] = value
   26826 	}
   26827 }
   26828 
   26829 // StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
   26830 // If not specified, defaults to 0
   26831 func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr {
   26832 	return func(m optionalAttr) {
   26833 		m["shrink_axis_mask"] = value
   26834 	}
   26835 }
   26836 
   26837 // Returns the gradient of `StridedSlice`.
   26838 //
// Since `StridedSlice` cuts out pieces of its `input` whose size is
// `shape`, its gradient will have the same shape (which is passed here
   26841 // as `shape`). The gradient will be zero in any element that the slice
   26842 // does not select.
   26843 //
// Arguments are the same as StridedSlice with the exception that
   26845 // `dy` is the input gradient to be propagated and `shape` is the
   26846 // shape of `StridedSlice`'s `input`.
   26847 func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output) {
   26848 	if scope.Err() != nil {
   26849 		return
   26850 	}
   26851 	attrs := map[string]interface{}{}
   26852 	for _, a := range optional {
   26853 		a(attrs)
   26854 	}
   26855 	opspec := tf.OpSpec{
   26856 		Type: "StridedSliceGrad",
   26857 		Input: []tf.Input{
   26858 			shape, begin, end, strides, dy,
   26859 		},
   26860 		Attrs: attrs,
   26861 	}
   26862 	op := scope.AddOperation(opspec)
   26863 	return op.Output(0)
   26864 }
   26865 
   26866 // Returns the gradient of `Tile`.
   26867 //
   26868 // DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
   26869 //
   26870 // Since `Tile` takes an input and repeats the input `multiples` times
   26871 // along each dimension, `TileGrad` takes in `multiples` and aggregates
   26872 // each repeated tile of `input` into `output`.
   26873 func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
   26874 	if scope.Err() != nil {
   26875 		return
   26876 	}
   26877 	opspec := tf.OpSpec{
   26878 		Type: "TileGrad",
   26879 		Input: []tf.Input{
   26880 			input, multiples,
   26881 		},
   26882 	}
   26883 	op := scope.AddOperation(opspec)
   26884 	return op.Output(0)
   26885 }
   26886 
   26887 // DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
   26888 type DataFormatDimMapAttr func(optionalAttr)
   26889 
   26890 // DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
   26891 //
   26892 // value: source data format.
   26893 // If not specified, defaults to "NHWC"
   26894 func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr {
   26895 	return func(m optionalAttr) {
   26896 		m["src_format"] = value
   26897 	}
   26898 }
   26899 
   26900 // DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
   26901 //
   26902 // value: destination data format.
   26903 // If not specified, defaults to "NCHW"
   26904 func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr {
   26905 	return func(m optionalAttr) {
   26906 		m["dst_format"] = value
   26907 	}
   26908 }
   26909 
// Returns the dimension index in the destination data format given the one
// in the source data format.
   26913 //
   26914 // Arguments:
   26915 //	x: A Tensor with each element as a dimension index in source data format.
   26916 // Must be in the range [-4, 4).
   26917 //
   26918 // Returns A Tensor with each element as a dimension index in destination data format.
   26919 func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output) {
   26920 	if scope.Err() != nil {
   26921 		return
   26922 	}
   26923 	attrs := map[string]interface{}{}
   26924 	for _, a := range optional {
   26925 		a(attrs)
   26926 	}
   26927 	opspec := tf.OpSpec{
   26928 		Type: "DataFormatDimMap",
   26929 		Input: []tf.Input{
   26930 			x,
   26931 		},
   26932 		Attrs: attrs,
   26933 	}
   26934 	op := scope.AddOperation(opspec)
   26935 	return op.Output(0)
   26936 }
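
// Illustrative, hand-written sketch (not machine generated): maps NHWC
// dimension indices to their NCHW positions using the optional attrs.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{0, 1, 2, 3})
// y := op.DataFormatDimMap(s, x,
// 	op.DataFormatDimMapSrcFormat("NHWC"),
// 	op.DataFormatDimMapDstFormat("NCHW"))
// // y evaluates to [0, 2, 3, 1] (N stays at 0, H->2, W->3, C->1).
// ```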
   26937 
   26938 // Return the shape of s0 op s1 with broadcast.
   26939 //
   26940 // Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
   26941 // broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
   26942 func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
   26943 	if scope.Err() != nil {
   26944 		return
   26945 	}
   26946 	opspec := tf.OpSpec{
   26947 		Type: "BroadcastArgs",
   26948 		Input: []tf.Input{
   26949 			s0, s1,
   26950 		},
   26951 	}
   26952 	op := scope.AddOperation(opspec)
   26953 	return op.Output(0)
   26954 }
   26955 
   26956 // Return the reduction indices for computing gradients of s0 op s1 with broadcast.
   26957 //
   26958 // This is typically used by gradient computations for a broadcasting operation.
   26959 func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
   26960 	if scope.Err() != nil {
   26961 		return
   26962 	}
   26963 	opspec := tf.OpSpec{
   26964 		Type: "BroadcastGradientArgs",
   26965 		Input: []tf.Input{
   26966 			s0, s1,
   26967 		},
   26968 	}
   26969 	op := scope.AddOperation(opspec)
   26970 	return op.Output(0), op.Output(1)
   26971 }
   26972 
   26973 // Pads a tensor with mirrored values.
   26974 //
   26975 // This operation pads a `input` with mirrored values according to the `paddings`
   26976 // you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
   26977 // the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
   26978 // how many values to add before the contents of `input` in that dimension, and
   26979 // `paddings[D, 1]` indicates how many values to add after the contents of `input`
// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no
// greater than `input.dim_size(D)` if `mode` is `SYMMETRIC`, or no greater
// than `input.dim_size(D) - 1` if `mode` is `REFLECT`.
   26983 //
   26984 // The padded size of each dimension D of the output is:
   26985 //
   26986 // `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
   26987 //
   26988 // For example:
   26989 //
   26990 // ```
   26991 // # 't' is [[1, 2, 3], [4, 5, 6]].
// # 'paddings' is [[1, 1], [2, 2]].
   26993 // # 'mode' is SYMMETRIC.
   26994 // # rank of 't' is 2.
   26995 // pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
   26996 //                       [2, 1, 1, 2, 3, 3, 2]
   26997 //                       [5, 4, 4, 5, 6, 6, 5]
   26998 //                       [5, 4, 4, 5, 6, 6, 5]]
   26999 // ```
   27000 //
   27001 // Arguments:
   27002 //	input: The input tensor to be padded.
   27003 //	paddings: A two-column matrix specifying the padding sizes. The number of
   27004 // rows must be the same as the rank of `input`.
   27005 //	mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
   27006 // do not include the borders, while in symmetric mode the padded regions
   27007 // do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
   27008 // is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
   27009 // it is `[1, 2, 3, 3, 2]` in symmetric mode.
   27010 //
   27011 // Returns The padded tensor.
   27012 func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
   27013 	if scope.Err() != nil {
   27014 		return
   27015 	}
   27016 	attrs := map[string]interface{}{"mode": mode}
   27017 	opspec := tf.OpSpec{
   27018 		Type: "MirrorPad",
   27019 		Input: []tf.Input{
   27020 			input, paddings,
   27021 		},
   27022 		Attrs: attrs,
   27023 	}
   27024 	op := scope.AddOperation(opspec)
   27025 	return op.Output(0)
   27026 }
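
// Illustrative, hand-written sketch (not machine generated): the SYMMETRIC
// padding example from the comment above, expressed with Const inputs.
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// paddings := op.Const(s, [][]int32{{1, 1}, {2, 2}})
// out := op.MirrorPad(s, t, paddings, "SYMMETRIC")
// // out has shape [4, 7], as shown above.
// ```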
   27027 
   27028 // A placeholder op for a value that will be fed into the computation.
   27029 //
   27030 // DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
   27031 //
   27032 // N.B. This operation will fail with an error if it is executed. It is
   27033 // intended as a way to represent a value that will always be fed, and to
   27034 // provide attrs that enable the fed value to be checked at runtime.
   27035 //
   27036 // Arguments:
   27037 //	dtype: The type of elements in the tensor.
   27038 //	shape: The shape of the tensor. The shape can be any partially-specified
   27039 // shape.  To be unconstrained, pass in a shape with unknown rank.
   27040 //
   27041 // Returns A placeholder tensor that must be replaced using the feed mechanism.
   27042 func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
   27043 	if scope.Err() != nil {
   27044 		return
   27045 	}
   27046 	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
   27047 	opspec := tf.OpSpec{
   27048 		Type: "PlaceholderV2",
   27049 
   27050 		Attrs: attrs,
   27051 	}
   27052 	op := scope.AddOperation(opspec)
   27053 	return op.Output(0)
   27054 }
   27055 
   27056 // ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
   27057 type ResourceApplyAdadeltaAttr func(optionalAttr)
   27058 
   27059 // ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
   27060 //
   27061 // value: If True, updating of the var, accum and update_accum tensors will be protected by
   27062 // a lock; otherwise the behavior is undefined, but may exhibit less contention.
   27063 // If not specified, defaults to false
   27064 func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
   27065 	return func(m optionalAttr) {
   27066 		m["use_locking"] = value
   27067 	}
   27068 }
   27069 
   27070 // Update '*var' according to the adadelta scheme.
   27071 //
// accum = rho * accum + (1 - rho) * grad.square();
// update = (update_accum + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
// update_accum = rho * update_accum + (1 - rho) * update.square();
// var -= update;
   27076 //
   27077 // Arguments:
   27078 //	var_: Should be from a Variable().
   27079 //	accum: Should be from a Variable().
   27080 //	accum_update: Should be from a Variable().
   27081 //	lr: Scaling factor. Must be a scalar.
   27082 //	rho: Decay factor. Must be a scalar.
   27083 //	epsilon: Constant factor. Must be a scalar.
   27084 //	grad: The gradient.
   27085 //
   27086 // Returns the created operation.
   27087 func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
   27088 	if scope.Err() != nil {
   27089 		return
   27090 	}
   27091 	attrs := map[string]interface{}{}
   27092 	for _, a := range optional {
   27093 		a(attrs)
   27094 	}
   27095 	opspec := tf.OpSpec{
   27096 		Type: "ResourceApplyAdadelta",
   27097 		Input: []tf.Input{
   27098 			var_, accum, accum_update, lr, rho, epsilon, grad,
   27099 		},
   27100 		Attrs: attrs,
   27101 	}
   27102 	return scope.AddOperation(opspec)
   27103 }
   27104 
   27105 // SqueezeAttr is an optional argument to Squeeze.
   27106 type SqueezeAttr func(optionalAttr)
   27107 
   27108 // SqueezeAxis sets the optional axis attribute to value.
   27109 //
   27110 // value: If specified, only squeezes the dimensions listed. The dimension
   27111 // index starts at 0. It is an error to squeeze a dimension that is not 1. Must
   27112 // be in the range `[-rank(input), rank(input))`.
// If not specified, defaults to an empty list, which removes all size-1
// dimensions.
   27114 //
   27115 // REQUIRES: len(value) >= 0
   27116 func SqueezeAxis(value []int64) SqueezeAttr {
   27117 	return func(m optionalAttr) {
   27118 		m["squeeze_dims"] = value
   27119 	}
   27120 }
   27121 
   27122 // Removes dimensions of size 1 from the shape of a tensor.
   27123 //
   27124 // Given a tensor `input`, this operation returns a tensor of the same type with
   27125 // all dimensions of size 1 removed. If you don't want to remove all size 1
   27126 // dimensions, you can remove specific size 1 dimensions by specifying
   27127 // `axis`.
   27128 //
   27129 // For example:
   27130 //
   27131 // ```
   27132 // # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
   27133 // shape(squeeze(t)) ==> [2, 3]
   27134 // ```
   27135 //
   27136 // Or, to remove specific size 1 dimensions:
   27137 //
   27138 // ```
   27139 // # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
   27140 // shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
   27141 // ```
   27142 //
   27143 // Arguments:
   27144 //	input: The `input` to squeeze.
   27145 //
   27146 // Returns Contains the same data as `input`, but has one or more dimensions of
   27147 // size 1 removed.
   27148 func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
   27149 	if scope.Err() != nil {
   27150 		return
   27151 	}
   27152 	attrs := map[string]interface{}{}
   27153 	for _, a := range optional {
   27154 		a(attrs)
   27155 	}
   27156 	opspec := tf.OpSpec{
   27157 		Type: "Squeeze",
   27158 		Input: []tf.Input{
   27159 			input,
   27160 		},
   27161 		Attrs: attrs,
   27162 	}
   27163 	op := scope.AddOperation(opspec)
   27164 	return op.Output(0)
   27165 }
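
// Illustrative, hand-written sketch (not machine generated): removes size-1
// dimensions, first all of them and then only axis 0.
//
// ```go
// s := op.NewScope()
// t := op.Const(s, [][][]float32{{{1, 2, 3}}})          // shape [1, 1, 3]
// all := op.Squeeze(s, t)                               // shape [3]
// first := op.Squeeze(s, t, op.SqueezeAxis([]int64{0})) // shape [1, 3]
// ```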
   27166 
   27167 // SpaceToBatch for N-D tensors of type T.
   27168 //
   27169 // This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
   27170 // grid of blocks of shape `block_shape`, and interleaves these blocks with the
   27171 // "batch" dimension (0) such that in the output, the spatial dimensions
   27172 // `[1, ..., M]` correspond to the position within the grid, and the batch
   27173 // dimension combines both the position within a spatial block and the original
   27174 // batch position.  Prior to division into blocks, the spatial dimensions of the
   27175 // input are optionally zero padded according to `paddings`.  See below for a
   27176 // precise description.
   27177 //
   27178 // Arguments:
   27179 //	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
   27180 // where spatial_shape has `M` dimensions.
   27181 //	block_shape: 1-D with shape `[M]`, all values must be >= 1.
   27182 //	paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
   27183 //   `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
   27184 //   `i + 1`, which corresponds to spatial dimension `i`.  It is required that
   27185 //   `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
   27186 //
   27187 // This operation is equivalent to the following steps:
   27188 //
   27189 // 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
   27190 //    input according to `paddings` to produce `padded` of shape `padded_shape`.
   27191 //
   27192 // 2. Reshape `padded` to `reshaped_padded` of shape:
   27193 //
   27194 //      [batch] +
   27195 //      [padded_shape[1] / block_shape[0],
   27196 //        block_shape[0],
   27197 //       ...,
   27198 //       padded_shape[M] / block_shape[M-1],
   27199 //       block_shape[M-1]] +
   27200 //      remaining_shape
   27201 //
   27202 // 3. Permute dimensions of `reshaped_padded` to produce
   27203 //    `permuted_reshaped_padded` of shape:
   27204 //
   27205 //      block_shape +
   27206 //      [batch] +
   27207 //      [padded_shape[1] / block_shape[0],
   27208 //       ...,
   27209 //       padded_shape[M] / block_shape[M-1]] +
   27210 //      remaining_shape
   27211 //
   27212 // 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
   27213 //    dimension, producing an output tensor of shape:
   27214 //
   27215 //      [batch * prod(block_shape)] +
   27216 //      [padded_shape[1] / block_shape[0],
   27217 //       ...,
   27218 //       padded_shape[M] / block_shape[M-1]] +
   27219 //      remaining_shape
   27220 //
   27221 // Some examples:
   27222 //
   27223 // (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
   27224 //     `paddings = [[0, 0], [0, 0]]`:
   27225 //
   27226 // ```
   27227 // x = [[[[1], [2]], [[3], [4]]]]
   27228 // ```
   27229 //
   27230 // The output tensor has shape `[4, 1, 1, 1]` and value:
   27231 //
   27232 // ```
   27233 // [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
   27234 // ```
   27235 //
   27236 // (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
   27237 //     `paddings = [[0, 0], [0, 0]]`:
   27238 //
   27239 // ```
   27240 // x = [[[[1, 2, 3], [4, 5, 6]],
   27241 //       [[7, 8, 9], [10, 11, 12]]]]
   27242 // ```
   27243 //
   27244 // The output tensor has shape `[4, 1, 1, 3]` and value:
   27245 //
   27246 // ```
   27247 // [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
   27248 // ```
   27249 //
   27250 // (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
   27251 //     `paddings = [[0, 0], [0, 0]]`:
   27252 //
   27253 // ```
   27254 // x = [[[[1],   [2],  [3],  [4]],
   27255 //       [[5],   [6],  [7],  [8]],
   27256 //       [[9],  [10], [11],  [12]],
   27257 //       [[13], [14], [15],  [16]]]]
   27258 // ```
   27259 //
   27260 // The output tensor has shape `[4, 2, 2, 1]` and value:
   27261 //
   27262 // ```
   27263 // x = [[[[1], [3]], [[9], [11]]],
   27264 //      [[[2], [4]], [[10], [12]]],
   27265 //      [[[5], [7]], [[13], [15]]],
   27266 //      [[[6], [8]], [[14], [16]]]]
   27267 // ```
   27268 //
   27269 // (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
   27270 //     paddings = `[[0, 0], [2, 0]]`:
   27271 //
   27272 // ```
   27273 // x = [[[[1],   [2],  [3],  [4]],
   27274 //       [[5],   [6],  [7],  [8]]],
   27275 //      [[[9],  [10], [11],  [12]],
   27276 //       [[13], [14], [15],  [16]]]]
   27277 // ```
   27278 //
   27279 // The output tensor has shape `[8, 1, 3, 1]` and value:
   27280 //
   27281 // ```
   27282 // x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
   27283 //      [[[0], [2], [4]]], [[[0], [10], [12]]],
   27284 //      [[[0], [5], [7]]], [[[0], [13], [15]]],
   27285 //      [[[0], [6], [8]]], [[[0], [14], [16]]]]
   27286 // ```
   27287 //
   27288 // Among others, this operation is useful for reducing atrous convolution into
   27289 // regular convolution.
   27290 func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output) {
   27291 	if scope.Err() != nil {
   27292 		return
   27293 	}
   27294 	opspec := tf.OpSpec{
   27295 		Type: "SpaceToBatchND",
   27296 		Input: []tf.Input{
   27297 			input, block_shape, paddings,
   27298 		},
   27299 	}
   27300 	op := scope.AddOperation(opspec)
   27301 	return op.Output(0)
   27302 }
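
// Illustrative, hand-written sketch (not machine generated): example (1)
// from the comment above, with a [1, 2, 2, 1] input and 2x2 blocks.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // shape [1, 2, 2, 1]
// blockShape := op.Const(s, []int64{2, 2})
// paddings := op.Const(s, [][]int64{{0, 0}, {0, 0}})
// out := op.SpaceToBatchND(s, x, blockShape, paddings)
// // out has shape [4, 1, 1, 1].
// ```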
   27303 
   27304 // QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
   27305 type QuantizeAndDequantizeV2Attr func(optionalAttr)
   27306 
   27307 // QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
   27308 //
   27309 // value: If the quantization is signed or unsigned.
   27310 // If not specified, defaults to true
   27311 func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
   27312 	return func(m optionalAttr) {
   27313 		m["signed_input"] = value
   27314 	}
   27315 }
   27316 
   27317 // QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
   27318 //
   27319 // value: The bitwidth of the quantization.
   27320 // If not specified, defaults to 8
   27321 func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
   27322 	return func(m optionalAttr) {
   27323 		m["num_bits"] = value
   27324 	}
   27325 }
   27326 
   27327 // QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
   27328 //
   27329 // value: If the range is given or should be computed from the tensor.
   27330 // If not specified, defaults to false
   27331 func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
   27332 	return func(m optionalAttr) {
   27333 		m["range_given"] = value
   27334 	}
   27335 }
   27336 
   27337 // Quantizes then dequantizes a tensor.
   27338 //
   27339 // This op simulates the precision loss from the quantized forward pass by:
   27340 // 1. Quantizing the tensor to fixed point numbers, which should match the target
   27341 //    quantization method when it is used in inference.
   27342 // 2. Dequantizing it back to floating point numbers for the following ops, most
   27343 //    likely matmul.
   27344 //
   27345 // There are different ways to quantize. This version does not use the full range
   27346 // of the output type, choosing to elide the lowest possible value for symmetry
   27347 // (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
   27348 // quantization), so that 0.0 maps to 0.
   27349 //
   27350 // To perform this op, we first find the range of values in our tensor. The range
   27351 // we use is always centered on 0, so we find m such that
   27352 //
   27353 // 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
   27354 // 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
   27355 //
   27356 // Our input tensor range is then [-m, m].
   27357 //
   27358 // Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
   27359 // If signed_input is true, this is
   27360 //
//   [min_fixed, max_fixed] =
//       [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
   27363 //
   27364 // Otherwise, if signed_input is false, the fixed-point range is
   27365 //
   27366 //   [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
   27367 //
   27368 // From this we compute our scaling factor, s:
   27369 //
   27370 //   s = (max_fixed - min_fixed) / (2 * m).
   27371 //
   27372 // Now we can quantize and dequantize the elements of our tensor.  An element e
   27373 // is transformed into e':
   27374 //
   27375 //   e' = (e * s).round_to_nearest() / s.
   27376 //
   27377 // Note that we have a different number of buckets in the signed vs. unsigned
   27378 // cases.  For example, if num_bits == 8, we get 254 buckets in the signed case
   27379 // vs. 255 in the unsigned case.
   27380 //
   27381 // For example, suppose num_bits = 8 and m = 1.  Then
   27382 //
   27383 //   [min_fixed, max_fixed] = [-127, 127], and
   27384 //   s = (127 + 127) / 2 = 127.
   27385 //
   27386 // Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
   27387 // {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
   27388 //
   27389 // Arguments:
   27390 //	input: Tensor to quantize and then dequantize.
   27391 //	input_min: If range_given, this is the min of the range, otherwise this input
   27392 // will be ignored.
   27393 //	input_max: If range_given, this is the max of the range, otherwise this input
   27394 // will be ignored.
   27395 func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
   27396 	if scope.Err() != nil {
   27397 		return
   27398 	}
   27399 	attrs := map[string]interface{}{}
   27400 	for _, a := range optional {
   27401 		a(attrs)
   27402 	}
   27403 	opspec := tf.OpSpec{
   27404 		Type: "QuantizeAndDequantizeV2",
   27405 		Input: []tf.Input{
   27406 			input, input_min, input_max,
   27407 		},
   27408 		Attrs: attrs,
   27409 	}
   27410 	op := scope.AddOperation(opspec)
   27411 	return op.Output(0)
   27412 }
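
// Illustrative, hand-written sketch (not machine generated): reproduces the
// num_bits = 8, m = 1 example from the comment above with a fixed range.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []float32{-1, -0.5, 0, 0.3})
// inMin := op.Const(s, float32(-1))
// inMax := op.Const(s, float32(1))
// y := op.QuantizeAndDequantizeV2(s, x, inMin, inMax,
// 	op.QuantizeAndDequantizeV2NumBits(8),
// 	op.QuantizeAndDequantizeV2RangeGiven(true))
// // y evaluates to approximately [-1, -0.496, 0, 0.299].
// ```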
   27413 
   27414 // SpaceToBatch for 4-D tensors of type T.
   27415 //
   27416 // This is a legacy version of the more general SpaceToBatchND.
   27417 //
   27418 // Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
   27419 // More specifically, this op outputs a copy of the input tensor where values from
   27420 // the `height` and `width` dimensions are moved to the `batch` dimension. After
   27421 // the zero-padding, both `height` and `width` of the input must be divisible by the
   27422 // block size.
   27423 //
   27424 // Arguments:
   27425 //	input: 4-D with shape `[batch, height, width, depth]`.
   27426 //	paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
   27427 //   the padding of the input with zeros across the spatial dimensions as follows:
   27428 //
   27429 //       paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
   27430 //
   27431 //   The effective spatial dimensions of the zero-padded input tensor will be:
   27432 //
   27433 //       height_pad = pad_top + height + pad_bottom
   27434 //       width_pad = pad_left + width + pad_right
   27435 //
   27436 // The attr `block_size` must be greater than one. It indicates the block size.
   27437 //
//   * Non-overlapping blocks of size `block_size x block_size` in the height and
   27439 //     width dimensions are rearranged into the batch dimension at each location.
   27440 //   * The batch of the output tensor is `batch * block_size * block_size`.
   27441 //   * Both height_pad and width_pad must be divisible by block_size.
   27442 //
   27443 // The shape of the output will be:
   27444 //
   27445 //     [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
   27446 //      depth]
   27447 //
   27448 // Some examples:
   27449 //
   27450 // (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
   27451 //
   27452 // ```
   27453 // x = [[[[1], [2]], [[3], [4]]]]
   27454 // ```
   27455 //
   27456 // The output tensor has shape `[4, 1, 1, 1]` and value:
   27457 //
   27458 // ```
   27459 // [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
   27460 // ```
   27461 //
   27462 // (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
   27463 //
   27464 // ```
   27465 // x = [[[[1, 2, 3], [4, 5, 6]],
   27466 //       [[7, 8, 9], [10, 11, 12]]]]
   27467 // ```
   27468 //
   27469 // The output tensor has shape `[4, 1, 1, 3]` and value:
   27470 //
   27471 // ```
   27472 // [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
   27473 // ```
   27474 //
   27475 // (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
   27476 //
   27477 // ```
   27478 // x = [[[[1],   [2],  [3],  [4]],
   27479 //       [[5],   [6],  [7],  [8]],
   27480 //       [[9],  [10], [11],  [12]],
   27481 //       [[13], [14], [15],  [16]]]]
   27482 // ```
   27483 //
   27484 // The output tensor has shape `[4, 2, 2, 1]` and value:
   27485 //
   27486 // ```
   27487 // x = [[[[1], [3]], [[9], [11]]],
   27488 //      [[[2], [4]], [[10], [12]]],
   27489 //      [[[5], [7]], [[13], [15]]],
   27490 //      [[[6], [8]], [[14], [16]]]]
   27491 // ```
   27492 //
   27493 // (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
   27494 //
   27495 // ```
   27496 // x = [[[[1],   [2],  [3],  [4]],
   27497 //       [[5],   [6],  [7],  [8]]],
   27498 //      [[[9],  [10], [11],  [12]],
   27499 //       [[13], [14], [15],  [16]]]]
   27500 // ```
   27501 //
   27502 // The output tensor has shape `[8, 1, 2, 1]` and value:
   27503 //
   27504 // ```
   27505 // x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
   27506 //      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
   27507 // ```
   27508 //
   27509 // Among others, this operation is useful for reducing atrous convolution into
   27510 // regular convolution.
   27512 func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
   27513 	if scope.Err() != nil {
   27514 		return
   27515 	}
   27516 	attrs := map[string]interface{}{"block_size": block_size}
   27517 	opspec := tf.OpSpec{
   27518 		Type: "SpaceToBatch",
   27519 		Input: []tf.Input{
   27520 			input, paddings,
   27521 		},
   27522 		Attrs: attrs,
   27523 	}
   27524 	op := scope.AddOperation(opspec)
   27525 	return op.Output(0)
   27526 }
   27527 
   27528 // UnpackAttr is an optional argument to Unpack.
   27529 type UnpackAttr func(optionalAttr)
   27530 
   27531 // UnpackAxis sets the optional axis attribute to value.
   27532 //
   27533 // value: Dimension along which to unpack.  Negative values wrap around, so the
   27534 // valid range is `[-R, R)`.
   27535 // If not specified, defaults to 0
   27536 func UnpackAxis(value int64) UnpackAttr {
   27537 	return func(m optionalAttr) {
   27538 		m["axis"] = value
   27539 	}
   27540 }
   27541 
   27542 // Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
   27543 //
   27544 // Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
   27545 // For example, given a tensor of shape `(A, B, C, D)`;
   27546 //
   27547 // If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
   27548 //   and each tensor in `output` will have shape `(B, C, D)`. (Note that the
   27549 //   dimension unpacked along is gone, unlike `split`).
   27550 //
   27551 // If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
   27552 //   and each tensor in `output` will have shape `(A, C, D)`.
   27553 // Etc.
   27554 //
   27555 // This is the opposite of `pack`.
   27556 //
   27557 // Arguments:
   27558 //	value: 1-D or higher, with `axis` dimension size equal to `num`.
   27559 //
   27560 //
   27561 // Returns The list of tensors unpacked from `value`.
   27562 func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output) {
   27563 	if scope.Err() != nil {
   27564 		return
   27565 	}
   27566 	attrs := map[string]interface{}{"num": num}
   27567 	for _, a := range optional {
   27568 		a(attrs)
   27569 	}
   27570 	opspec := tf.OpSpec{
   27571 		Type: "Unpack",
   27572 		Input: []tf.Input{
   27573 			value,
   27574 		},
   27575 		Attrs: attrs,
   27576 	}
   27577 	op := scope.AddOperation(opspec)
   27578 	if scope.Err() != nil {
   27579 		return
   27580 	}
   27581 	var idx int
   27582 	var err error
   27583 	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
   27584 		scope.UpdateErr("Unpack", err)
   27585 		return
   27586 	}
   27587 	return output
   27588 }
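
// Illustrative, hand-written sketch (not machine generated): unpacks a 2x3
// matrix along axis 0 into two length-3 vectors.
//
// ```go
// s := op.NewScope()
// value := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
// parts := op.Unpack(s, value, 2) // axis defaults to 0
// // parts[0] evaluates to [1, 2, 3]; parts[1] to [4, 5, 6].
// ```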
   27589 
   27590 // Increments variable pointed to by 'resource' until it reaches 'limit'.
   27591 //
   27592 // Arguments:
   27593 //	resource: Should be from a scalar `Variable` node.
//	limit: If incrementing the resource would bring it above limit, instead
// generates an 'OutOfRange' error.
   27596 //
   27597 //
   27598 // Returns A copy of the input before increment. If nothing else modifies the
   27599 // input, the values produced will all be distinct.
   27600 func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output) {
   27601 	if scope.Err() != nil {
   27602 		return
   27603 	}
   27604 	attrs := map[string]interface{}{"limit": limit, "T": T}
   27605 	opspec := tf.OpSpec{
   27606 		Type: "ResourceCountUpTo",
   27607 		Input: []tf.Input{
   27608 			resource,
   27609 		},
   27610 		Attrs: attrs,
   27611 	}
   27612 	op := scope.AddOperation(opspec)
   27613 	return op.Output(0)
   27614 }
   27615 
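// A hand-written sketch (not machine generated) of driving the counter until
// it reaches `limit`; it assumes the `VarHandleOp` and `AssignVariableOp`
// wrappers generated elsewhere in this package.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	v := op.VarHandleOp(s, tf.Int64, tf.ScalarShape())
// 	init := op.AssignVariableOp(s, v, op.Const(s, int64(0)))
// 	next := op.ResourceCountUpTo(s, v, 3, tf.Int64)
// 	graph, _ := s.Finalize() // error handling elided in this sketch
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	sess.Run(nil, nil, []*tf.Operation{init})
// 	for {
// 		res, err := sess.Run(nil, []tf.Output{next}, nil)
// 		if err != nil {
// 			break // OutOfRange once the counter has reached 3
// 		}
// 		fmt.Println(res[0].Value()) // prints 0, then 1, then 2
// 	}
// }
// ```
//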
   27616 // Delete the stack from its resource container.
   27617 //
   27618 // Arguments:
   27619 //	handle: The handle to a stack.
   27620 //
   27621 // Returns the created operation.
   27622 func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
   27623 	if scope.Err() != nil {
   27624 		return
   27625 	}
   27626 	opspec := tf.OpSpec{
   27627 		Type: "StackCloseV2",
   27628 		Input: []tf.Input{
   27629 			handle,
   27630 		},
   27631 	}
   27632 	return scope.AddOperation(opspec)
   27633 }
   27634 
   27635 // BatchToSpace for N-D tensors of type T.
   27636 //
   27637 // This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
   27638 // `block_shape + [batch]`, interleaves these blocks back into the grid defined by
   27639 // the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
   27640 // the input.  The spatial dimensions of this intermediate result are then
   27641 // optionally cropped according to `crops` to produce the output.  This is the
   27642 // reverse of SpaceToBatch.  See below for a precise description.
   27643 //
   27644 // Arguments:
   27645 //	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
   27646 // where spatial_shape has M dimensions.
   27647 //	block_shape: 1-D with shape `[M]`, all values must be >= 1.
   27648 //	crops: 2-D with shape `[M, 2]`, all values must be >= 0.
   27649 //   `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
   27650 //   dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
   27651 //   required that
   27652 //   `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
   27653 //
   27654 // This operation is equivalent to the following steps:
   27655 //
   27656 // 1. Reshape `input` to `reshaped` of shape:
   27657 //      [block_shape[0], ..., block_shape[M-1],
   27658 //       batch / prod(block_shape),
   27659 //       input_shape[1], ..., input_shape[N-1]]
   27660 //
   27661 // 2. Permute dimensions of `reshaped` to produce `permuted` of shape
   27662 //      [batch / prod(block_shape),
   27663 //
   27664 //       input_shape[1], block_shape[0],
   27665 //       ...,
   27666 //       input_shape[M], block_shape[M-1],
   27667 //
   27668 //       input_shape[M+1], ..., input_shape[N-1]]
   27669 //
   27670 // 3. Reshape `permuted` to produce `reshaped_permuted` of shape
   27671 //      [batch / prod(block_shape),
   27672 //
   27673 //       input_shape[1] * block_shape[0],
   27674 //       ...,
   27675 //       input_shape[M] * block_shape[M-1],
   27676 //
   27677 //       input_shape[M+1],
   27678 //       ...,
   27679 //       input_shape[N-1]]
   27680 //
   27681 // 4. Crop the start and end of dimensions `[1, ..., M]` of
   27682 //    `reshaped_permuted` according to `crops` to produce the output of shape:
   27683 //      [batch / prod(block_shape),
   27684 //
   27685 //       input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
   27686 //       ...,
   27687 //       input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
   27688 //
   27689 //       input_shape[M+1], ..., input_shape[N-1]]
   27690 //
   27691 // Some examples:
   27692 //
   27693 // (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
   27694 //     `crops = [[0, 0], [0, 0]]`:
   27695 //
   27696 // ```
   27697 // [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
   27698 // ```
   27699 //
   27700 // The output tensor has shape `[1, 2, 2, 1]` and value:
   27701 //
   27702 // ```
   27703 // x = [[[[1], [2]], [[3], [4]]]]
   27704 // ```
   27705 //
   27706 // (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
   27707 //     `crops = [[0, 0], [0, 0]]`:
   27708 //
   27709 // ```
   27710 // [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
   27711 // ```
   27712 //
   27713 // The output tensor has shape `[1, 2, 2, 3]` and value:
   27714 //
   27715 // ```
   27716 // x = [[[[1, 2, 3], [4, 5, 6]],
   27717 //       [[7, 8, 9], [10, 11, 12]]]]
   27718 // ```
   27719 //
   27720 // (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
   27721 //     `crops = [[0, 0], [0, 0]]`:
   27722 //
   27723 // ```
   27724 // x = [[[[1], [3]], [[9], [11]]],
   27725 //      [[[2], [4]], [[10], [12]]],
   27726 //      [[[5], [7]], [[13], [15]]],
   27727 //      [[[6], [8]], [[14], [16]]]]
   27728 // ```
   27729 //
   27730 // The output tensor has shape `[1, 4, 4, 1]` and value:
   27731 //
   27732 // ```
   27733 // x = [[[1],   [2],  [3],  [4]],
   27734 //      [[5],   [6],  [7],  [8]],
   27735 //      [[9],  [10], [11],  [12]],
   27736 //      [[13], [14], [15],  [16]]]
   27737 // ```
   27738 //
   27739 // (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
   27740 //     `crops = [[0, 0], [2, 0]]`:
   27741 //
   27742 // ```
   27743 // x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
   27744 //      [[[0], [2], [4]]], [[[0], [10], [12]]],
   27745 //      [[[0], [5], [7]]], [[[0], [13], [15]]],
   27746 //      [[[0], [6], [8]]], [[[0], [14], [16]]]]
   27747 // ```
   27748 //
   27749 // The output tensor has shape `[2, 2, 4, 1]` and value:
   27750 //
   27751 // ```
   27752 // x = [[[[1],   [2],  [3],  [4]],
   27753 //       [[5],   [6],  [7],  [8]]],
   27754 //      [[[9],  [10], [11],  [12]],
   27755 //       [[13], [14], [15],  [16]]]]
   27756 // ```
   27757 func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output) {
   27758 	if scope.Err() != nil {
   27759 		return
   27760 	}
   27761 	opspec := tf.OpSpec{
   27762 		Type: "BatchToSpaceND",
   27763 		Input: []tf.Input{
   27764 			input, block_shape, crops,
   27765 		},
   27766 	}
   27767 	op := scope.AddOperation(opspec)
   27768 	return op.Output(0)
   27769 }
   27770 
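// The following hand-written sketch reproduces example (1) above; as in the
// earlier sketches, error handling is elided.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	input := op.Const(s, [][][][]float32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}})
// 	blockShape := op.Const(s, []int32{2, 2})
// 	crops := op.Const(s, [][]int32{{0, 0}, {0, 0}})
// 	out := op.BatchToSpaceND(s, input, blockShape, crops)
// 	graph, _ := s.Finalize()
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	res, _ := sess.Run(nil, []tf.Output{out}, nil)
// 	fmt.Println(res[0].Value()) // [[[[1] [2]] [[3] [4]]]], shape [1 2 2 1]
// }
// ```
//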
   27771 // Extract `patches` from `images` and put them in the "depth" output dimension.
   27772 //
   27773 // Arguments:
   27774 //	images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
   27775 //	ksizes: The size of the sliding window for each dimension of `images`.
   27776 //	strides: 1-D of length 4. How far the centers of two consecutive patches are in
   27777 // the images. Must be: `[1, stride_rows, stride_cols, 1]`.
   27778 //	rates: 1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
   27779 // input stride, specifying how far two consecutive patch samples are in the
   27780 // input. Equivalent to extracting patches with
   27781 // `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
   27782 // subsampling them spatially by a factor of `rates`. This is equivalent to
   27783 // `rate` in dilated (a.k.a. Atrous) convolutions.
   27784 //	padding: The type of padding algorithm to use.
   27785 //
   27786 // We specify the size-related attributes as:
   27787 //
   27788 // ```python
   27789 //       ksizes = [1, ksize_rows, ksize_cols, 1]
   27790 //       strides = [1, strides_rows, strides_cols, 1]
   27791 //       rates = [1, rates_rows, rates_cols, 1]
   27792 // ```
   27793 //
   27794 // Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
   27795 // ksize_cols * depth]` containing image patches with size
   27796 // `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
   27797 // `out_rows` and `out_cols` are the dimensions of the output patches.
   27798 func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
   27799 	if scope.Err() != nil {
   27800 		return
   27801 	}
   27802 	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "rates": rates, "padding": padding}
   27803 	opspec := tf.OpSpec{
   27804 		Type: "ExtractImagePatches",
   27805 		Input: []tf.Input{
   27806 			images,
   27807 		},
   27808 		Attrs: attrs,
   27809 	}
   27810 	op := scope.AddOperation(opspec)
   27811 	return op.Output(0)
   27812 }
   27813 
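// A hand-written sketch extracting dense 2x2 patches from a 3x3 single-channel
// image; each output patch is vectorized row-major in the depth dimension.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	// A single 3x3 one-channel "image" holding 1..9.
// 	images := op.Const(s, [][][][]float32{{
// 		{{1}, {2}, {3}},
// 		{{4}, {5}, {6}},
// 		{{7}, {8}, {9}},
// 	}})
// 	patches := op.ExtractImagePatches(s, images,
// 		[]int64{1, 2, 2, 1}, // ksizes: 2x2 patches
// 		[]int64{1, 1, 1, 1}, // strides: dense sampling
// 		[]int64{1, 1, 1, 1}, // rates: no dilation
// 		"VALID")
// 	graph, _ := s.Finalize() // error handling elided
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	res, _ := sess.Run(nil, []tf.Output{patches}, nil)
// 	fmt.Println(res[0].Value()) // shape [1 2 2 4]; the top-left patch is [1 2 4 5]
// }
// ```
//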
   27814 // Bitcasts a tensor from one type to another without copying data.
   27815 //
   27816 // Given a tensor `input`, this operation returns a tensor that has the same buffer
   27817 // data as `input` with datatype `type`.
   27818 //
   27819 // If the input datatype `T` is larger than the output datatype `type` then the
   27820 // shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
   27821 //
   27822 // If `T` is smaller than `type`, the operator requires that the rightmost
   27823 // dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
   27824 // [..., sizeof(`type`)/sizeof(`T`)] to [...].
   27825 //
   27826 // *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
   27827 // endian orderings will give different results.
   27828 func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output) {
   27829 	if scope.Err() != nil {
   27830 		return
   27831 	}
   27832 	attrs := map[string]interface{}{"type": type_}
   27833 	opspec := tf.OpSpec{
   27834 		Type: "Bitcast",
   27835 		Input: []tf.Input{
   27836 			input,
   27837 		},
   27838 		Attrs: attrs,
   27839 	}
   27840 	op := scope.AddOperation(opspec)
   27841 	return op.Output(0)
   27842 }
   27843 
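// A hand-written sketch: bitcasting a `[1]` int32 tensor to uint8 grows the
// shape to `[1, 4]`, and, per the NOTE above, the resulting byte order depends
// on the machine.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	input := op.Const(s, []int32{1})
// 	out := op.Bitcast(s, input, tf.Uint8) // sizeof(int32)/sizeof(uint8) = 4
// 	graph, _ := s.Finalize() // error handling elided
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	res, _ := sess.Run(nil, []tf.Output{out}, nil)
// 	fmt.Println(res[0].Value()) // [[1 0 0 0]] on a little-endian machine
// }
// ```
//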
   27844 // OneHotAttr is an optional argument to OneHot.
   27845 type OneHotAttr func(optionalAttr)
   27846 
   27847 // OneHotAxis sets the optional axis attribute to value.
   27848 //
   27849 // value: The axis to fill (default: -1, a new inner-most axis).
   27850 // If not specified, defaults to -1
   27851 func OneHotAxis(value int64) OneHotAttr {
   27852 	return func(m optionalAttr) {
   27853 		m["axis"] = value
   27854 	}
   27855 }
   27856 
   27857 // Returns a one-hot tensor.
   27858 //
   27859 // The locations represented by indices in `indices` take value `on_value`,
   27860 // while all other locations take value `off_value`.
   27861 //
   27862 // If the input `indices` is rank `N`, the output will have rank `N+1`.
   27863 // The new axis is created at dimension `axis` (default: the new axis is
   27864 // appended at the end).
   27865 //
   27866 // If `indices` is a scalar the output shape will be a vector of length `depth`.
   27867 //
   27868 // If `indices` is a vector of length `features`, the output shape will be:
   27869 // ```
   27870 //   features x depth if axis == -1
   27871 //   depth x features if axis == 0
   27872 // ```
   27873 //
   27874 // If `indices` is a matrix (batch) with shape `[batch, features]`,
   27875 // the output shape will be:
   27876 // ```
   27877 //   batch x features x depth if axis == -1
   27878 //   batch x depth x features if axis == 1
   27879 //   depth x batch x features if axis == 0
   27880 // ```
   27881 //
   27882 //
   27883 // Examples
   27884 // =========
   27885 //
   27886 // Suppose that
   27887 //
   27888 // ```
   27889 //   indices = [0, 2, -1, 1]
   27890 //   depth = 3
   27891 //   on_value = 5.0
   27892 //   off_value = 0.0
   27893 //   axis = -1
   27894 // ```
   27895 //
   27896 // Then output is `[4 x 3]`:
   27897 //
   27898 //     ```output =
   27899 //       [5.0 0.0 0.0]  // one_hot(0)
   27900 //       [0.0 0.0 5.0]  // one_hot(2)
   27901 //       [0.0 0.0 0.0]  // one_hot(-1)
   27902 //       [0.0 5.0 0.0]  // one_hot(1)
   27903 //     ```
   27904 //
   27905 // Suppose that
   27906 //
   27907 // ```
   27908 //   indices = [0, 2, -1, 1]
   27909 //   depth = 3
   27910 //   on_value = 0.0
   27911 //   off_value = 3.0
   27912 //   axis = 0
   27913 // ```
   27914 //
   27915 // Then output is `[3 x 4]`:
   27916 //
   27917 //     ```output =
   27918 //       [0.0 3.0 3.0 3.0]
   27919 //       [3.0 3.0 3.0 0.0]
   27920 //       [3.0 3.0 3.0 3.0]
   27921 //       [3.0 0.0 3.0 3.0]
   27922 //     //  ^                one_hot(0)
   27923 //     //      ^            one_hot(2)
   27924 //     //          ^        one_hot(-1)
   27925 //     //              ^    one_hot(1)
   27926 //     ```
   27927 // Suppose that
   27928 //
   27929 // ```
   27930 //   indices = [[0, 2], [1, -1]]
   27931 //   depth = 3
   27932 //   on_value = 1.0
   27933 //   off_value = 0.0
   27934 //   axis = -1
   27935 // ```
   27936 //
   27937 // Then output is `[2 x 2 x 3]`:
   27938 //
   27939 //     ```output =
   27940 //       [
   27941 //         [1.0, 0.0, 0.0]  // one_hot(0)
   27942 //         [0.0, 0.0, 1.0]  // one_hot(2)
   27943 //       ][
   27944 //         [0.0, 1.0, 0.0]  // one_hot(1)
   27945 //         [0.0, 0.0, 0.0]  // one_hot(-1)
   27946 //       ]```
   27947 //
   27948 // Arguments:
   27949 //	indices: A tensor of indices.
   27950 //	depth: A scalar defining the depth of the one hot dimension.
   27951 //	on_value: A scalar defining the value to fill in output when `indices[j] = i`.
   27952 //	off_value: A scalar defining the value to fill in output when `indices[j] != i`.
   27953 //
   27954 // Returns The one-hot tensor.
   27955 func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
   27956 	if scope.Err() != nil {
   27957 		return
   27958 	}
   27959 	attrs := map[string]interface{}{}
   27960 	for _, a := range optional {
   27961 		a(attrs)
   27962 	}
   27963 	opspec := tf.OpSpec{
   27964 		Type: "OneHot",
   27965 		Input: []tf.Input{
   27966 			indices, depth, on_value, off_value,
   27967 		},
   27968 		Attrs: attrs,
   27969 	}
   27970 	op := scope.AddOperation(opspec)
   27971 	return op.Output(0)
   27972 }
   27973 
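// A hand-written sketch reproducing the first example above (indices
// `[0, 2, -1, 1]`, depth 3, `on_value` 5.0, `off_value` 0.0).
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	indices := op.Const(s, []int64{0, 2, -1, 1})
// 	depth := op.Const(s, int32(3))
// 	onValue := op.Const(s, float32(5))
// 	offValue := op.Const(s, float32(0))
// 	out := op.OneHot(s, indices, depth, onValue, offValue)
// 	graph, _ := s.Finalize() // error handling elided
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	res, _ := sess.Run(nil, []tf.Output{out}, nil)
// 	fmt.Println(res[0].Value()) // [[5 0 0] [0 0 5] [0 0 0] [0 5 0]]
// }
// ```
//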
   27974 // QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
   27975 type QueueDequeueV2Attr func(optionalAttr)
   27976 
   27977 // QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
   27978 //
   27979 // value: If the queue is empty, this operation will block for up to
   27980 // timeout_ms milliseconds.
   27981 // Note: This option is not supported yet.
   27982 // If not specified, defaults to -1
   27983 func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr {
   27984 	return func(m optionalAttr) {
   27985 		m["timeout_ms"] = value
   27986 	}
   27987 }
   27988 
   27989 // Dequeues a tuple of one or more tensors from the given queue.
   27990 //
   27991 // This operation has k outputs, where k is the number of components
   27992 // in the tuples stored in the given queue, and output i is the ith
   27993 // component of the dequeued tuple.
   27994 //
   27995 // N.B. If the queue is empty, this operation will block until an element
   27996 // has been dequeued (or 'timeout_ms' elapses, if specified).
   27997 //
   27998 // Arguments:
   27999 //	handle: The handle to a queue.
   28000 //	component_types: The type of each component in a tuple.
   28001 //
   28002 // Returns One or more tensors that were dequeued as a tuple.
   28003 func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output) {
   28004 	if scope.Err() != nil {
   28005 		return
   28006 	}
   28007 	attrs := map[string]interface{}{"component_types": component_types}
   28008 	for _, a := range optional {
   28009 		a(attrs)
   28010 	}
   28011 	opspec := tf.OpSpec{
   28012 		Type: "QueueDequeueV2",
   28013 		Input: []tf.Input{
   28014 			handle,
   28015 		},
   28016 		Attrs: attrs,
   28017 	}
   28018 	op := scope.AddOperation(opspec)
   28019 	if scope.Err() != nil {
   28020 		return
   28021 	}
   28022 	var idx int
   28023 	var err error
   28024 	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
   28025 		scope.UpdateErr("QueueDequeueV2", err)
   28026 		return
   28027 	}
   28028 	return components
   28029 }
   28030 
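// A hand-written sketch of a single enqueue/dequeue round trip; it assumes the
// `FIFOQueueV2` and `QueueEnqueueV2` wrappers generated elsewhere in this
// package.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	q := op.FIFOQueueV2(s, []tf.DataType{tf.Int32})
// 	enqueue := op.QueueEnqueueV2(s, q, []tf.Output{op.Const(s, int32(10))})
// 	components := op.QueueDequeueV2(s, q, []tf.DataType{tf.Int32})
// 	graph, _ := s.Finalize() // error handling elided
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	sess.Run(nil, nil, []*tf.Operation{enqueue})
// 	res, _ := sess.Run(nil, components, nil)
// 	fmt.Println(res[0].Value()) // 10
// }
// ```
//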
   28031 // Returns locations of nonzero / true values in a tensor.
   28032 //
   28033 // This operation returns the coordinates of true elements in `condition`. The
   28034 // coordinates are returned in a 2-D tensor where the first dimension (rows)
   28035 // represents the number of true elements, and the second dimension (columns)
   28036 // represents the coordinates of the true elements. Keep in mind that the shape of
   28037 // the output tensor can vary depending on how many true values there are in
   28038 // `condition`. Indices are output in row-major order.
   28039 //
   28040 // For example:
   28041 //
   28042 // ```
   28043 // # 'input' tensor is [[True, False]
   28044 // #                    [True, False]]
   28045 // # 'input' has two true values, so output has two coordinates.
   28046 // # 'input' has rank of 2, so coordinates have two indices.
   28047 // where(input) ==> [[0, 0],
   28048 //                   [1, 0]]
   28049 //
   28050 // # 'input' tensor is [[[True, False]
   28051 // #                     [True, False]]
   28052 // #                    [[False, True]
   28053 // #                     [False, True]]
   28054 // #                    [[False, False]
   28055 // #                     [False, True]]]
   28056 // # 'input' has 5 true values, so output has 5 coordinates.
   28057 // # 'input' has rank of 3, so coordinates have three indices.
   28058 // where(input) ==> [[0, 0, 0],
   28059 //                   [0, 1, 0],
   28060 //                   [1, 0, 1],
   28061 //                   [1, 1, 1],
   28062 //                   [2, 1, 1]]
   28063 //
   28064 // # 'input' tensor is [[[1.5,  0.0]
   28065 // #                     [-0.5, 0.0]]
   28066 // #                    [[0.0,  0.25]
   28067 // #                     [0.0,  0.75]]
   28068 // #                    [[0.0,  0.0]
   28069 // #                     [0.0,  0.01]]]
   28070 // # 'input' has 5 nonzero values, so output has 5 coordinates.
   28071 // # 'input' has rank of 3, so coordinates have three indices.
   28072 // where(input) ==> [[0, 0, 0],
   28073 //                   [0, 1, 0],
   28074 //                   [1, 0, 1],
   28075 //                   [1, 1, 1],
   28076 //                   [2, 1, 1]]
   28077 //
   28078 // # 'input' tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
   28079 // #                     [0.0 + 0.5j, 0.0  + 0.0j]]
   28080 // #                    [[0.0 + 0.0j, 0.25 + 1.5j]
   28081 // #                     [0.0 + 0.0j, 0.75 + 0.0j]]
   28082 // #                    [[0.0 + 0.0j, 0.0  + 0.0j]
   28083 // #                     [0.0 + 0.0j, 0.01 + 0.0j]]]
   28084 // # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
   28085 // # 'input' has rank of 3, so coordinates have three indices.
   28086 // where(input) ==> [[0, 0, 0],
   28087 //                   [0, 1, 0],
   28088 //                   [1, 0, 1],
   28089 //                   [1, 1, 1],
   28090 //                   [2, 1, 1]]
   28091 // ```
   28092 func Where(scope *Scope, condition tf.Output) (index tf.Output) {
   28093 	if scope.Err() != nil {
   28094 		return
   28095 	}
   28096 	opspec := tf.OpSpec{
   28097 		Type: "Where",
   28098 		Input: []tf.Input{
   28099 			condition,
   28100 		},
   28101 	}
   28102 	op := scope.AddOperation(opspec)
   28103 	return op.Output(0)
   28104 }
   28105 
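// A hand-written sketch reproducing the first example above.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	cond := op.Const(s, [][]bool{{true, false}, {true, false}})
// 	index := op.Where(s, cond)
// 	graph, _ := s.Finalize() // error handling elided
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	res, _ := sess.Run(nil, []tf.Output{index}, nil)
// 	fmt.Println(res[0].Value()) // [[0 0] [1 0]] (int64 coordinates, one row per true element)
// }
// ```
//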
   28106 // QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
   28107 type QuantizeAndDequantizeAttr func(optionalAttr)
   28108 
   28109 // QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
   28110 // If not specified, defaults to true
   28111 func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
   28112 	return func(m optionalAttr) {
   28113 		m["signed_input"] = value
   28114 	}
   28115 }
   28116 
   28117 // QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
   28118 // If not specified, defaults to 8
   28119 func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
   28120 	return func(m optionalAttr) {
   28121 		m["num_bits"] = value
   28122 	}
   28123 }
   28124 
   28125 // QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
   28126 // If not specified, defaults to false
   28127 func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
   28128 	return func(m optionalAttr) {
   28129 		m["range_given"] = value
   28130 	}
   28131 }
   28132 
   28133 // QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
   28134 // If not specified, defaults to 0
   28135 func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
   28136 	return func(m optionalAttr) {
   28137 		m["input_min"] = value
   28138 	}
   28139 }
   28140 
   28141 // QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
   28142 // If not specified, defaults to 0
   28143 func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
   28144 	return func(m optionalAttr) {
   28145 		m["input_max"] = value
   28146 	}
   28147 }
   28148 
   28149 // Use QuantizeAndDequantizeV2 instead.
   28150 //
   28151 // DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
   28152 func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
   28153 	if scope.Err() != nil {
   28154 		return
   28155 	}
   28156 	attrs := map[string]interface{}{}
   28157 	for _, a := range optional {
   28158 		a(attrs)
   28159 	}
   28160 	opspec := tf.OpSpec{
   28161 		Type: "QuantizeAndDequantize",
   28162 		Input: []tf.Input{
   28163 			input,
   28164 		},
   28165 		Attrs: attrs,
   28166 	}
   28167 	op := scope.AddOperation(opspec)
   28168 	return op.Output(0)
   28169 }
   28170 
   28171 // Returns the diagonal part of the tensor.
   28172 //
   28173 // This operation returns a tensor with the `diagonal` part
   28174 // of the `input`. The `diagonal` part is computed as follows:
   28175 //
   28176 // Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
   28177 // tensor of rank `k` with dimensions `[D1,..., Dk]` where:
   28178 //
   28179 // `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
   28180 //
   28181 // For example:
   28182 //
   28183 // ```
   28184 // # 'input' is [[1, 0, 0, 0]
   28185 //               [0, 2, 0, 0]
   28186 //               [0, 0, 3, 0]
   28187 //               [0, 0, 0, 4]]
   28188 //
   28189 // tf.diag_part(input) ==> [1, 2, 3, 4]
   28190 // ```
   28191 //
   28192 // Arguments:
   28193 //	input: Rank k tensor where k is even and not zero.
   28194 //
   28195 // Returns The extracted diagonal.
   28196 func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
   28197 	if scope.Err() != nil {
   28198 		return
   28199 	}
   28200 	opspec := tf.OpSpec{
   28201 		Type: "DiagPart",
   28202 		Input: []tf.Input{
   28203 			input,
   28204 		},
   28205 	}
   28206 	op := scope.AddOperation(opspec)
   28207 	return op.Output(0)
   28208 }
   28209 
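// A hand-written sketch reproducing the `tf.diag_part` example above.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	input := op.Const(s, [][]int32{
// 		{1, 0, 0, 0},
// 		{0, 2, 0, 0},
// 		{0, 0, 3, 0},
// 		{0, 0, 0, 4},
// 	})
// 	diag := op.DiagPart(s, input)
// 	graph, _ := s.Finalize() // error handling elided
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	res, _ := sess.Run(nil, []tf.Output{diag}, nil)
// 	fmt.Println(res[0].Value()) // [1 2 3 4]
// }
// ```
//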
   28210 // QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
   28211 type QuantizedInstanceNormAttr func(optionalAttr)
   28212 
   28213 // QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
   28214 //
   28215 // value: If True, `given_y_min` and `given_y_max`
   28216 // are used as the output range. Otherwise,
   28217 // the implementation computes the output range.
   28218 // If not specified, defaults to false
   28219 func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr {
   28220 	return func(m optionalAttr) {
   28221 		m["output_range_given"] = value
   28222 	}
   28223 }
   28224 
   28225 // QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
   28226 //
   28227 // value: Output in `y_min` if `output_range_given` is True.
   28228 // If not specified, defaults to 0
   28229 func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr {
   28230 	return func(m optionalAttr) {
   28231 		m["given_y_min"] = value
   28232 	}
   28233 }
   28234 
   28235 // QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
   28236 //
   28237 // value: Output in `y_max` if `output_range_given` is True.
   28238 // If not specified, defaults to 0
   28239 func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr {
   28240 	return func(m optionalAttr) {
   28241 		m["given_y_max"] = value
   28242 	}
   28243 }
   28244 
   28245 // QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
   28246 //
   28247 // value: A small float number to avoid dividing by 0.
   28248 // If not specified, defaults to 1e-05
   28249 func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr {
   28250 	return func(m optionalAttr) {
   28251 		m["variance_epsilon"] = value
   28252 	}
   28253 }
   28254 
   28255 // QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
   28256 //
   28257 // value: Minimum value of `y_max - y_min`
   28258 // If not specified, defaults to 0.001
   28259 func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr {
   28260 	return func(m optionalAttr) {
   28261 		m["min_separation"] = value
   28262 	}
   28263 }
   28264 
   28265 // Quantized instance normalization.
   28266 //
   28267 // Arguments:
   28268 //	x: A 4D input Tensor.
   28269 //	x_min: The value represented by the lowest quantized input.
   28270 //	x_max: The value represented by the highest quantized input.
   28271 //
   28272 // Returns A 4D Tensor, the value represented by the lowest quantized output, and the value represented by the highest quantized output.
   28273 func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output) {
   28274 	if scope.Err() != nil {
   28275 		return
   28276 	}
   28277 	attrs := map[string]interface{}{}
   28278 	for _, a := range optional {
   28279 		a(attrs)
   28280 	}
   28281 	opspec := tf.OpSpec{
   28282 		Type: "QuantizedInstanceNorm",
   28283 		Input: []tf.Input{
   28284 			x, x_min, x_max,
   28285 		},
   28286 		Attrs: attrs,
   28287 	}
   28288 	op := scope.AddOperation(opspec)
   28289 	return op.Output(0), op.Output(1), op.Output(2)
   28290 }
   28291 
   28292 // FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
   28293 type FakeQuantWithMinMaxVarsAttr func(optionalAttr)
   28294 
   28295 // FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value.
   28296 // If not specified, defaults to 8
   28297 func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
   28298 	return func(m optionalAttr) {
   28299 		m["num_bits"] = value
   28300 	}
   28301 }
   28302 
   28303 // FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
   28304 // If not specified, defaults to false
   28305 func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
   28306 	return func(m optionalAttr) {
   28307 		m["narrow_range"] = value
   28308 	}
   28309 }
   28310 
   28311 // Fake-quantize the 'inputs' tensor of type float via global float scalars.
   28312 //
   28313 // The global float scalars `min` and `max` quantize 'inputs' into an 'outputs' tensor of the same shape as `inputs`.
   28314 //
   28315 // `[min; max]` define the clamping range for the `inputs` data.
   28316 // `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
   28317 // when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
   28318 // then de-quantized and output as floats in `[min; max]` interval.
   28319 // `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
   28320 //
   28321 // This operation has a gradient and thus allows for training `min` and `max`
   28322 // values.
   28323 func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
   28324 	if scope.Err() != nil {
   28325 		return
   28326 	}
   28327 	attrs := map[string]interface{}{}
   28328 	for _, a := range optional {
   28329 		a(attrs)
   28330 	}
   28331 	opspec := tf.OpSpec{
   28332 		Type: "FakeQuantWithMinMaxVars",
   28333 		Input: []tf.Input{
   28334 			inputs, min, max,
   28335 		},
   28336 		Attrs: attrs,
   28337 	}
   28338 	op := scope.AddOperation(opspec)
   28339 	return op.Output(0)
   28340 }
   28341
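// A hand-written sketch: with the default 8 bits, inputs are clamped to
// `[min; max]` and snapped onto 2^8 - 1 = 255 uniform steps, so `-0.3` clamps
// to 0, `1.5` clamps to 1, and `0.4` (exactly 102/255) passes through unchanged.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	inputs := op.Const(s, []float32{-0.3, 0.4, 1.5})
// 	minVal := op.Const(s, float32(0))
// 	maxVal := op.Const(s, float32(1))
// 	outputs := op.FakeQuantWithMinMaxVars(s, inputs, minVal, maxVal)
// 	graph, _ := s.Finalize() // error handling elided
// 	sess, _ := tf.NewSession(graph, nil)
// 	defer sess.Close()
// 	res, _ := sess.Run(nil, []tf.Output{outputs}, nil)
// 	fmt.Println(res[0].Value()) // approximately [0 0.4 1]
// }
// ```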