/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <numeric>

#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/tensor_format.h"

namespace tensorflow {
namespace {

class BiasOp : public XlaOpKernel {
 public:
  explicit BiasOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
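    // "BiasAdd" carries an optional data_format attribute; the deprecated
    // "BiasAddV1" does not, so fall back to NHWC when the attribute is
    // absent.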
    string data_format;
    if (ctx->GetAttr("data_format", &data_format).ok()) {
      OP_REQUIRES(ctx, FormatFromString(data_format, &data_format_),
                  errors::InvalidArgument("Invalid data format"));
    } else {
      data_format_ = FORMAT_NHWC;
    }
  }

  void Compile(XlaOpKernelContext* ctx) override {
    const TensorShape input_shape = ctx->InputShape(0);
    const TensorShape bias_shape = ctx->InputShape(1);

    OP_REQUIRES(ctx, TensorShapeUtils::IsMatrixOrHigher(input_shape),
                errors::InvalidArgument("Input tensor must be at least 2D: ",
                                        input_shape.DebugString()));
    OP_REQUIRES(ctx, TensorShapeUtils::IsVector(bias_shape),
                errors::InvalidArgument("Biases must be 1D: ",
                                        bias_shape.DebugString()));

    // feature_dim is the channel (C) dimension of the data.
    int feature_dim = (data_format_ == FORMAT_NHWC)
                          ? input_shape.dims() - 1
                          : /*data_format == FORMAT_NCHW*/ 1;
    OP_REQUIRES(
        ctx, feature_dim >= 0,
        errors::InvalidArgument("Input tensor does not have enough dimensions "
                                "to contain the feature dimension"));
    OP_REQUIRES(
        ctx, bias_shape.dim_size(0) == input_shape.dim_size(feature_dim),
        errors::InvalidArgument(
            "Must provide as many biases as the feature dimension "
            "of the input tensor: ",
            bias_shape.DebugString(), " vs. ", input_shape.DebugString()));

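    // xla::Add with broadcast_dimensions={feature_dim} maps the 1D bias onto
    // the feature dimension of the input and implicitly broadcasts it across
    // all other dimensions, e.g. adding a [C] bias to an NHWC [N,H,W,C]
    // input.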
    xla::XlaOp result = xla::Add(ctx->Input(0), ctx->Input(1), {feature_dim});
    ctx->SetOutput(0, result);
  }

 private:
  TensorFormat data_format_;
};

REGISTER_XLA_OP(Name("BiasAdd"), BiasOp);
REGISTER_XLA_OP(Name("BiasAddV1"), BiasOp);

class BiasAddGradOp : public XlaOpKernel {
 public:
  explicit BiasAddGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
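    // As in BiasOp above, default to NHWC when no data_format attribute is
    // present on the op.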
    string data_format;
    if (ctx->GetAttr("data_format", &data_format).ok()) {
      OP_REQUIRES(ctx, FormatFromString(data_format, &data_format_),
                  errors::InvalidArgument("Invalid data format"));
    } else {
      data_format_ = FORMAT_NHWC;
    }
  }

  void Compile(XlaOpKernelContext* ctx) override {
    const TensorShape out_backprop_shape = ctx->InputShape(0);

    OP_REQUIRES(ctx, TensorShapeUtils::IsMatrixOrHigher(out_backprop_shape),
                errors::InvalidArgument("Input tensor must be at least 2D: ",
                                        out_backprop_shape.DebugString()));

    // feature_dim is the channel (C) dimension of the data.
    int feature_dim = (data_format_ == FORMAT_NHWC)
                          ? out_backprop_shape.dims() - 1
                          : /*data_format == FORMAT_NCHW*/ 1;
    OP_REQUIRES(
        ctx, feature_dim >= 0,
        errors::InvalidArgument("Input tensor does not have enough dimensions "
                                "to contain the feature dimension"));

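    // The bias gradient sums out_backprop over every dimension except the
    // feature dimension, yielding a 1D result with the same length as the
    // bias.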
    std::vector<int64> reduce_dims(out_backprop_shape.dims() - 1);
    std::iota(reduce_dims.begin(), reduce_dims.begin() + feature_dim, 0);
    std::iota(reduce_dims.begin() + feature_dim, reduce_dims.end(),
              feature_dim + 1);
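    // For example, a 4D NHWC out_backprop has feature_dim == 3 and
    // reduce_dims == {0, 1, 2}; with NCHW, feature_dim == 1 and
    // reduce_dims == {0, 2, 3}.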
    xla::XlaBuilder* const b = ctx->builder();
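    // Low-precision element types (e.g. half or bfloat16) are accumulated in
    // a wider type to reduce rounding error in the sum, then converted back
    // to the input type below.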
    const DataType accumulation_type =
        XlaHelpers::SumAccumulationType(input_type(0));
    auto converted =
        XlaHelpers::ConvertElementType(ctx->Input(0), accumulation_type);
    auto reduce =
        xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
                    *ctx->GetOrCreateAdd(accumulation_type), reduce_dims);
    ctx->SetOutput(0, XlaHelpers::ConvertElementType(reduce, input_type(0)));
  }

 private:
  TensorFormat data_format_;
};

REGISTER_XLA_OP(Name("BiasAddGrad"), BiasAddGradOp);

}  // namespace
}  // namespace tensorflow