/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

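// XLA implementations of the BiasAdd, BiasAddV1, and BiasAddGrad operators.
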
#include <numeric>

#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/tensor_format.h"

namespace tensorflow {
namespace {

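// Adds a 1D bias vector along the feature dimension of the input tensor, as
// selected by the "data_format" attribute (NHWC by default).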
class BiasOp : public XlaOpKernel {
 public:
  explicit BiasOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
    string data_format;
    if (ctx->GetAttr("data_format", &data_format).ok()) {
      OP_REQUIRES(ctx, FormatFromString(data_format, &data_format_),
                  errors::InvalidArgument("Invalid data format"));
    } else {
      data_format_ = FORMAT_NHWC;
    }
  }

  void Compile(XlaOpKernelContext* ctx) override {
    const TensorShape input_shape = ctx->InputShape(0);
    const TensorShape bias_shape = ctx->InputShape(1);

    OP_REQUIRES(ctx, TensorShapeUtils::IsMatrixOrHigher(input_shape),
                errors::InvalidArgument("Input tensor must be at least 2D: ",
                                        input_shape.DebugString()));
    OP_REQUIRES(ctx, TensorShapeUtils::IsVector(bias_shape),
                errors::InvalidArgument("Biases must be 1D: ",
                                        bias_shape.DebugString()));
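    // The feature dimension is the last dimension for NHWC and the third
    // dimension from the end for NCHW (e.g. dimension 1 of a 4D tensor).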
    int feature_dim = (data_format_ == FORMAT_NHWC) ? input_shape.dims() - 1
                                                    : input_shape.dims() - 3;
    OP_REQUIRES(
        ctx, feature_dim >= 0,
        errors::InvalidArgument("Input tensor does not have enough dimensions "
                                "to contain the feature dimension"));
    OP_REQUIRES(
        ctx, bias_shape.dim_size(0) == input_shape.dim_size(feature_dim),
        errors::InvalidArgument(
            "Must provide as many biases as the feature dimension "
            "of the input tensor: ",
            bias_shape.DebugString(), " vs. ", input_shape.DebugString()));

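    // Add the bias to the input; the {feature_dim} broadcast dimension maps
    // the 1D bias onto the input's feature dimension.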
    xla::ComputationDataHandle result =
        ctx->builder()->Add(ctx->Input(0), ctx->Input(1), {feature_dim});
    ctx->SetOutput(0, result);
  }

 private:
  TensorFormat data_format_;
};

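// BiasAdd and the older BiasAddV1 share the same XLA implementation.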
REGISTER_XLA_OP(Name("BiasAdd"), BiasOp);
REGISTER_XLA_OP(Name("BiasAddV1"), BiasOp);

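// Computes the gradient of BiasAdd by summing the incoming gradient over
// every dimension except the feature dimension.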
class BiasAddGradOp : public XlaOpKernel {
 public:
  explicit BiasAddGradOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
    string data_format;
    if (ctx->GetAttr("data_format", &data_format).ok()) {
      OP_REQUIRES(ctx, FormatFromString(data_format, &data_format_),
                  errors::InvalidArgument("Invalid data format"));
    } else {
      data_format_ = FORMAT_NHWC;
    }
  }

  void Compile(XlaOpKernelContext* ctx) override {
    const TensorShape out_backprop_shape = ctx->InputShape(0);

    OP_REQUIRES(ctx, TensorShapeUtils::IsMatrixOrHigher(out_backprop_shape),
                errors::InvalidArgument("Input tensor must be at least 2D: ",
                                        out_backprop_shape.DebugString()));

    int feature_dim = (data_format_ == FORMAT_NHWC)
                          ? out_backprop_shape.dims() - 1
                          : out_backprop_shape.dims() - 3;
    OP_REQUIRES(
        ctx, feature_dim >= 0,
        errors::InvalidArgument("Input tensor does not have enough dimensions "
                                "to contain the feature dimension"));

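    // Reduce over every dimension except feature_dim; e.g. for a 4D NHWC
    // out_backprop this yields reduce_dims = {0, 1, 2}.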
    std::vector<int64> reduce_dims(out_backprop_shape.dims() - 1);
    std::iota(reduce_dims.begin(), reduce_dims.begin() + feature_dim, 0);
    std::iota(reduce_dims.begin() + feature_dim, reduce_dims.end(),
              feature_dim + 1);
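    // Sum out_backprop over reduce_dims, starting from zero and combining
    // elements with a scalar add computation.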
    xla::ComputationDataHandle result = ctx->builder()->Reduce(
        ctx->Input(0), XlaHelpers::Zero(ctx->builder(), input_type(0)),
        *ctx->GetOrCreateAdd(input_type(0)), reduce_dims);
    ctx->SetOutput(0, result);
  }

 private:
  TensorFormat data_format_;
};

REGISTER_XLA_OP(Name("BiasAddGrad"), BiasAddGradOp);

}  // namespace
}  // namespace tensorflow