/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/c/eager/c_api.h"

#include <string.h>
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/config.pb.h"

using tensorflow::string;

namespace {

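// Returns a TFE_TensorHandle for the 2x2 float matrix [[1, 2], [3, 4]]. The
// caller takes ownership of the returned handle.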
TFE_TensorHandle* TestMatrixTensorHandle() {
  int64_t dims[] = {2, 2};
  float data[] = {1.0f, 2.0f, 3.0f, 4.0f};
  TF_Tensor* t = TF_AllocateTensor(
      TF_FLOAT, &dims[0], sizeof(dims) / sizeof(int64_t), sizeof(data));
  memcpy(TF_TensorData(t), &data[0], TF_TensorByteSize(t));
  TF_Status* status = TF_NewStatus();
  TFE_TensorHandle* th = TFE_NewTensorHandle(t, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteTensor(t);
  TF_DeleteStatus(status);
  return th;
}

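// Returns a MatMul op that multiplies 'a' by 'b' (no transposes). The caller
// takes ownership of the returned op.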
TFE_Op* MatMulOp(TFE_Context* ctx, TFE_TensorHandle* a, TFE_TensorHandle* b) {
  TF_Status* status = TF_NewStatus();

  TFE_Op* op = TFE_NewOp(ctx, "MatMul", status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(op, a, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(op, b, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
  TFE_OpSetAttrBool(op, "transpose_a", 0);
  TFE_OpSetAttrBool(op, "transpose_b", 0);
  TFE_OpSetAttrType(op, "T", TFE_TensorHandleDataType(a));

  return op;
}

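// Returns a TFE_TensorHandle for the rank-1 int32 tensor {1}, used below as a
// reduction axis. The caller takes ownership of the returned handle.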
TFE_TensorHandle* TestAxisTensorHandle() {
  int64_t dims[] = {1};
  int data[] = {1};
  TF_Tensor* t = TF_AllocateTensor(
      TF_INT32, &dims[0], sizeof(dims) / sizeof(int64_t), sizeof(data));
  memcpy(TF_TensorData(t), &data[0], TF_TensorByteSize(t));
  TF_Status* status = TF_NewStatus();
  TFE_TensorHandle* th = TFE_NewTensorHandle(t, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteTensor(t);
  TF_DeleteStatus(status);
  return th;
}

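// Returns a Min op that reduces 'input' along 'axis' with keep_dims=true. For
// the 2x2 test matrix above and axis {1}, the result is [[1], [3]] (see
// Execute_Min_CPU below). The caller takes ownership of the returned op.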
TFE_Op* MinOp(TFE_Context* ctx, TFE_TensorHandle* input,
              TFE_TensorHandle* axis) {
  TF_Status* status = TF_NewStatus();

  TFE_Op* op = TFE_NewOp(ctx, "Min", status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(op, input, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(op, axis, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpSetAttrBool(op, "keep_dims", 1);
  TFE_OpSetAttrType(op, "Tidx", TF_INT32);
  TF_DeleteStatus(status);
  TFE_OpSetAttrType(op, "T", TFE_TensorHandleDataType(input));

  return op;
}

// If there is a GPU device, returns true and sets 'gpu_device_name'
// accordingly.
bool GetGPUDeviceName(TFE_Context* ctx, string* gpu_device_name) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TF_DeviceList* devices = TFE_ContextListDevices(ctx, status.get());
  CHECK_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  const int num_devices = TF_DeviceListCount(devices);
  for (int i = 0; i < num_devices; ++i) {
    const string device_type(TF_DeviceListType(devices, i, status.get()));
    CHECK_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    const string device_name(TF_DeviceListName(devices, i, status.get()));
    CHECK_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
    if (device_type == "GPU") {
      *gpu_device_name = device_name;
      LOG(INFO) << "Found GPU device " << device_name;
      TF_DeleteDeviceList(devices);
      return true;
    }
  }
  TF_DeleteDeviceList(devices);
  return false;
}
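// Typical use, as in the GPU-dependent tests below:
//
//   string gpu_device_name;
//   if (GetGPUDeviceName(ctx, &gpu_device_name)) {
//     // ... exercise the GPU-only code path ...
//   }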

void BM_InitOp(int iters) {
  tensorflow::testing::StopTiming();
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  tensorflow::testing::StartTiming();
  for (int i = 0; i < iters; ++i) {
    TFE_Op* matmul = MatMulOp(ctx, m, m);
    TFE_DeleteOp(matmul);
  }
  tensorflow::testing::StopTiming();
  TFE_DeleteTensorHandle(m);
  TFE_DeleteContext(ctx, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
}
BENCHMARK(BM_InitOp);

void BM_Execute(int iters) {
  tensorflow::testing::StopTiming();
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  TFE_Op* matmul = MatMulOp(ctx, m, m);
  TFE_TensorHandle* retvals[1];
  int num_retvals = 1;
  tensorflow::testing::StartTiming();
  for (int i = 0; i < iters; ++i) {
    TFE_Execute(matmul, &retvals[0], &num_retvals, status);
    CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  }
  tensorflow::testing::StopTiming();
  TFE_DeleteOp(matmul);
  TFE_DeleteTensorHandle(m);
  TFE_DeleteContext(ctx, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
}
BENCHMARK(BM_Execute);

TEST(CAPI, Context) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  TFE_DeleteContextOptions(opts);

  TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  TFE_DeleteContext(ctx, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  const int num_devices = TF_DeviceListCount(devices);
  EXPECT_GE(num_devices, 1) << "At least one CPU device should exist";
  for (int i = 0; i < num_devices; ++i) {
    EXPECT_NE("", string(TF_DeviceListName(devices, i, status))) << i;
    EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  }
  TF_DeleteDeviceList(devices);
  TF_DeleteStatus(status);
}

TEST(CAPI, TensorHandle) {
  TFE_TensorHandle* h = TestMatrixTensorHandle();
  EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(h));

  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TF_Tensor* t = TFE_TensorHandleResolve(h, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  ASSERT_EQ(16, TF_TensorByteSize(t));
  float data[4] = {0};
  memcpy(&data[0], TF_TensorData(t), TF_TensorByteSize(t));
  EXPECT_EQ(1.0, data[0]);
  EXPECT_EQ(2.0, data[1]);
  EXPECT_EQ(3.0, data[2]);
  EXPECT_EQ(4.0, data[3]);
  TF_DeleteTensor(t);
  TFE_DeleteTensorHandle(h);
}

TEST(CAPI, TensorHandleCopyBetweenDevices) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status.get());
  TFE_DeleteContextOptions(opts);
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  TFE_TensorHandle* hcpu = TestMatrixTensorHandle();
  TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  TF_DeviceList* devices = TFE_ContextListDevices(ctx, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  const int num_devices = TF_DeviceListCount(devices);

  const char* kCPUDevice = "CPU:0";
  for (int i = 0; i < num_devices; ++i) {
    const string name(TF_DeviceListName(devices, i, status.get()));
    if (TF_GetCode(status.get()) != TF_OK) {
      ADD_FAILURE() << i << " -- " << TF_Message(status.get());
      continue;
    }
    auto tag = tensorflow::strings::StrCat("Device #", i, " (", name, ")");
    // Copy to device
    TFE_TensorHandle* hdevice =
        TFE_TensorHandleCopyToDevice(hcpu, ctx, name.c_str(), status.get());
    if (TF_GetCode(status.get()) != TF_OK) {
      ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
      continue;
    }
    // Copy from device to the same device.
    TFE_TensorHandle* hdevice2 =
        TFE_TensorHandleCopyToDevice(hdevice, ctx, name.c_str(), status.get());
    if (TF_GetCode(status.get()) != TF_OK) {
      ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
      continue;
    }
    TFE_DeleteTensorHandle(hdevice);
    // Copy back to CPU
    TFE_TensorHandle* hcopy =
        TFE_TensorHandleCopyToDevice(hdevice2, ctx, kCPUDevice, status.get());
    if (TF_GetCode(status.get()) != TF_OK) {
      ADD_FAILURE() << tag << " -- " << TF_Message(status.get());
      continue;
    }
    TFE_DeleteTensorHandle(hdevice2);

    // Ensure that the contents are the same!
    TF_Tensor* tcopy = TFE_TensorHandleResolve(hcopy, status.get());
    TFE_DeleteTensorHandle(hcopy);
    if (TF_GetCode(status.get()) != TF_OK) {
      ADD_FAILURE() << tag;
      continue;
    }
    EXPECT_EQ(TF_TensorByteSize(t), TF_TensorByteSize(tcopy)) << tag;
    EXPECT_EQ(
        0, memcmp(TF_TensorData(t), TF_TensorData(tcopy), TF_TensorByteSize(t)))
        << tag;
    TF_DeleteTensor(tcopy);
  }

  TF_DeleteDeviceList(devices);
  TF_DeleteTensor(t);
  TFE_DeleteTensorHandle(hcpu);
  TFE_DeleteContext(ctx, status.get());
  EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
}

TEST(CAPI, TensorHandleCopyBetweenTwoGPUDevices) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status.get());
  TFE_DeleteContextOptions(opts);
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  TFE_TensorHandle* hcpu = TestMatrixTensorHandle();
  TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  TF_DeviceList* devices = TFE_ContextListDevices(ctx, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
  const int num_devices = TF_DeviceListCount(devices);

  const char* kCPUDevice = "CPU:0";
  // Skip the test unless there are at least three devices (e.g. a CPU plus
  // two GPUs).
  if (num_devices < 3) {
    TF_DeleteDeviceList(devices);
    TF_DeleteTensor(t);
    TFE_DeleteTensorHandle(hcpu);
    TFE_DeleteContext(ctx, status.get());
    return;
  }
  const string gpu_1_name(TF_DeviceListName(devices, 1, status.get()));
  ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
  const string gpu_2_name(TF_DeviceListName(devices, 2, status.get()));
  ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
  TFE_TensorHandle* hdevice =
      TFE_TensorHandleCopyToDevice(hcpu, ctx, gpu_1_name.c_str(), status.get());
  ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);

  TFE_TensorHandle* hdevice2 = TFE_TensorHandleCopyToDevice(
      hdevice, ctx, gpu_2_name.c_str(), status.get());
  ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
  TFE_DeleteTensorHandle(hdevice);
  // Copy back to CPU
  TFE_TensorHandle* hcopy =
      TFE_TensorHandleCopyToDevice(hdevice2, ctx, kCPUDevice, status.get());
  ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
  TFE_DeleteTensorHandle(hdevice2);

  // Ensure that the contents are the same!
  TF_Tensor* tcopy = TFE_TensorHandleResolve(hcopy, status.get());
  TFE_DeleteTensorHandle(hcopy);
  ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK);
  EXPECT_EQ(TF_TensorByteSize(t), TF_TensorByteSize(tcopy));
  EXPECT_EQ(
      0, memcmp(TF_TensorData(t), TF_TensorData(tcopy), TF_TensorByteSize(t)));
  TF_DeleteTensor(tcopy);

  TF_DeleteDeviceList(devices);
  TF_DeleteTensor(t);
  TFE_DeleteTensorHandle(hcpu);
  TFE_DeleteContext(ctx, status.get());
  EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
}

TEST(CAPI, TensorHandleSilentCopy) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetDevicePlacementPolicy(opts, TFE_DEVICE_PLACEMENT_SILENT);
  TFE_Context* ctx = TFE_NewContext(opts, status.get());
  TFE_DeleteContextOptions(opts);
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  TFE_TensorHandle* hcpu = TestMatrixTensorHandle();
  TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Skip the GPU-specific portion of this test if no GPU is present.
  string gpu_device_name;
  if (GetGPUDeviceName(ctx, &gpu_device_name)) {
    TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
        hcpu, ctx, gpu_device_name.c_str(), status.get());
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());

    TFE_Op* matmul = MatMulOp(ctx, hcpu, hgpu);
    TFE_OpSetDevice(matmul, gpu_device_name.c_str(), status.get());
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    TFE_TensorHandle* retvals[1];
    int num_retvals = 1;
    TFE_Execute(matmul, &retvals[0], &num_retvals, status.get());
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    TFE_DeleteOp(matmul);
    TFE_DeleteTensorHandle(retvals[0]);
    TFE_DeleteTensorHandle(hgpu);
  }

  TF_DeleteTensor(t);
  TFE_DeleteTensorHandle(hcpu);
  TFE_DeleteContext(ctx, status.get());
  EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
}

TEST(CAPI, TensorHandleSilentCopyLocal) {
  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
      TF_NewStatus(), TF_DeleteStatus);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetDevicePlacementPolicy(opts,
                                             TFE_DEVICE_PLACEMENT_EXPLICIT);
  TFE_Context* ctx = TFE_NewContext(opts, status.get());
  TFE_ContextSetThreadLocalDevicePlacementPolicy(ctx,
                                                 TFE_DEVICE_PLACEMENT_SILENT);
  TFE_DeleteContextOptions(opts);
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  TFE_TensorHandle* hcpu = TestMatrixTensorHandle();
  TF_Tensor* t = TFE_TensorHandleResolve(hcpu, status.get());
  ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());

  // Skip the GPU-specific portion of this test if no GPU is present.
  string gpu_device_name;
  if (GetGPUDeviceName(ctx, &gpu_device_name)) {
    TFE_TensorHandle* hgpu = TFE_TensorHandleCopyToDevice(
        hcpu, ctx, gpu_device_name.c_str(), status.get());
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());

    TFE_Op* matmul = MatMulOp(ctx, hcpu, hgpu);
    TFE_OpSetDevice(matmul, gpu_device_name.c_str(), status.get());
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    TFE_TensorHandle* retvals[1];
    int num_retvals = 1;
    TFE_Execute(matmul, &retvals[0], &num_retvals, status.get());
    ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
    TFE_DeleteOp(matmul);
    TFE_DeleteTensorHandle(retvals[0]);
    TFE_DeleteTensorHandle(hgpu);
  }

  TF_DeleteTensor(t);
  TFE_DeleteTensorHandle(hcpu);
  TFE_DeleteContext(ctx, status.get());
  EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
}

TEST(CAPI, SetAndGetOpDevices) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  TFE_Op* matmul = MatMulOp(ctx, m, m);

  // Skip the GPU-specific portion of this test if no GPU is present.
  string gpu_device_name;
  if (GetGPUDeviceName(ctx, &gpu_device_name)) {
    TFE_OpSetDevice(matmul, "GPU:0", status);
    ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
    const char* device_name = TFE_OpGetDevice(matmul, status);
    ASSERT_TRUE(strstr(device_name, "GPU:0") != nullptr);

    TFE_OpSetDevice(matmul, "CPU:0", status);
    ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
    device_name = TFE_OpGetDevice(matmul, status);
    ASSERT_TRUE(strstr(device_name, "CPU:0") != nullptr);
  }

  TFE_DeleteOp(matmul);
  TFE_DeleteTensorHandle(m);
  TFE_DeleteContext(ctx, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
}

TEST(CAPI, Execute_MatMul_CPU) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  TFE_Op* matmul = MatMulOp(ctx, m, m);
  TFE_TensorHandle* retvals[2] = {nullptr};
  int num_retvals = 2;  // Should be reduced to 1 by the TFE_Execute call.
  TFE_Execute(matmul, &retvals[0], &num_retvals, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteOp(matmul);
  TFE_DeleteTensorHandle(m);
  TFE_DeleteContext(ctx, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  ASSERT_EQ(1, num_retvals);

  TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
  TFE_DeleteTensorHandle(retvals[0]);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  float product[4] = {0};
  EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
  memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
  TF_DeleteTensor(t);
  EXPECT_EQ(7, product[0]);
  EXPECT_EQ(10, product[1]);
  EXPECT_EQ(15, product[2]);
  EXPECT_EQ(22, product[3]);
  TF_DeleteStatus(status);
}

TEST(CAPI, Execute_Min_CPU) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* input = TestMatrixTensorHandle();
  TFE_TensorHandle* axis = TestAxisTensorHandle();
  TFE_Op* minOp = MinOp(ctx, input, axis);
  TFE_TensorHandle* retvals[2] = {nullptr};
  int num_retvals = 2;  // Should be reduced to 1 by the TFE_Execute call.
  TFE_Execute(minOp, &retvals[0], &num_retvals, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteOp(minOp);
  TFE_DeleteTensorHandle(input);
  TFE_DeleteTensorHandle(axis);
  TFE_DeleteContext(ctx, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  ASSERT_EQ(1, num_retvals);

  TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
  TFE_DeleteTensorHandle(retvals[0]);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  float output[2] = {0};
  EXPECT_EQ(sizeof(output), TF_TensorByteSize(t));
  memcpy(&output[0], TF_TensorData(t), TF_TensorByteSize(t));
  TF_DeleteTensor(t);
  EXPECT_EQ(1, output[0]);
  EXPECT_EQ(3, output[1]);
  TF_DeleteStatus(status);
}

#ifdef TENSORFLOW_EAGER_USE_XLA
TEST(CAPI, Execute_MatMul_XLA_CPU) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  TFE_Op* matmul = MatMulOp(ctx, m, m);

  TFE_OpSetXLACompilation(matmul, true);

  TFE_TensorHandle* retvals[2] = {nullptr};
  int num_retvals = 2;  // Should be reduced to 1 by the TFE_Execute call.
  TFE_Execute(matmul, &retvals[0], &num_retvals, status);
  // Running a primitive TF operator via XLA is not yet supported.
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  TFE_DeleteOp(matmul);
  TFE_DeleteTensorHandle(m);
  TFE_DeleteContext(ctx, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  EXPECT_EQ(1, num_retvals);

  TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
  TFE_DeleteTensorHandle(retvals[0]);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  float product[4] = {0};
  EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
  memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
  TF_DeleteTensor(t);
  EXPECT_EQ(7, product[0]);
  EXPECT_EQ(10, product[1]);
  EXPECT_EQ(15, product[2]);
  EXPECT_EQ(22, product[3]);

  TF_DeleteStatus(status);
}

TEST(CAPI, Execute_Min_XLA_CPU) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* input = TestMatrixTensorHandle();
  TFE_TensorHandle* axis = TestAxisTensorHandle();
  TFE_Op* minOp = MinOp(ctx, input, axis);

  TFE_OpSetXLACompilation(minOp, true);

  TFE_TensorHandle* retvals[2] = {nullptr};
  int num_retvals = 2;  // Should be reduced to 1 by the TFE_Execute call.
  TFE_Execute(minOp, &retvals[0], &num_retvals, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteOp(minOp);
  TFE_DeleteTensorHandle(input);
  TFE_DeleteTensorHandle(axis);
  TFE_DeleteContext(ctx, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  ASSERT_EQ(1, num_retvals);

  TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
  TFE_DeleteTensorHandle(retvals[0]);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  float output[2] = {0};
  EXPECT_EQ(sizeof(output), TF_TensorByteSize(t));
  memcpy(&output[0], TF_TensorData(t), TF_TensorByteSize(t));
  TF_DeleteTensor(t);
  EXPECT_EQ(1, output[0]);
  EXPECT_EQ(3, output[1]);
  TF_DeleteStatus(status);
}
#endif  // TENSORFLOW_EAGER_USE_XLA

TEST(CAPI, ExecuteWithTracing) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  TFE_ContextEnableRunMetadata(ctx);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  TFE_Op* matmul = MatMulOp(ctx, m, m);
  TFE_TensorHandle* retvals[2] = {nullptr};
  int num_retvals = 2;  // Should be reduced to 1 by the TFE_Execute call.
  TFE_Execute(matmul, &retvals[0], &num_retvals, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteOp(matmul);
  TFE_DeleteTensorHandle(m);
  TF_Buffer* b = TF_NewBuffer();
  TFE_ContextExportRunMetadata(ctx, b, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  tensorflow::RunMetadata rm;
  EXPECT_TRUE(
      rm.ParseFromString({reinterpret_cast<const char*>(b->data), b->length}));
  TF_DeleteBuffer(b);
  TFE_DeleteContext(ctx, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  ASSERT_EQ(1, num_retvals);

  TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
  TFE_DeleteTensorHandle(retvals[0]);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  float product[4] = {0};
  EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
  memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
  TF_DeleteTensor(t);
  EXPECT_EQ(7, product[0]);
  EXPECT_EQ(10, product[1]);
  EXPECT_EQ(15, product[2]);
  EXPECT_EQ(22, product[3]);
  TF_DeleteStatus(status);
}

TEST(CAPI, Function_ident_CPU) {
  // First create a simple identity function.
  TF_Graph* function_graph = TF_NewGraph();
  TF_OperationDescription* arg_descr =
      TF_NewOperation(function_graph, "Placeholder", "arg");
  TF_SetAttrType(arg_descr, "dtype", TF_INT32);
  TF_Status* status = TF_NewStatus();
  TF_Operation* arg = TF_FinishOperation(arg_descr, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_OperationDescription* id_descr =
      TF_NewOperation(function_graph, "Identity", "id");
  TF_SetAttrType(id_descr, "T", TF_INT32);
  TF_AddInput(id_descr, {arg, 0});
  TF_Operation* id = TF_FinishOperation(id_descr, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_Output input{arg, 0};
  TF_Output output{id, 0};
  TF_Function* fn =
      TF_GraphToFunction(function_graph, "ident", 0, 1, &id, 1, &input, 1,
                         &output, nullptr, nullptr, "test", status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteGraph(function_graph);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TFE_DeleteContextOptions(opts);
  TFE_ContextAddFunction(ctx, fn, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteFunction(fn);

  TF_Tensor* t =
      TF_AllocateTensor(TF_INT32, nullptr, 0, 1 * sizeof(tensorflow::int32));
  *reinterpret_cast<tensorflow::int32*>(TF_TensorData(t)) = 42;
  TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteTensor(t);

  TFE_Op* op = TFE_NewOp(ctx, "ident", status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TFE_OpAddInput(op, h, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);

  std::vector<TFE_TensorHandle*> result;
  result.push_back(nullptr);
  int num_retvals = 1;
  TFE_Execute(op, result.data(), &num_retvals, status);
  TFE_DeleteOp(op);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  ASSERT_EQ(num_retvals, 1);

  TF_Tensor* r = TFE_TensorHandleResolve(result[0], status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  EXPECT_EQ(*reinterpret_cast<tensorflow::int32*>(TF_TensorData(r)), 42);
  TFE_DeleteTensorHandle(h);
  TF_DeleteTensor(r);
  TFE_DeleteTensorHandle(result[0]);
  TFE_DeleteContext(ctx, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteStatus(status);
}

#ifdef TENSORFLOW_EAGER_USE_XLA
TEST(CAPI, Function_ident_XLA_CPU) {
  // First create a simple identity function.
  TF_Graph* function_graph = TF_NewGraph();
  TF_OperationDescription* arg_descr =
      TF_NewOperation(function_graph, "Placeholder", "arg");
  TF_SetAttrType(arg_descr, "dtype", TF_INT32);
  TF_Status* status = TF_NewStatus();
  TF_Operation* arg = TF_FinishOperation(arg_descr, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_OperationDescription* id_descr =
      TF_NewOperation(function_graph, "Identity", "id");
  TF_SetAttrType(id_descr, "T", TF_INT32);
  TF_AddInput(id_descr, {arg, 0});
  TF_Operation* id = TF_FinishOperation(id_descr, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_Output input{arg, 0};
  TF_Output output{id, 0};
  TF_Function* fn =
      TF_GraphToFunction(function_graph, "ident", 0, 1, &id, 1, &input, 1,
                         &output, nullptr, nullptr, "test", status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteGraph(function_graph);
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TFE_DeleteContextOptions(opts);
  TFE_ContextAddFunction(ctx, fn, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteFunction(fn);

  TF_Tensor* t =
      TF_AllocateTensor(TF_INT32, nullptr, 0, 1 * sizeof(tensorflow::int32));
  *reinterpret_cast<tensorflow::int32*>(TF_TensorData(t)) = 42;
  TFE_TensorHandle* h = TFE_NewTensorHandle(t, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteTensor(t);

  TFE_Op* op = TFE_NewOp(ctx, "ident", status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TFE_OpAddInput(op, h, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);

  // Now run it via XLA.
  TFE_OpSetXLACompilation(op, true);

  std::vector<TFE_TensorHandle*> result;
  result.push_back(nullptr);
  int num_retvals = 1;
  TFE_Execute(op, result.data(), &num_retvals, status);
  TFE_DeleteOp(op);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  ASSERT_EQ(num_retvals, 1);

  TF_Tensor* r = TFE_TensorHandleResolve(result[0], status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  EXPECT_EQ(*reinterpret_cast<tensorflow::int32*>(TF_TensorData(r)), 42);
  TFE_DeleteTensorHandle(h);
  TF_DeleteTensor(r);
  TFE_DeleteTensorHandle(result[0]);
  TFE_DeleteContext(ctx, status);
  ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
  TF_DeleteStatus(status);
}
#endif  // TENSORFLOW_EAGER_USE_XLA

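// Returns a serialized FunctionDef for a function named 'MatMulFunction' that
// computes matmul(a, a); the result is suitable for TFE_ContextAddFunctionDef
// (see FunctionDefAndExecute below).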
string MatMulFunction() {
  tensorflow::FunctionDef def;
  CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
      "    signature {"
      "      name: 'MatMulFunction'"
      "      input_arg {"
      "        name: 'a'"
      "        type: DT_FLOAT"
      "      }"
      "      output_arg {"
      "        name: 'm'"
      "        type: DT_FLOAT"
      "      }"
      "    }"
      "    node_def {"
      "      name: 'matmul'"
      "      op: 'MatMul'"
      "      input: 'a'"
      "      input: 'a'"
      "      attr {"
      "        key: 'T'"
      "        value {"
      "          type: DT_FLOAT"
      "        }"
      "      }"
      "    }"
      "    ret {"
      "      key: 'm'"
      "      value: 'matmul:product'"
      "    }",
      &def));
  return def.SerializeAsString();
}

TEST(CAPI, FunctionDefAndExecute) {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  string function_def = MatMulFunction();
  TFE_ContextAddFunctionDef(ctx, function_def.data(), function_def.size(),
                            status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  TFE_TensorHandle* retval[1] = {nullptr};
  int num_retvals = 1;
  TFE_Op* op = TFE_NewOp(ctx, "MatMulFunction", status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(op, m, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_Execute(op, &retval[0], &num_retvals, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  ASSERT_EQ(1, num_retvals);
  TFE_DeleteOp(op);
  TFE_DeleteTensorHandle(m);
  TF_Tensor* t = TFE_TensorHandleResolve(retval[0], status);
  TFE_DeleteTensorHandle(retval[0]);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  float product[4] = {0};
  EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
  memcpy(&product[0], TF_TensorData(t), TF_TensorByteSize(t));
  TF_DeleteTensor(t);
  EXPECT_EQ(7, product[0]);
  EXPECT_EQ(10, product[1]);
  EXPECT_EQ(15, product[2]);
  EXPECT_EQ(22, product[3]);
  TFE_DeleteContext(ctx, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
}

void BM_ExecuteFunction(int iters) {
  tensorflow::testing::StopTiming();
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  string function_def = MatMulFunction();
  TFE_ContextAddFunctionDef(ctx, function_def.data(), function_def.size(),
                            status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  TFE_TensorHandle* m = TestMatrixTensorHandle();
  TFE_Op* matmul = TFE_NewOp(ctx, "MatMulFunction", status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpAddInput(matmul, m, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_TensorHandle* retval[1] = {nullptr};
  int num_retvals = 1;
  tensorflow::testing::StartTiming();
  for (int i = 0; i < iters; ++i) {
    TFE_Execute(matmul, &retval[0], &num_retvals, status);
    CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  }
  tensorflow::testing::StopTiming();
  TFE_DeleteTensorHandle(m);
  TFE_DeleteTensorHandle(retval[0]);
  TFE_DeleteContext(ctx, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
}
BENCHMARK(BM_ExecuteFunction);

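// Creates a scalar float resource variable initialized to 'value'. Returns the
// variable handle (owned by the caller), or nullptr with 'status' set on
// failure.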
TFE_TensorHandle* CreateVariable(TFE_Context* ctx, float value,
                                 TF_Status* status) {
  // Create the variable handle.
  TFE_Op* op = TFE_NewOp(ctx, "VarHandleOp", status);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
  TFE_OpSetAttrShape(op, "shape", {}, 0, status);
  TFE_OpSetAttrString(op, "container", "");
  TFE_OpSetAttrString(op, "shared_name", "");
  if (TF_GetCode(status) != TF_OK) return nullptr;
  TFE_TensorHandle* var_handle = nullptr;
  int num_retvals = 1;
  TFE_Execute(op, &var_handle, &num_retvals, status);
  TFE_DeleteOp(op);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  CHECK_EQ(1, num_retvals);

  // Assign 'value' to it.
  op = TFE_NewOp(ctx, "AssignVariableOp", status);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
  TFE_OpAddInput(op, var_handle, status);

  // Convert 'value' to a TF_Tensor then a TFE_TensorHandle.
  std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)> t(
      TF_AllocateTensor(TF_FLOAT, nullptr, 0, sizeof(value)), TF_DeleteTensor);
  memcpy(TF_TensorData(t.get()), &value, TF_TensorByteSize(t.get()));

  std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>
      value_handle(TFE_NewTensorHandle(t.get(), status),
                   TFE_DeleteTensorHandle);
  if (TF_GetCode(status) != TF_OK) return nullptr;

  TFE_OpAddInput(op, value_handle.get(), status);
  if (TF_GetCode(status) != TF_OK) return nullptr;

  num_retvals = 0;
  TFE_Execute(op, nullptr, &num_retvals, status);
  TFE_DeleteOp(op);
  if (TF_GetCode(status) != TF_OK) return nullptr;
  CHECK_EQ(0, num_retvals);

  return var_handle;
}

TEST(CAPI, Variables) {
  // Variables use resource handles, so this is really a test for resource
  // tensor handling.
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* var_handle = CreateVariable(ctx, 12.0, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  TFE_Op* op = TFE_NewOp(ctx, "ReadVariableOp", status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
  TFE_OpAddInput(op, var_handle, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  int num_retvals = 1;
  TFE_TensorHandle* value_handle = nullptr;
  TFE_Execute(op, &value_handle, &num_retvals, status);
  TFE_DeleteOp(op);

  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  ASSERT_EQ(1, num_retvals);
  EXPECT_EQ(TF_FLOAT, TFE_TensorHandleDataType(value_handle));
  EXPECT_EQ(0, TFE_TensorHandleNumDims(value_handle));
  float value = 0.0f;
  TF_Tensor* t = TFE_TensorHandleResolve(value_handle, status);
  ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  ASSERT_EQ(sizeof(float), TF_TensorByteSize(t));
  memcpy(&value, TF_TensorData(t), sizeof(float));
  TF_DeleteTensor(t);
  EXPECT_EQ(12.0, value);

  TFE_DeleteTensorHandle(var_handle);
  TFE_DeleteTensorHandle(value_handle);
  TFE_DeleteContext(ctx, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
}

void BM_ReadVariable(int iters) {
  tensorflow::testing::StopTiming();
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_Context* ctx = TFE_NewContext(opts, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContextOptions(opts);

  TFE_TensorHandle* var_handle = CreateVariable(ctx, 5.0, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  TFE_Op* op = TFE_NewOp(ctx, "ReadVariableOp", status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
  TFE_OpAddInput(op, var_handle, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);

  int num_retvals = 1;
  TFE_TensorHandle* h = nullptr;
  tensorflow::testing::StartTiming();
  for (int i = 0; i < iters; ++i) {
    TFE_Execute(op, &h, &num_retvals, status);
    CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    CHECK_EQ(1, num_retvals);
    CHECK(h);
    CHECK_EQ(TF_FLOAT, TFE_TensorHandleDataType(h));
    CHECK_EQ(0, TFE_TensorHandleNumDims(h));
    // Delete the returned handle so the loop does not leak one handle per
    // iteration.
    TFE_DeleteTensorHandle(h);
    h = nullptr;
  }
  tensorflow::testing::StopTiming();
  TFE_DeleteOp(op);

  TFE_DeleteTensorHandle(var_handle);
  TFE_DeleteContext(ctx, status);
  CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TF_DeleteStatus(status);
}
BENCHMARK(BM_ReadVariable);

}  // namespace