Home | History | Annotate | Download | only in distributed_runtime
      1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 
      3 Licensed under the Apache License, Version 2.0 (the "License");
      4 you may not use this file except in compliance with the License.
      5 You may obtain a copy of the License at
      6 
      7     http://www.apache.org/licenses/LICENSE-2.0
      8 
      9 Unless required by applicable law or agreed to in writing, software
     10 distributed under the License is distributed on an "AS IS" BASIS,
     11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 See the License for the specific language governing permissions and
     13 limitations under the License.
     14 ==============================================================================*/
     15 
#include "tensorflow/core/distributed_runtime/message_wrappers.h"

#include <utility>

#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/named_tensor.pb.h"
     21 
     22 namespace tensorflow {
     23 
     24 namespace {
     25 
     26 bool ParseTensorProtoToTensor(const TensorProto& tensor_proto,
     27                               Tensor* out_tensor) {
     28   if (tensor_proto.dtype() > 0 && tensor_proto.dtype() <= DataType_MAX) {
     29     Tensor parsed(tensor_proto.dtype());
     30     if (parsed.FromProto(cpu_allocator(), tensor_proto)) {
     31       *out_tensor = parsed;
     32       return true;
     33     }
     34   }
     35   return false;
     36 }
     37 
     38 }  // namespace
     39 
// ----------------------------------------------------------------------------
// InMemoryRunStepRequest: stores feeds, fetches, and targets directly in C++
// members; a RunStepRequest protobuf is only materialized by ToProto().
// ----------------------------------------------------------------------------

const string& InMemoryRunStepRequest::session_handle() const {
  return session_handle_;
}

void InMemoryRunStepRequest::set_session_handle(const string& handle) {
  session_handle_ = handle;
}

const string& InMemoryRunStepRequest::partial_run_handle() const {
  return partial_run_handle_;
}

void InMemoryRunStepRequest::set_partial_run_handle(const string& handle) {
  partial_run_handle_ = handle;
}

size_t InMemoryRunStepRequest::num_feeds() const { return feeds_.size(); }
// NOTE: `i` is not range-checked here or in the other indexed accessors;
// callers must pass i < num_feeds().
const string& InMemoryRunStepRequest::feed_name(size_t i) const {
  return feeds_[i].first;
}

// Copies the i-th feed tensor into `*out_tensor`. Always returns OK.
Status InMemoryRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
  *out_tensor = feeds_[i].second;
  return Status::OK();
}

// Serializes the i-th feed tensor into `*out_tensor`. Always returns OK.
Status InMemoryRunStepRequest::FeedValue(size_t i,
                                         TensorProto* out_tensor) const {
  feeds_[i].second.AsProtoTensorContent(out_tensor);
  return Status::OK();
}

void InMemoryRunStepRequest::add_feed(const string& name, const Tensor& value) {
  feeds_.emplace_back(name, value);
}

size_t InMemoryRunStepRequest::num_fetches() const { return fetches_.size(); }
const string& InMemoryRunStepRequest::fetch_name(size_t i) const {
  return fetches_[i];
}
void InMemoryRunStepRequest::add_fetch(const string& name) {
  fetches_.push_back(name);
}

size_t InMemoryRunStepRequest::num_targets() const { return targets_.size(); }
const string& InMemoryRunStepRequest::target_name(size_t i) const {
  return targets_[i];
}
void InMemoryRunStepRequest::add_target(const string& name) {
  targets_.push_back(name);
}

const RunOptions& InMemoryRunStepRequest::options() const { return options_; }

RunOptions* InMemoryRunStepRequest::mutable_options() { return &options_; }

// When true, the remote peer is asked to report errors in the response body
// rather than via the transport status.
bool InMemoryRunStepRequest::store_errors_in_response_body() const {
  return store_errors_in_response_body_;
}

void InMemoryRunStepRequest::set_store_errors_in_response_body(
    bool store_errors) {
  store_errors_in_response_body_ = store_errors;
}

// Human-readable dump; delegates to the lazily built protobuf form.
string InMemoryRunStepRequest::DebugString() const {
  return ToProto().DebugString();
}
    108 
    109 const RunStepRequest& InMemoryRunStepRequest::ToProto() const {
    110   if (!proto_version_) {
    111     proto_version_.reset(new RunStepRequest);
    112     proto_version_->set_session_handle(session_handle());
    113     proto_version_->set_partial_run_handle(partial_run_handle());
    114     for (size_t i = 0; i < num_feeds(); ++i) {
    115       auto feed = proto_version_->add_feed();
    116       feed->set_name(feed_name(i));
    117       feeds_[i].second.AsProtoTensorContent(feed->mutable_tensor());
    118     }
    119     for (size_t i = 0; i < num_fetches(); ++i) {
    120       proto_version_->add_fetch(fetch_name(i));
    121     }
    122     for (size_t i = 0; i < num_targets(); ++i) {
    123       proto_version_->add_target(target_name(i));
    124     }
    125     *proto_version_->mutable_options() = options();
    126   }
    127   return *proto_version_;
    128 }
    129 
// ----------------------------------------------------------------------------
// MutableProtoRunStepRequest: wrapper that owns a RunStepRequest proto and
// reads/writes its fields directly, so ToProto() is free.
// ----------------------------------------------------------------------------

const string& MutableProtoRunStepRequest::session_handle() const {
  return request_.session_handle();
}
void MutableProtoRunStepRequest::set_session_handle(const string& handle) {
  request_.set_session_handle(handle);
}

const string& MutableProtoRunStepRequest::partial_run_handle() const {
  return request_.partial_run_handle();
}
void MutableProtoRunStepRequest::set_partial_run_handle(const string& handle) {
  request_.set_partial_run_handle(handle);
}

size_t MutableProtoRunStepRequest::num_feeds() const {
  return request_.feed_size();
}
const string& MutableProtoRunStepRequest::feed_name(size_t i) const {
  return request_.feed(i).name();
}
// Parses the i-th feed's TensorProto into `*out_tensor`; fails with
// InvalidArgument when the stored proto has a bad dtype or contents.
Status MutableProtoRunStepRequest::FeedValue(size_t i,
                                             Tensor* out_tensor) const {
  if (!ParseTensorProtoToTensor(request_.feed(i).tensor(), out_tensor)) {
    return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
  } else {
    return Status::OK();
  }
}

// Copies the i-th feed's TensorProto verbatim (no validation).
Status MutableProtoRunStepRequest::FeedValue(size_t i,
                                             TensorProto* out_tensor) const {
  *out_tensor = request_.feed(i).tensor();
  return Status::OK();
}

void MutableProtoRunStepRequest::add_feed(const string& name,
                                          const Tensor& value) {
  NamedTensorProto* feed = request_.add_feed();
  feed->set_name(name);
  TensorProto* value_proto = feed->mutable_tensor();
  value.AsProtoTensorContent(value_proto);
}

size_t MutableProtoRunStepRequest::num_fetches() const {
  return request_.fetch_size();
}

const string& MutableProtoRunStepRequest::fetch_name(size_t i) const {
  return request_.fetch(i);
}
void MutableProtoRunStepRequest::add_fetch(const string& name) {
  request_.add_fetch(name);
}

size_t MutableProtoRunStepRequest::num_targets() const {
  return request_.target_size();
}

const string& MutableProtoRunStepRequest::target_name(size_t i) const {
  return request_.target(i);
}

void MutableProtoRunStepRequest::add_target(const string& name) {
  request_.add_target(name);
}

const RunOptions& MutableProtoRunStepRequest::options() const {
  return request_.options();
}

RunOptions* MutableProtoRunStepRequest::mutable_options() {
  return request_.mutable_options();
}

bool MutableProtoRunStepRequest::store_errors_in_response_body() const {
  return request_.store_errors_in_response_body();
}

void MutableProtoRunStepRequest::set_store_errors_in_response_body(
    bool store_errors) {
  request_.set_store_errors_in_response_body(store_errors);
}

string MutableProtoRunStepRequest::DebugString() const {
  return request_.DebugString();
}

// The proto is the backing store, so this is a direct reference return.
const RunStepRequest& MutableProtoRunStepRequest::ToProto() const {
  return request_;
}
    220 
// ----------------------------------------------------------------------------
// ProtoRunStepRequest: read-only view over a borrowed RunStepRequest*.
// The pointer is not owned (never deleted here); the caller must keep the
// proto alive for the lifetime of this wrapper.
// ----------------------------------------------------------------------------

ProtoRunStepRequest::ProtoRunStepRequest(const RunStepRequest* request)
    : request_(request) {}

const string& ProtoRunStepRequest::session_handle() const {
  return request_->session_handle();
}

const string& ProtoRunStepRequest::partial_run_handle() const {
  return request_->partial_run_handle();
}

size_t ProtoRunStepRequest::num_feeds() const { return request_->feed_size(); }

const string& ProtoRunStepRequest::feed_name(size_t i) const {
  return request_->feed(i).name();
}

// Parses the i-th feed's TensorProto into `*out_tensor`; fails with
// InvalidArgument when the stored proto has a bad dtype or contents.
Status ProtoRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
  if (!ParseTensorProtoToTensor(request_->feed(i).tensor(), out_tensor)) {
    return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
  } else {
    return Status::OK();
  }
}

// Copies the i-th feed's TensorProto verbatim (no validation).
Status ProtoRunStepRequest::FeedValue(size_t i, TensorProto* out_tensor) const {
  *out_tensor = request_->feed(i).tensor();
  return Status::OK();
}

size_t ProtoRunStepRequest::num_fetches() const {
  return request_->fetch_size();
}

const string& ProtoRunStepRequest::fetch_name(size_t i) const {
  return request_->fetch(i);
}

size_t ProtoRunStepRequest::num_targets() const {
  return request_->target_size();
}

const string& ProtoRunStepRequest::target_name(size_t i) const {
  return request_->target(i);
}

const RunOptions& ProtoRunStepRequest::options() const {
  return request_->options();
}

bool ProtoRunStepRequest::store_errors_in_response_body() const {
  return request_->store_errors_in_response_body();
}

string ProtoRunStepRequest::DebugString() const {
  return request_->DebugString();
}

const RunStepRequest& ProtoRunStepRequest::ToProto() const { return *request_; }
    280 
// ----------------------------------------------------------------------------
// InMemoryRunGraphRequest: stores sends/recvs and step metadata in C++
// members; a RunGraphRequest protobuf is only materialized by ToProto().
// ----------------------------------------------------------------------------

const string& InMemoryRunGraphRequest::session_handle() const {
  return session_handle_;
}

void InMemoryRunGraphRequest::set_session_handle(const string& handle) {
  session_handle_ = handle;
}

const string& InMemoryRunGraphRequest::graph_handle() const {
  return graph_handle_;
}

void InMemoryRunGraphRequest::set_graph_handle(const string& handle) {
  graph_handle_ = handle;
}

int64 InMemoryRunGraphRequest::step_id() const { return step_id_; }

void InMemoryRunGraphRequest::set_step_id(int64 step_id) { step_id_ = step_id; }

const ExecutorOpts& InMemoryRunGraphRequest::exec_opts() const {
  return exec_opts_;
}

ExecutorOpts* InMemoryRunGraphRequest::mutable_exec_opts() {
  return &exec_opts_;
}

size_t InMemoryRunGraphRequest::num_sends() const { return sends_.size(); }

// NOTE: `i` is not range-checked; callers must pass i < num_sends().
const string& InMemoryRunGraphRequest::send_key(size_t i) const {
  return sends_[i].first;
}

// Copies the i-th send tensor into `*out_tensor`. Always returns OK.
Status InMemoryRunGraphRequest::SendValue(size_t i, Tensor* out_tensor) const {
  *out_tensor = sends_[i].second;
  return Status::OK();
}

// Extracts feed `i` from `run_step_request` and records it as a send under
// `send_key`. Propagates any error from FeedValue.
Status InMemoryRunGraphRequest::AddSendFromRunStepRequest(
    const RunStepRequestWrapper& run_step_request, size_t i,
    const string& send_key) {
  Tensor tensor;
  TF_RETURN_IF_ERROR(run_step_request.FeedValue(i, &tensor));
  sends_.emplace_back(send_key, std::move(tensor));
  return Status::OK();
}

size_t InMemoryRunGraphRequest::num_recvs() const { return recvs_.size(); }

const string& InMemoryRunGraphRequest::recv_key(size_t i) const {
  return recvs_[i];
}

void InMemoryRunGraphRequest::add_recv_key(const string& recv_key) {
  recvs_.push_back(recv_key);
}

bool InMemoryRunGraphRequest::is_partial() const { return is_partial_; }

void InMemoryRunGraphRequest::set_is_partial(bool is_partial) {
  is_partial_ = is_partial;
}

bool InMemoryRunGraphRequest::is_last_partial_run() const {
  return is_last_partial_run_;
}

void InMemoryRunGraphRequest::set_is_last_partial_run(
    bool is_last_partial_run) {
  is_last_partial_run_ = is_last_partial_run;
}

bool InMemoryRunGraphRequest::store_errors_in_response_body() const {
  return store_errors_in_response_body_;
}

void InMemoryRunGraphRequest::set_store_errors_in_response_body(
    bool store_errors) {
  store_errors_in_response_body_ = store_errors;
}
    362 
    363 const RunGraphRequest& InMemoryRunGraphRequest::ToProto() const {
    364   if (!proto_version_) {
    365     proto_version_.reset(new RunGraphRequest);
    366     proto_version_->set_session_handle(session_handle());
    367     proto_version_->set_graph_handle(graph_handle());
    368     proto_version_->set_step_id(step_id());
    369     *proto_version_->mutable_exec_opts() = exec_opts();
    370     for (size_t i = 0; i < num_sends(); ++i) {
    371       auto send = proto_version_->add_send();
    372       send->set_name(send_key(i));
    373       sends_[i].second.AsProtoTensorContent(send->mutable_tensor());
    374     }
    375     for (size_t i = 0; i < num_recvs(); ++i) {
    376       proto_version_->add_recv_key(recv_key(i));
    377     }
    378     proto_version_->set_is_partial(is_partial());
    379     proto_version_->set_is_last_partial_run(is_last_partial_run());
    380   }
    381   return *proto_version_;
    382 }
    383 
// ----------------------------------------------------------------------------
// MutableProtoRunGraphRequest: wrapper that owns a RunGraphRequest proto and
// reads/writes its fields directly, so ToProto() is free.
// ----------------------------------------------------------------------------

const string& MutableProtoRunGraphRequest::session_handle() const {
  return request_.session_handle();
}

void MutableProtoRunGraphRequest::set_session_handle(const string& handle) {
  request_.set_session_handle(handle);
}

const string& MutableProtoRunGraphRequest::graph_handle() const {
  return request_.graph_handle();
}

void MutableProtoRunGraphRequest::set_graph_handle(const string& handle) {
  request_.set_graph_handle(handle);
}

int64 MutableProtoRunGraphRequest::step_id() const {
  return request_.step_id();
}

void MutableProtoRunGraphRequest::set_step_id(int64 step_id) {
  request_.set_step_id(step_id);
}

const ExecutorOpts& MutableProtoRunGraphRequest::exec_opts() const {
  return request_.exec_opts();
}

ExecutorOpts* MutableProtoRunGraphRequest::mutable_exec_opts() {
  return request_.mutable_exec_opts();
}

size_t MutableProtoRunGraphRequest::num_sends() const {
  return request_.send_size();
}

const string& MutableProtoRunGraphRequest::send_key(size_t i) const {
  return request_.send(i).name();
}
    423 
    424 Status MutableProtoRunGraphRequest::SendValue(size_t i,
    425                                               Tensor* out_tensor) const {
    426   if (!ParseTensorProtoToTensor(request_.send(i).tensor(), out_tensor)) {
    427     return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
    428   } else {
    429     return Status::OK();
    430   }
    431 }
    432 
// Extracts feed `i` from `run_step_request` directly into a newly added
// send entry named `send_key`. Propagates any error from FeedValue.
// NOTE: the send entry is added (with its name set) before FeedValue runs,
// so on failure a partially filled entry remains in the proto.
Status MutableProtoRunGraphRequest::AddSendFromRunStepRequest(
    const RunStepRequestWrapper& run_step_request, size_t i,
    const string& send_key) {
  NamedTensorProto* send = request_.add_send();
  send->set_name(send_key);
  TF_RETURN_IF_ERROR(run_step_request.FeedValue(i, send->mutable_tensor()));
  return Status::OK();
}

size_t MutableProtoRunGraphRequest::num_recvs() const {
  return request_.recv_key_size();
}

const string& MutableProtoRunGraphRequest::recv_key(size_t i) const {
  return request_.recv_key(i);
}

void MutableProtoRunGraphRequest::add_recv_key(const string& recv_key) {
  request_.add_recv_key(recv_key);
}

bool MutableProtoRunGraphRequest::is_partial() const {
  return request_.is_partial();
}

void MutableProtoRunGraphRequest::set_is_partial(bool is_partial) {
  request_.set_is_partial(is_partial);
}

bool MutableProtoRunGraphRequest::is_last_partial_run() const {
  return request_.is_last_partial_run();
}

void MutableProtoRunGraphRequest::set_is_last_partial_run(
    bool is_last_partial_run) {
  request_.set_is_last_partial_run(is_last_partial_run);
}

bool MutableProtoRunGraphRequest::store_errors_in_response_body() const {
  return request_.store_errors_in_response_body();
}

void MutableProtoRunGraphRequest::set_store_errors_in_response_body(
    bool store_errors) {
  request_.set_store_errors_in_response_body(store_errors);
}

// The proto is the backing store, so this is a direct reference return.
const RunGraphRequest& MutableProtoRunGraphRequest::ToProto() const {
  return request_;
}
    483 
// ----------------------------------------------------------------------------
// ProtoRunGraphRequest: read-only view over a borrowed RunGraphRequest*.
// The pointer is not owned (never deleted here); the caller must keep the
// proto alive for the lifetime of this wrapper.
// ----------------------------------------------------------------------------

ProtoRunGraphRequest::ProtoRunGraphRequest(const RunGraphRequest* request)
    : request_(request) {}

const string& ProtoRunGraphRequest::session_handle() const {
  return request_->session_handle();
}

const string& ProtoRunGraphRequest::graph_handle() const {
  return request_->graph_handle();
}

int64 ProtoRunGraphRequest::step_id() const { return request_->step_id(); }

const ExecutorOpts& ProtoRunGraphRequest::exec_opts() const {
  return request_->exec_opts();
}

size_t ProtoRunGraphRequest::num_sends() const { return request_->send_size(); }

const string& ProtoRunGraphRequest::send_key(size_t i) const {
  return request_->send(i).name();
}
    506 
    507 Status ProtoRunGraphRequest::SendValue(size_t i, Tensor* out_tensor) const {
    508   if (!ParseTensorProtoToTensor(request_->send(i).tensor(), out_tensor)) {
    509     return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
    510   } else {
    511     return Status::OK();
    512   }
    513 }
    514 
size_t ProtoRunGraphRequest::num_recvs() const {
  return request_->recv_key_size();
}

const string& ProtoRunGraphRequest::recv_key(size_t i) const {
  return request_->recv_key(i);
}

bool ProtoRunGraphRequest::is_partial() const { return request_->is_partial(); }

bool ProtoRunGraphRequest::is_last_partial_run() const {
  return request_->is_last_partial_run();
}

bool ProtoRunGraphRequest::store_errors_in_response_body() const {
  return request_->store_errors_in_response_body();
}

// Returns the borrowed proto itself; no conversion is needed.
const RunGraphRequest& ProtoRunGraphRequest::ToProto() const {
  return *request_;
}
    536 
// ----------------------------------------------------------------------------
// InMemoryRunGraphResponse: stores received tensors, stats, and status in
// C++ members. It deliberately has no protobuf representation: get_proto()
// is a fatal error.
// ----------------------------------------------------------------------------

size_t InMemoryRunGraphResponse::num_recvs() const { return recvs_.size(); }

// NOTE: `i` is not range-checked; callers must pass i < num_recvs().
const string& InMemoryRunGraphResponse::recv_key(size_t i) const {
  return recvs_[i].first;
}

// Serializes the i-th recv tensor into `*out_tensor`. Always returns OK.
Status InMemoryRunGraphResponse::RecvValue(size_t i, TensorProto* out_tensor) {
  recvs_[i].second.AsProtoTensorContent(out_tensor);
  return Status::OK();
}

// Copies the i-th recv tensor into `*out_tensor`. Always returns OK.
Status InMemoryRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
  *out_tensor = recvs_[i].second;
  return Status::OK();
}

void InMemoryRunGraphResponse::AddRecv(const string& key, const Tensor& value) {
  recvs_.emplace_back(key, value);
}

StepStats* InMemoryRunGraphResponse::mutable_step_stats() {
  return &step_stats_;
}

CostGraphDef* InMemoryRunGraphResponse::mutable_cost_graph() {
  return &cost_graph_;
}

errors::Code InMemoryRunGraphResponse::status_code() const {
  return status_.code();
}

const string& InMemoryRunGraphResponse::status_error_message() const {
  return status_.error_message();
}

void InMemoryRunGraphResponse::set_status(const Status& status) {
  status_ = status;
}

// In-memory responses have no proto backing store; calling this is a
// programming error and aborts the process.
RunGraphResponse* InMemoryRunGraphResponse::get_proto() {
  LOG(FATAL) << "Cannot get a mutable protobuf for an InMemoryRunGraphResponse";
  return nullptr;
}

size_t InMemoryRunGraphResponse::num_partition_graphs() const {
  return partition_graphs_.size();
}

GraphDef* InMemoryRunGraphResponse::mutable_partition_graph(size_t i) {
  return &partition_graphs_[i];
}

void InMemoryRunGraphResponse::AddPartitionGraph(
    const GraphDef& partition_graph) {
  partition_graphs_.push_back(partition_graph);
}
    594 
// ----------------------------------------------------------------------------
// OwnedProtoRunGraphResponse: wrapper that owns a RunGraphResponse proto and
// reads/writes its fields directly.
// ----------------------------------------------------------------------------

size_t OwnedProtoRunGraphResponse::num_recvs() const {
  return response_.recv_size();
}

const string& OwnedProtoRunGraphResponse::recv_key(size_t i) const {
  return response_.recv(i).name();
}

// Moves the i-th recv's TensorProto into `*out_tensor` via Swap().
// NOTE: destructive — after this call the stored recv tensor is replaced by
// `out_tensor`'s previous contents.
Status OwnedProtoRunGraphResponse::RecvValue(size_t i,
                                             TensorProto* out_tensor) {
  out_tensor->Swap(response_.mutable_recv(i)->mutable_tensor());
  return Status::OK();
}

// Parses the i-th recv's TensorProto into `*out_tensor`; fails with
// InvalidArgument when the stored proto has a bad dtype or contents.
Status OwnedProtoRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
  if (!ParseTensorProtoToTensor(response_.recv(i).tensor(), out_tensor)) {
    return errors::InvalidArgument("Invalid TensorProto for recv value ", i);
  } else {
    return Status::OK();
  }
}

void OwnedProtoRunGraphResponse::AddRecv(const string& key,
                                         const Tensor& value) {
  NamedTensorProto* recv = response_.add_recv();
  recv->set_name(key);
  TensorProto* value_proto = recv->mutable_tensor();
  value.AsProtoTensorContent(value_proto);
}

StepStats* OwnedProtoRunGraphResponse::mutable_step_stats() {
  return response_.mutable_step_stats();
}

CostGraphDef* OwnedProtoRunGraphResponse::mutable_cost_graph() {
  return response_.mutable_cost_graph();
}

errors::Code OwnedProtoRunGraphResponse::status_code() const {
  return response_.status_code();
}

const string& OwnedProtoRunGraphResponse::status_error_message() const {
  return response_.status_error_message();
}

// Stores the status as separate code/message fields in the proto.
void OwnedProtoRunGraphResponse::set_status(const Status& status) {
  response_.set_status_code(status.code());
  response_.set_status_error_message(status.error_message());
}

RunGraphResponse* OwnedProtoRunGraphResponse::get_proto() { return &response_; }

size_t OwnedProtoRunGraphResponse::num_partition_graphs() const {
  return response_.partition_graph_size();
}

GraphDef* OwnedProtoRunGraphResponse::mutable_partition_graph(size_t i) {
  return response_.mutable_partition_graph(i);
}
    655 
    656 void OwnedProtoRunGraphResponse::AddPartitionGraph(
    657     const GraphDef& partition_graph) {
    658   GraphDef* graph_def = response_.mutable_partition_graph()->Add();
    659   *graph_def = partition_graph;
    660 }
    661 
// ----------------------------------------------------------------------------
// NonOwnedProtoRunGraphResponse: same behavior as the owned variant, but over
// a borrowed RunGraphResponse* (e.g. an RPC-arena-owned message). The pointer
// is not owned; the caller must keep it alive for this wrapper's lifetime.
// ----------------------------------------------------------------------------

NonOwnedProtoRunGraphResponse::NonOwnedProtoRunGraphResponse(
    RunGraphResponse* response)
    : response_(response) {}

size_t NonOwnedProtoRunGraphResponse::num_recvs() const {
  return response_->recv_size();
}

const string& NonOwnedProtoRunGraphResponse::recv_key(size_t i) const {
  return response_->recv(i).name();
}

// Moves the i-th recv's TensorProto into `*out_tensor` via Swap().
// NOTE: destructive — after this call the stored recv tensor is replaced by
// `out_tensor`'s previous contents.
Status NonOwnedProtoRunGraphResponse::RecvValue(size_t i,
                                                TensorProto* out_tensor) {
  out_tensor->Swap(response_->mutable_recv(i)->mutable_tensor());
  return Status::OK();
}

// Parses the i-th recv's TensorProto into `*out_tensor`; fails with
// InvalidArgument when the stored proto has a bad dtype or contents.
Status NonOwnedProtoRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
  if (!ParseTensorProtoToTensor(response_->recv(i).tensor(), out_tensor)) {
    return errors::InvalidArgument("Invalid TensorProto for recv value ", i);
  } else {
    return Status::OK();
  }
}

void NonOwnedProtoRunGraphResponse::AddRecv(const string& key,
                                            const Tensor& value) {
  NamedTensorProto* recv = response_->add_recv();
  recv->set_name(key);
  TensorProto* value_proto = recv->mutable_tensor();
  value.AsProtoTensorContent(value_proto);
}

StepStats* NonOwnedProtoRunGraphResponse::mutable_step_stats() {
  return response_->mutable_step_stats();
}

CostGraphDef* NonOwnedProtoRunGraphResponse::mutable_cost_graph() {
  return response_->mutable_cost_graph();
}

errors::Code NonOwnedProtoRunGraphResponse::status_code() const {
  return response_->status_code();
}

const string& NonOwnedProtoRunGraphResponse::status_error_message() const {
  return response_->status_error_message();
}

// Stores the status as separate code/message fields in the proto.
void NonOwnedProtoRunGraphResponse::set_status(const Status& status) {
  response_->set_status_code(status.code());
  response_->set_status_error_message(status.error_message());
}

RunGraphResponse* NonOwnedProtoRunGraphResponse::get_proto() {
  return response_;
}

size_t NonOwnedProtoRunGraphResponse::num_partition_graphs() const {
  return response_->partition_graph_size();
}

GraphDef* NonOwnedProtoRunGraphResponse::mutable_partition_graph(size_t i) {
  return response_->mutable_partition_graph(i);
}

void NonOwnedProtoRunGraphResponse::AddPartitionGraph(
    const GraphDef& partition_graph) {
  GraphDef* graph_def = response_->add_partition_graph();
  *graph_def = partition_graph;
}
    734 
// Out-of-line virtual destructor anchor for the abstract response interface.
MutableRunStepResponseWrapper::~MutableRunStepResponseWrapper() {}

// ----------------------------------------------------------------------------
// InMemoryRunStepResponse: stores fetched tensors, metadata, and status in
// C++ members; has no protobuf backing store (see get_proto()).
// ----------------------------------------------------------------------------

size_t InMemoryRunStepResponse::num_tensors() const { return tensors_.size(); }

// NOTE: `i` is not range-checked; callers must pass i < num_tensors().
const string& InMemoryRunStepResponse::tensor_name(size_t i) const {
  return tensors_[i].first;
}

// Copies the i-th fetched tensor into `*out_tensor`. Always returns OK.
Status InMemoryRunStepResponse::TensorValue(size_t i,
                                            Tensor* out_tensor) const {
  *out_tensor = tensors_[i].second;
  return Status::OK();
}

const RunMetadata& InMemoryRunStepResponse::metadata() const {
  return metadata_;
}
    752 
    753 Status InMemoryRunStepResponse::AddTensorFromRunGraphResponse(
    754     const string& name, MutableRunGraphResponseWrapper* wrapper, size_t i) {
    755   Tensor tensor;
    756   TF_RETURN_IF_ERROR(wrapper->RecvValue(i, &tensor));
    757   tensors_.emplace_back(name, tensor);
    758   return Status::OK();
    759 }
    760 
RunMetadata* InMemoryRunStepResponse::mutable_metadata() { return &metadata_; }

errors::Code InMemoryRunStepResponse::status_code() const {
  return status_.code();
}

const string& InMemoryRunStepResponse::status_error_message() const {
  return status_.error_message();
}

void InMemoryRunStepResponse::set_status(const Status& status) {
  status_ = status;
}

// In-memory responses have no proto backing store; calling this is a
// programming error and aborts the process.
RunStepResponse* InMemoryRunStepResponse::get_proto() {
  LOG(FATAL) << "Cannot get a mutable protobuf for an InMemoryRunStepResponse";
  return nullptr;
}
    779 
// ----------------------------------------------------------------------------
// OwnedProtoRunStepResponse: wrapper that owns a RunStepResponse proto and
// reads/writes its fields directly.
// ----------------------------------------------------------------------------

size_t OwnedProtoRunStepResponse::num_tensors() const {
  return response_.tensor_size();
}

const string& OwnedProtoRunStepResponse::tensor_name(size_t i) const {
  return response_.tensor(i).name();
}

// Parses the i-th fetched TensorProto into `*out_tensor`; fails with
// InvalidArgument when the stored proto has a bad dtype or contents.
Status OwnedProtoRunStepResponse::TensorValue(size_t i,
                                              Tensor* out_tensor) const {
  if (!ParseTensorProtoToTensor(response_.tensor(i).tensor(), out_tensor)) {
    return errors::InvalidArgument("Invalid TensorProto for fetch value ", i);
  } else {
    return Status::OK();
  }
}

const RunMetadata& OwnedProtoRunStepResponse::metadata() const {
  return response_.metadata();
}

// Adds a tensor entry named `name`, then fills it from recv `i` of
// `run_graph_response`. NOTE: the entry is added before RecvValue runs, so
// on failure a partially filled entry remains in the proto.
Status OwnedProtoRunStepResponse::AddTensorFromRunGraphResponse(
    const string& name, MutableRunGraphResponseWrapper* run_graph_response,
    size_t i) {
  NamedTensorProto* response_tensor = response_.add_tensor();
  response_tensor->set_name(name);
  return run_graph_response->RecvValue(i, response_tensor->mutable_tensor());
}

RunMetadata* OwnedProtoRunStepResponse::mutable_metadata() {
  return response_.mutable_metadata();
}

errors::Code OwnedProtoRunStepResponse::status_code() const {
  return response_.status_code();
}

const string& OwnedProtoRunStepResponse::status_error_message() const {
  return response_.status_error_message();
}

// Stores the status as separate code/message fields in the proto.
void OwnedProtoRunStepResponse::set_status(const Status& status) {
  response_.set_status_code(status.code());
  response_.set_status_error_message(status.error_message());
}

RunStepResponse* OwnedProtoRunStepResponse::get_proto() { return &response_; }
    827 
// ----------------------------------------------------------------------------
// NonOwnedProtoRunStepResponse: same behavior as the owned variant, but over
// a borrowed RunStepResponse*. The pointer is not owned; the caller must keep
// it alive for this wrapper's lifetime.
// ----------------------------------------------------------------------------

NonOwnedProtoRunStepResponse::NonOwnedProtoRunStepResponse(
    RunStepResponse* response)
    : response_(response) {}

size_t NonOwnedProtoRunStepResponse::num_tensors() const {
  return response_->tensor_size();
}

const string& NonOwnedProtoRunStepResponse::tensor_name(size_t i) const {
  return response_->tensor(i).name();
}

// Parses the i-th fetched TensorProto into `*out_tensor`; fails with
// InvalidArgument when the stored proto has a bad dtype or contents.
Status NonOwnedProtoRunStepResponse::TensorValue(size_t i,
                                                 Tensor* out_tensor) const {
  if (!ParseTensorProtoToTensor(response_->tensor(i).tensor(), out_tensor)) {
    return errors::InvalidArgument("Invalid TensorProto for fetch value ", i);
  } else {
    return Status::OK();
  }
}

const RunMetadata& NonOwnedProtoRunStepResponse::metadata() const {
  return response_->metadata();
}

// Adds a tensor entry named `name`, then fills it from recv `i` of
// `run_graph_response`. NOTE: the entry is added before RecvValue runs, so
// on failure a partially filled entry remains in the proto.
Status NonOwnedProtoRunStepResponse::AddTensorFromRunGraphResponse(
    const string& name, MutableRunGraphResponseWrapper* run_graph_response,
    size_t i) {
  NamedTensorProto* response_tensor = response_->add_tensor();
  response_tensor->set_name(name);
  return run_graph_response->RecvValue(i, response_tensor->mutable_tensor());
}

RunMetadata* NonOwnedProtoRunStepResponse::mutable_metadata() {
  return response_->mutable_metadata();
}

errors::Code NonOwnedProtoRunStepResponse::status_code() const {
  return response_->status_code();
}

const string& NonOwnedProtoRunStepResponse::status_error_message() const {
  return response_->status_error_message();
}

// Stores the status as separate code/message fields in the proto.
void NonOwnedProtoRunStepResponse::set_status(const Status& status) {
  response_->set_status_code(status.code());
  response_->set_status_error_message(status.error_message());
}

RunStepResponse* NonOwnedProtoRunStepResponse::get_proto() { return response_; }
    879 
    880 }  // namespace tensorflow
    881