/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_WRAPPER_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_WRAPPER_H_

#include <string>
#include <utility>
#include <vector>

#include "tensorflow/core/distributed_runtime/worker_cache.h"

namespace tensorflow {

// Forwards every WorkerCacheInterface call to a wrapped cache; subclass it to
// override individual methods while delegating the rest.
class WorkerCacheWrapper : public WorkerCacheInterface {
 public:
  // Does not take ownership of `wrapped`, which must outlive this wrapper.
  explicit WorkerCacheWrapper(WorkerCacheInterface* wrapped)
      : wrapped_(wrapped) {}

  // Updates *workers with strings naming the remote worker tasks to
  // which open channels have been established.
  void ListWorkers(std::vector<string>* workers) const override {
    return wrapped_->ListWorkers(workers);
  }

  // If "target" names a remote task for which an RPC channel exists
  // or can be constructed, returns a pointer to a WorkerInterface object
  // wrapping that channel. The returned value must be destroyed by
  // calling `this->ReleaseWorker(target, ret)`.
  // TODO(mrry): Rename this to GetOrCreateWorker() or something that
  // makes it more obvious that this method returns a potentially
  // shared object.
  WorkerInterface* CreateWorker(const string& target) override {
    return wrapped_->CreateWorker(target);
  }

  // Release a worker previously returned by this->CreateWorker(target).
  //
  // TODO(jeff,sanjay): Consider moving target into WorkerInterface.
  // TODO(jeff,sanjay): Unify all worker-cache impls and factor out a
  //                    per-rpc-subsystem WorkerInterface creator.
  void ReleaseWorker(const string& target, WorkerInterface* worker) override {
    return wrapped_->ReleaseWorker(target, worker);
  }
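
  // Illustrative usage sketch (not part of this header): every pointer
  // obtained from CreateWorker() must be handed back to the same cache via
  // ReleaseWorker(). The task name below is a hypothetical example.
  //
  //   WorkerInterface* wi = cache->CreateWorker("/job:worker/replica:0/task:1");
  //   // ... issue RPCs through wi ...
  //   cache->ReleaseWorker("/job:worker/replica:0/task:1", wi);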

  // Set *locality with the DeviceLocality of the specified remote device
  // within its local environment.  Returns true if *locality
  // was set, using only locally cached data.  Returns false
  // if status data for that device was not available.  Never blocks.
  bool GetDeviceLocalityNonBlocking(const string& device,
                                    DeviceLocality* locality) override {
    return wrapped_->GetDeviceLocalityNonBlocking(device, locality);
  }

  // Set *locality with the DeviceLocality of the specified remote device
  // within its local environment.  Callback gets Status::OK if *locality
  // was set.
  void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
                              StatusCallback done) override {
    return wrapped_->GetDeviceLocalityAsync(device, locality, std::move(done));
  }
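
  // Illustrative usage sketch (not part of this header): `done` runs once
  // *locality has been filled in, so the caller must keep `locality` alive
  // until then. The device name below is a hypothetical example.
  //
  //   DeviceLocality locality;
  //   Notification n;
  //   cache->GetDeviceLocalityAsync(
  //       "/job:worker/replica:0/task:1/device:CPU:0", &locality,
  //       [&n](const Status& s) {
  //         if (!s.ok()) LOG(ERROR) << s;
  //         n.Notify();
  //       });
  //   n.WaitForNotification();  // `locality` is valid once notified.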

  // Start/stop logging activity.
  void SetLogging(bool active) override { wrapped_->SetLogging(active); }

  // Discard any saved log data.
  void ClearLogs() override { wrapped_->ClearLogs(); }

  // Return logs for the identified step in *ss.  Any returned data will no
  // longer be stored.
  bool RetrieveLogs(int64 step_id, StepStats* ss) override {
    return wrapped_->RetrieveLogs(step_id, ss);
  }

 private:
  WorkerCacheInterface* wrapped_;  // Not owned.
};
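
// Illustrative sketch (not part of this header): a subclass can override a
// single method and let WorkerCacheWrapper forward everything else to the
// wrapped cache. The class name and prefix filter below are hypothetical.
//
//   class PrefixFilteredWorkerCache : public WorkerCacheWrapper {
//    public:
//     PrefixFilteredWorkerCache(WorkerCacheInterface* wrapped, string prefix)
//         : WorkerCacheWrapper(wrapped), prefix_(std::move(prefix)) {}
//
//     // Report only workers whose task name starts with `prefix_`; all other
//     // calls fall through to the wrapped cache via WorkerCacheWrapper.
//     void ListWorkers(std::vector<string>* workers) const override {
//       std::vector<string> all;
//       WorkerCacheWrapper::ListWorkers(&all);
//       workers->clear();
//       for (const string& w : all) {
//         if (w.compare(0, prefix_.size(), prefix_) == 0) workers->push_back(w);
//       }
//     }
//
//    private:
//     const string prefix_;
//   };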
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_WRAPPER_H_