/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
15 
16 #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_H_
17 #define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_H_
18 
#include <functional>
#include <memory>
#include <string>
#include <vector>

#include "tensorflow/core/distributed_runtime/eager/eager_client.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/device_attributes.pb.h"  // for DeviceLocality
#include "tensorflow/core/lib/core/status.h"
27 namespace tensorflow {
28 typedef std::function<void(const Status&)> StatusCallback;
29 
30 class ChannelCache;
31 class StepStats;
32 
33 class WorkerCacheInterface {
34  public:
~WorkerCacheInterface()35   virtual ~WorkerCacheInterface() {}
36 
37   // Updates *workers with strings naming the remote worker tasks to
38   // which open channels have been established.
39   virtual void ListWorkers(std::vector<string>* workers) const = 0;
40   virtual void ListWorkersInJob(const string& job_name,
41                                 std::vector<string>* workers) const = 0;
42 
43   // If "target" names a remote task for which an RPC channel exists
44   // or can be constructed, returns a pointer to a WorkerInterface object
45   // wrapping that channel. The returned value must be destroyed by
46   // calling `this->ReleaseWorker(target, ret)`
47   virtual WorkerInterface* GetOrCreateWorker(const string& target) = 0;
48 
49   // Release a worker previously returned by this->GetOrCreateWorker(target).
50   //
51   // TODO(jeff,sanjay): Consider moving target into WorkerInterface.
52   // TODO(jeff,sanjay): Unify all worker-cache impls and factor out a
53   //                    per-rpc-subsystem WorkerInterface creator.
ReleaseWorker(const string & target,WorkerInterface * worker)54   virtual void ReleaseWorker(const string& target, WorkerInterface* worker) {
55     // Subclasses may override to reuse worker objects.
56     delete worker;
57   }
58 
59   // Set *locality with the DeviceLocality of the specified remote device
60   // within its local environment.  Returns true if *locality
61   // was set, using only locally cached data.  Returns false
62   // if status data for that device was not available.  Never blocks.
63   virtual bool GetDeviceLocalityNonBlocking(const string& device,
64                                             DeviceLocality* locality) = 0;
65 
66   // Set *locality with the DeviceLocality of the specified remote device
67   // within its local environment.  Callback gets Status::OK if *locality
68   // was set.
69   virtual void GetDeviceLocalityAsync(const string& device,
70                                       DeviceLocality* locality,
71                                       StatusCallback done) = 0;
72 
73   // Build and return a EagerClientCache object wrapping that channel.
74   virtual Status GetEagerClientCache(
75       std::unique_ptr<eager::EagerClientCache>* eager_client_cache) = 0;
76 
77   // Start/stop logging activity.
SetLogging(bool active)78   virtual void SetLogging(bool active) {}
79 
80   // Discard any saved log data.
ClearLogs()81   virtual void ClearLogs() {}
82 
83   // Return logs for the identified step in *ss.  Any returned data will no
84   // longer be stored.
RetrieveLogs(int64 step_id,StepStats * ss)85   virtual bool RetrieveLogs(int64 step_id, StepStats* ss) { return false; }
86 };
87 }  // namespace tensorflow
88 #endif  // TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_WORKER_CACHE_H_
89