/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_THUNK_SCHEDULE_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_THUNK_SCHEDULE_H_

#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/types.h"

namespace xla {
namespace gpu {

// Encapsulates in which order and on which streams the thunks are executed. A
// schedule contains
//
// 1. A stream assignment indicating which stream each thunk is executed on.
//
// 2. A total order of all thunks. If A is ordered before B and they are
// assigned to the same stream, then A completes before B starts. If A is
// ordered before B and they are on different streams, their actual execution
// order is not determined.
//
// 3. A set of dependency edges. If A and B are scheduled on different streams
// and A has to complete before B starts (e.g. A produces an input of B), then B
// "depends" on A.
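//
// A minimal usage sketch (illustrative only, not the actual GPU backend code;
// `thunk_sequence`, `stream_assignment`, and `hlo_total_order` are placeholder
// names for values produced elsewhere):
//
//   ThunkSchedule schedule(std::move(thunk_sequence),
//                          std::move(stream_assignment), hlo_total_order);
//   for (Thunk* thunk : schedule.TotalOrder()) {
//     // Before launching `thunk`, wait for every thunk it depends on; such
//     // dependencies run on other streams.
//     for (const Thunk* dependency : schedule.DependsOn(thunk)) {
//       // e.g. make `thunk`'s stream wait for `dependency` to finish.
//     }
//     // ... then launch `thunk` on its assigned stream.
//   }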
class ThunkSchedule {
 public:
  ThunkSchedule(std::unique_ptr<ThunkSequence> thunks,
                std::unique_ptr<StreamAssignment> stream_assignment,
                const std::vector<HloInstruction*>& hlo_total_order);

  // Returns the total order of executing all the thunks.
  const std::vector<Thunk*>& TotalOrder() const { return thunk_total_order_; }

  // Thunks that `thunk` depends on.
  const std::list<const Thunk*>& DependsOn(const Thunk* thunk) const;
  // Whether another thunk depends on `thunk`.
  bool Depended(const Thunk* thunk) const {
    return depended_by_.contains(thunk);
  }

  // Delegates to StreamAssignment.
  int StreamCount() const { return stream_assignment_->StreamCount(); }
  int StreamNumberForHlo(const HloInstruction& hlo) const {
    return stream_assignment_->StreamNumberForHlo(hlo);
  }

  string ToString() const;

 private:
  void RemoveRedundantDependencyEdges();

  // Adds `operand` and its transitive operands to the dependency list of
  // `thunk`.
  //
  // Precondition: `operand` is a non-trivial (i.e. excluding
  // thunk.hlo_instruction() itself) transitive operand of
  // thunk.hlo_instruction().
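  //
  // For example (an illustrative HLO snippet, not taken from this file): if
  // thunk.hlo_instruction() is `c = add(a, b)` and `a = multiply(x, y)`, then
  // `a`, `b`, `x`, and `y` are all valid `operand` arguments here, while `c`
  // itself is not.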
  void AddDependenciesOnTransitiveOperands(
      const Thunk& thunk, const HloInstruction& operand,
      const absl::flat_hash_map<const HloInstruction*, Thunk*>& hlo_to_thunk);

  std::unique_ptr<ThunkSequence> thunks_;
  std::vector<Thunk*> thunk_total_order_;

  absl::flat_hash_map<const Thunk*, std::list<const Thunk*>> depends_on_;
  absl::flat_hash_set<const Thunk*> depended_by_;
  std::list<const Thunk*> empty_thunk_list_;

  std::unique_ptr<StreamAssignment> stream_assignment_;
};

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_THUNK_SCHEDULE_H_