/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/while_loop_invariant_code_motion.h"

#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "tensorflow/compiler/xla/service/tuple_util.h"
#include "tensorflow/compiler/xla/service/while_loop_analysis.h"
#include "tensorflow/compiler/xla/service/while_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/util.h"

namespace xla {

using absl::flat_hash_map;
using absl::flat_hash_set;
using absl::InlinedVector;

33 // Copies `to_hoist` to the computation containing `while_instr`, hoisting its
34 // operands as needed. All of its transitive operands are expected to be either
35 // in `hoisted_instructions` or `unhoisted_invariant_instructions`. This
36 // function hoists the operands in `unhoisted_invariant_instructions` and moves
37 // them into `hoisted_instructions`.
CreateLoopInvariantCopy(flat_hash_map<HloInstruction *,HloInstruction * > * hoisted_instructions,flat_hash_set<HloInstruction * > * unhoisted_invariant_instructions,HloInstruction * while_instr,HloInstruction * to_hoist)38 static void CreateLoopInvariantCopy(
39 flat_hash_map<HloInstruction*, HloInstruction*>* hoisted_instructions,
40 flat_hash_set<HloInstruction*>* unhoisted_invariant_instructions,
41 HloInstruction* while_instr, HloInstruction* to_hoist) {
42 HloComputation* parent_of_while = while_instr->parent();
43 HloComputation* while_body = while_instr->while_body();
44
45 struct DFSFrame {
46 HloInstruction* instruction;
47 int64 operand_index;
48 };
49
50 InlinedVector<DFSFrame, 8> dfs_stack;
51 dfs_stack.push_back({to_hoist, 0});
52
53 HloInstruction* while_body_param = while_body->parameter_instruction(0);
54 HloInstruction* while_operand = while_instr->mutable_operand(0);
55
56 do {
57 DFSFrame* frame = &dfs_stack.back();
58 if (frame->operand_index == frame->instruction->operand_count()) {
59 HloInstruction* old_instruction = frame->instruction;
60
61 // All of the operands for old_instruction have been cloned, so it is
62 // time to clone old_instruction itself.
63
64 auto get_new_operand = [&](HloInstruction* old_operand) {
65 return old_operand == while_body_param
66 ? while_operand
67 : FindOrDie(*hoisted_instructions, old_operand);
68 };
69
70 InlinedVector<HloInstruction*, 4> new_operands;
71 absl::c_transform(old_instruction->operands(),
72 std::back_inserter(new_operands), get_new_operand);
73
74 HloInstruction* new_instruction =
75 parent_of_while->AddInstruction(old_instruction->CloneWithNewOperands(
76 old_instruction->shape(), new_operands));
77
78 InsertOrDie(hoisted_instructions, old_instruction, new_instruction);
79
80 // Approximately half of the instructions that would normally be present
81 // in unhoisted_invariant_instructions are constants. We save a bit of
82 // compile time by not putting these in the hashtable.
83 CHECK_EQ(unhoisted_invariant_instructions->erase(old_instruction),
84 to_hoist != old_instruction &&
85 old_instruction->opcode() != HloOpcode::kConstant);
86 dfs_stack.pop_back();
87 continue;
88 }
89
90 HloInstruction* next_operand =
91 frame->instruction->mutable_operand(frame->operand_index++);
92 if (hoisted_instructions->contains(next_operand) ||
93 next_operand == while_body_param) {
94 continue;
95 }
96
97 dfs_stack.push_back({next_operand, 0});
98 } while (!dfs_stack.empty());
99 }
100
101 // Returns true if `instruction` is worth hoisting only if it lets us hoist some
102 // instruction using it. The rationale is that hoisting these instructions will
103 // prevent simplification and fusion in the while body.
NotWorthHoistingIndividually(const HloInstruction & instruction)104 bool WhileLoopInvariantCodeMotion::NotWorthHoistingIndividually(
105 const HloInstruction& instruction) {
106 switch (instruction.opcode()) {
107 default:
108 return false;
109
110 case HloOpcode::kConstant:
111 return !hoist_constants_;
112
113 case HloOpcode::kBitcast:
114 case HloOpcode::kBroadcast:
115 case HloOpcode::kIota:
116 case HloOpcode::kReshape:
117 case HloOpcode::kReverse:
118 case HloOpcode::kSlice:
119 case HloOpcode::kTranspose:
120 case HloOpcode::kTuple:
121 return true;
122 }
123 }
124
125 StatusOr<bool>
TryHoistingInvariantInstructionsFromWhileBody(HloInstruction * while_instr)126 WhileLoopInvariantCodeMotion::TryHoistingInvariantInstructionsFromWhileBody(
127 HloInstruction* while_instr) {
128 auto print_no_metadata = HloPrintOptions{}.set_print_metadata(false);
129
130 if (!while_instr->shape().IsTuple()) {
131 // This restriction leaves one interesting pattern on the table:
132 //
133 // while_body(f32[1024, 1024] %param) {
134 // %value = expensive_op(%param)
135 // outfeed(%value)
136 // ROOT = %param
137 // }
138 //
139 // If we see that pattern in the while, instead of generalizing this
140 // algorithm to work with non-tuples, we should instead add a pass that
141 // canonicalizes while loops like the above to use a tuple state.
142 return false;
143 }
144
145 string while_instr_name = while_instr->ToString(print_no_metadata);
146 VLOG(2) << "Trying to hoist from " << while_instr_name;
147
148 auto maybe_upper_bound = ComputeWhileLoopTripCountUpperBound(while_instr);
149 if (maybe_upper_bound && *maybe_upper_bound <= 1) {
150 VLOG(2) << "Loop has a trip count of at most 1, skipping.";
151 return false;
152 }
153
154 HloComputation* while_body = while_instr->while_body();
155
156 // Maps instructions in the while body to instructions hoisted outside the
157 // while that compute the same value.
158 flat_hash_map<HloInstruction*, HloInstruction*> hoisted_instructions;
159
160 // Contains instructions that can be legally hoisted, but were deemed to be
161 // unprofitable to be hoisted alone by NotWorthHoistingIndividually. When we
162 // hoist an instruction in this set, we move it from
163 // unhoisted_invariant_instructions to hoisted_instructions.
164 flat_hash_set<HloInstruction*> unhoisted_invariant_instructions;
165
166 // Invariant GTE's axiomatically satisfy the constraints for
167 // unhoisted_invariant_instructions -- they can be legally hoisted, but there
168 // is no benefit to hoisting them unless something that uses it is also
169 // hoisted.
170 for (auto* instr : WhileUtil::GetInvariantGTEsForWhileBody(*while_body)) {
171 if (instr->shape().IsArray()) {
172 // TODO(b/79147885): We should try to generalize this to tuples for
173 // uniformity's sake, if nothing else.
174 InsertOrDie(&unhoisted_invariant_instructions, instr);
175 }
176 }
177
178 if (unhoisted_invariant_instructions.empty() && !hoist_constants_) {
179 // There are no obviously loop invariant elements in the state being
180 // threaded through the while loop so give up. In theory this precondition
181 // is too strong -- we could have code that e.g. permutes the elements in
182 // the while state but uses a select to pick the same value on every
183 // iteration.
184 //
185 // If we were asked to hoist constants, we need to scan the while body for
186 // constants even if we didn't find any loop invariant values in the while
187 // state tuple.
188 return false;
189 }
190
191 // LICM in the presence of domain instructions is complex, bail.
192 for (auto* instruction : while_body->MakeInstructionPostOrder()) {
193 if (instruction->opcode() == HloOpcode::kDomain) {
194 return false;
195 }
196 }
197
198 // instructions_to_replace[i] is hoisted into a loop invariant instruction
199 // replacement_instructions[i].
200 std::vector<HloInstruction*> instructions_to_replace;
201 std::vector<HloInstruction*> replacement_instructions;
202
203 for (auto* instruction : while_body->MakeInstructionPostOrder()) {
204 if (instruction->HasSideEffect() ||
205 instruction->opcode() == HloOpcode::kParameter ||
206 !instruction->control_predecessors().empty() ||
207 !instruction->control_successors().empty()) {
208 continue;
209 }
210
211 if (!hoist_size_inflating_ops_) {
212 // Check that hoisting the instruction doesn't cause a significant memory
213 // blow-up. LICM extends the live-range of the output of the hoisted
214 // instruction to be the entire while loop, which may be problematic on
215 // platforms where memory is limited. This can be especially harmful if
216 // the instruction has a significantly larger output than its input, e.g.
217 // kIota, kBroadcast or kConstant.
218 int64 input_size = 0, output_size = 0;
219
220 for (auto* operand : instruction->operands()) {
221 ShapeUtil::ForEachSubshape(
222 operand->shape(),
223 [&input_size](const Shape& subshape, const ShapeIndex& /*index*/) {
224 if (subshape.IsArray()) {
225 input_size += ShapeUtil::ByteSizeOfElements(subshape);
226 }
227 });
228 }
229 ShapeUtil::ForEachSubshape(
230 instruction->shape(),
231 [&output_size](const Shape& subshape, const ShapeIndex& /*index*/) {
232 if (subshape.IsArray()) {
233 output_size += ShapeUtil::ByteSizeOfElements(subshape);
234 }
235 });
236
237 if (output_size > input_size) {
238 continue;
239 }
240 }
241
242 auto is_invariant = [&](HloInstruction* op) {
243 return hoisted_instructions.find(op) != hoisted_instructions.end() ||
244 unhoisted_invariant_instructions.contains(op) ||
245 op->opcode() == HloOpcode::kConstant;
246 };
247
248 if (!absl::c_all_of(instruction->operands(), is_invariant)) {
249 continue;
250 }
251
252 if (NotWorthHoistingIndividually(*instruction)) {
253 VLOG(2) << "Adding " << instruction->ToString(print_no_metadata)
254 << " to unhoisted invariant set.";
255 // Approximately half of the instructions that reach this point are
256 // constants. We save a bit of compile time by not putting these in the
257 // hashtable.
258 if (instruction->opcode() != HloOpcode::kConstant) {
259 InsertOrDie(&unhoisted_invariant_instructions, instruction);
260 }
261 continue;
262 }
263
264 VLOG(2) << "Hoisting " << instruction->ToString(print_no_metadata);
265
266 CreateLoopInvariantCopy(&hoisted_instructions,
267 &unhoisted_invariant_instructions, while_instr,
268 instruction);
269
270 instructions_to_replace.push_back(instruction);
271 replacement_instructions.push_back(
272 FindOrDie(hoisted_instructions, instruction));
273 }
274
275 if (instructions_to_replace.empty()) {
276 return false;
277 }
278
279 TF_ASSIGN_OR_RETURN(
280 WhileUtil::MakeInstructionsLiveInResult live_in_instructions_result,
281 WhileUtil::MakeInstructionsLiveIn(while_instr, replacement_instructions));
282
283 HloComputation* new_while_body =
284 live_in_instructions_result.new_while_instr->while_body();
285
286 for (int i = 0; i < instructions_to_replace.size(); i++) {
287 HloInstruction* instruction_to_replace_in_new_while =
288 FindOrDie(live_in_instructions_result.while_body_instruction_map,
289 instructions_to_replace[i]);
290 TF_RETURN_IF_ERROR(new_while_body->ReplaceInstruction(
291 instruction_to_replace_in_new_while,
292 live_in_instructions_result.while_body_live_in_values[i]));
293 }
294
295 VLOG(1) << "Hoisted " << instructions_to_replace.size()
296 << " instructions from " << while_instr_name;
297
298 return true;
299 }
300
Run(HloModule * module)301 StatusOr<bool> WhileLoopInvariantCodeMotion::Run(HloModule* module) {
302 VLOG(2) << "HLO module before WhileLoopConstantSinking:";
303 XLA_VLOG_LINES(2, module->ToString());
304
305 bool changed = false;
306 std::vector<HloInstruction*> while_instrs;
307 for (auto* comp : module->computations()) {
308 absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
309 [](const HloInstruction* instr) {
310 return instr->opcode() == HloOpcode::kWhile;
311 });
312 }
313
314 for (HloInstruction* while_instr : while_instrs) {
315 // Right now we only hoist computations from the while body, but
316 // TryHoistingInvariantInstructionsFromWhileBody can be generalized to
317 // optimize the condition computation too, if needed.
318 //
319 // The transform we do here is a pessmization for while loops that execute
320 // zero times*, but at this time we expect those to be rare. If this
321 // becomes a problem we can consider using the conditional HLO to avoid
322 // doing extra work for while loops with zero trip count.
323 //
324 // * We delete while loops that have a zero trip count, so this would have
325 // to be a while loop with a somewhat opaque condition expression.
326
327 TF_ASSIGN_OR_RETURN(
328 bool result,
329 TryHoistingInvariantInstructionsFromWhileBody(while_instr));
330 changed |= result;
331 }
332
333 if (changed) {
334 VLOG(2) << "HLO module after WhileLoopConstantSinking:";
335 XLA_VLOG_LINES(2, module->ToString());
336 } else {
337 VLOG(2) << "HLO module unchanged after WhileLoopConstantSinking";
338 }
339
340 return changed;
341 }
342 } // namespace xla
343