/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"
#include "util/u_dynarray.h"

static const bool debug = false;

/**
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you. This pass is for those times. Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access. This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Removal of redundant load_deref intrinsics. We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * This pass takes an intermediate approach between a purely local, per-block
 * analysis and a complete data-flow analysis. It follows the control flow
 * graph and propagates the available copy information forward, invalidating
 * data at each cf_node.
 *
 * Removal of dead writes to variables is handled by another pass.
 */
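
/*
 * As a purely illustrative sketch (variable names and SSA numbers are made
 * up, and the textual IR is simplified relative to nir_print output), the
 * pass can rewrite
 *
 *    copy_deref(deref_var &out, deref_var &tmp)
 *    ...                      (no intervening writes that alias out or tmp)
 *    vec1 32 ssa_2 = load_deref(deref_var &out)
 *
 * so that the load reads from &tmp instead, and it can drop a second
 * load_deref of the same variable entirely by reusing the SSA value produced
 * by the first one.
 */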

struct vars_written {
   nir_variable_mode modes;

   /* Key is deref and value is the uintptr_t with the write mask. */
   struct hash_table *derefs;
};

struct value {
   bool is_ssa;
   union {
      struct {
         nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
         uint8_t component[NIR_MAX_VEC_COMPONENTS];
      } ssa;
      nir_deref_instr *deref;
   };
};

static void
value_set_ssa_components(struct value *value, nir_ssa_def *def,
                         unsigned num_components)
{
   if (!value->is_ssa)
      memset(&value->ssa, 0, sizeof(value->ssa));
   value->is_ssa = true;
   for (unsigned i = 0; i < num_components; i++) {
      value->ssa.def[i] = def;
      value->ssa.component[i] = i;
   }
}

struct copy_entry {
   struct value src;

   nir_deref_instr *dst;
};
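
/*
 * A copy_entry records that "dst currently holds src". As an illustrative
 * (hypothetical) example, after
 *
 *    vec4 32 ssa_7 = ...
 *    store_deref(deref_var &v, ssa_7, wrmask=xyzw)
 *
 * the entry for v would have src.ssa.def[i] == ssa_7 and
 * src.ssa.component[i] == i for each written component, while for a
 * copy_deref the source is tracked as a deref instead (is_ssa == false).
 */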

struct copy_prop_var_state {
   nir_function_impl *impl;

   void *mem_ctx;
   void *lin_ctx;

   /* Maps nodes to vars_written. Used to invalidate copy entries when
    * visiting each node.
    */
   struct hash_table *vars_written_map;

   bool progress;
};

static bool
value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
{
   assert(intrin->intrinsic == nir_intrinsic_store_deref);
   uintptr_t write_mask = nir_intrinsic_write_mask(intrin);

   for (unsigned i = 0; i < intrin->num_components; i++) {
      if ((write_mask & (1 << i)) &&
          (value->ssa.def[i] != intrin->src[1].ssa ||
           value->ssa.component[i] != i))
         return false;
   }

   return true;
}

static struct vars_written *
create_vars_written(struct copy_prop_var_state *state)
{
   struct vars_written *written =
      linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
   written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
   return written;
}

static void
gather_vars_written(struct copy_prop_var_state *state,
                    struct vars_written *written,
                    nir_cf_node *cf_node)
{
   struct vars_written *new_written = NULL;

   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);
      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         gather_vars_written(state, NULL, cf_node);
      break;
   }

   case nir_cf_node_block: {
      if (!written)
         break;

      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_call) {
            written->modes |= nir_var_shader_out |
                              nir_var_shader_temp |
                              nir_var_function_temp |
                              nir_var_mem_ssbo |
                              nir_var_mem_shared |
                              nir_var_mem_global;
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_control_barrier:
         case nir_intrinsic_group_memory_barrier:
         case nir_intrinsic_memory_barrier:
            written->modes |= nir_var_shader_out |
                              nir_var_mem_ssbo |
                              nir_var_mem_shared |
                              nir_var_mem_global;
            break;

         case nir_intrinsic_scoped_barrier:
            if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
               written->modes |= nir_intrinsic_memory_modes(intrin);
            break;

         case nir_intrinsic_emit_vertex:
         case nir_intrinsic_emit_vertex_with_counter:
            written->modes = nir_var_shader_out;
            break;

         case nir_intrinsic_trace_ray:
         case nir_intrinsic_execute_callable: {
            nir_deref_instr *payload =
               nir_src_as_deref(*nir_get_shader_call_payload_src(intrin));

            nir_component_mask_t mask =
               (nir_component_mask_t) BITFIELD_MASK(glsl_get_vector_elements(payload->type));

            struct hash_entry *ht_entry =
               _mesa_hash_table_search(written->derefs, payload);
            if (ht_entry) {
               ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
            } else {
               _mesa_hash_table_insert(written->derefs, payload,
                                       (void *)(uintptr_t)mask);
            }
            break;
         }

         case nir_intrinsic_report_ray_intersection:
            written->modes |= nir_var_mem_ssbo |
                              nir_var_mem_global |
                              nir_var_shader_call_data |
                              nir_var_ray_hit_attrib;
            break;

         case nir_intrinsic_ignore_ray_intersection:
         case nir_intrinsic_terminate_ray:
            written->modes |= nir_var_mem_ssbo |
                              nir_var_mem_global |
                              nir_var_shader_call_data;
            break;

         case nir_intrinsic_deref_atomic_add:
         case nir_intrinsic_deref_atomic_fadd:
         case nir_intrinsic_deref_atomic_imin:
         case nir_intrinsic_deref_atomic_umin:
         case nir_intrinsic_deref_atomic_fmin:
         case nir_intrinsic_deref_atomic_imax:
         case nir_intrinsic_deref_atomic_umax:
         case nir_intrinsic_deref_atomic_fmax:
         case nir_intrinsic_deref_atomic_and:
         case nir_intrinsic_deref_atomic_or:
         case nir_intrinsic_deref_atomic_xor:
         case nir_intrinsic_deref_atomic_exchange:
         case nir_intrinsic_deref_atomic_comp_swap:
         case nir_intrinsic_deref_atomic_fcomp_swap:
         case nir_intrinsic_store_deref:
         case nir_intrinsic_copy_deref:
         case nir_intrinsic_memcpy_deref: {
            /* Destination in all of store_deref, copy_deref and the atomics is src[0]. */
            nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

            uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
               nir_intrinsic_write_mask(intrin) : (1 << glsl_get_vector_elements(dst->type)) - 1;

            struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
            if (ht_entry)
               ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
            else
               _mesa_hash_table_insert(written->derefs, dst, (void *)mask);

            break;
         }

         default:
            break;
         }
      }

      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         gather_vars_written(state, new_written, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }

   if (new_written) {
      /* Merge the new information into the parent control flow node. */
      if (written) {
         written->modes |= new_written->modes;
         hash_table_foreach(new_written->derefs, new_entry) {
            struct hash_entry *old_entry =
               _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
                                                  new_entry->key);
            if (old_entry) {
               nir_component_mask_t merged = (uintptr_t) new_entry->data |
                                             (uintptr_t) old_entry->data;
               old_entry->data = (void *) ((uintptr_t) merged);
            } else {
               _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
                                                  new_entry->key, new_entry->data);
            }
         }
      }
      _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
   }
}

static struct copy_entry *
copy_entry_create(struct util_dynarray *copies,
                  nir_deref_instr *dst_deref)
{
   struct copy_entry new_entry = {
      .dst = dst_deref,
   };
   util_dynarray_append(copies, struct copy_entry, new_entry);
   return util_dynarray_top_ptr(copies, struct copy_entry);
}

/* Remove copy entry by swapping it with the last element and reducing the
 * size. If used inside an iteration on copies, it must be a reverse
 * (backwards) iteration. It is safe to use in those cases because the swap
 * will not affect the rest of the iteration.
 */
static void
copy_entry_remove(struct util_dynarray *copies,
                  struct copy_entry *entry)
{
   const struct copy_entry *src =
      util_dynarray_pop_ptr(copies, struct copy_entry);
   if (src != entry)
      *entry = *src;
}
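
/*
 * Because of the swap-with-last removal above, deleting entries while
 * iterating is only safe with the reverse iterator. A minimal sketch, where
 * should_remove() is a hypothetical predicate and not part of this file:
 *
 *    util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
 *       if (should_remove(iter))
 *          copy_entry_remove(copies, iter);
 *    }
 */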

static bool
is_array_deref_of_vector(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return false;
   nir_deref_instr *parent = nir_deref_instr_parent(deref);
   return glsl_type_is_vector(parent->type);
}
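
/* For example (illustrative): with "vec4 v", the deref for "v[i]" is an array
 * deref whose parent type is a vector, so the helper above returns true. For
 * "float a[4]", the deref for "a[i]" has an array-typed parent and it returns
 * false.
 */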

static struct copy_entry *
lookup_entry_for_deref(struct util_dynarray *copies,
                       nir_deref_instr *deref,
                       nir_deref_compare_result allowed_comparisons)
{
   struct copy_entry *entry = NULL;
   util_dynarray_foreach(copies, struct copy_entry, iter) {
      nir_deref_compare_result result = nir_compare_derefs(iter->dst, deref);
      if (result & allowed_comparisons) {
         entry = iter;
         if (result & nir_derefs_equal_bit)
            break;
         /* Keep looking in case we have an equal match later in the array. */
      }
   }
   return entry;
}

static struct copy_entry *
lookup_entry_and_kill_aliases(struct util_dynarray *copies,
                              nir_deref_instr *deref,
                              unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   nir_deref_instr *dst_match = NULL;
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
            copy_entry_remove(copies, iter);
            continue;
         }
      }

      nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);

      if (comp & nir_derefs_equal_bit) {
         /* Removing entries invalidates previous iter pointers, so we'll
          * collect the matching entry later. Just make sure it is unique.
          */
         assert(!dst_match);
         dst_match = iter->dst;
      } else if (comp & nir_derefs_may_alias_bit) {
         copy_entry_remove(copies, iter);
      }
   }

   struct copy_entry *entry = NULL;
   if (dst_match) {
      util_dynarray_foreach(copies, struct copy_entry, iter) {
         if (iter->dst == dst_match) {
            entry = iter;
            break;
         }
      }
      assert(entry);
   }
   return entry;
}

static void
kill_aliases(struct util_dynarray *copies,
             nir_deref_instr *deref,
             unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);
   if (entry)
      copy_entry_remove(copies, entry);
}

static struct copy_entry *
get_entry_and_kill_aliases(struct util_dynarray *copies,
                           nir_deref_instr *deref,
                           unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);

   if (entry == NULL)
      entry = copy_entry_create(copies, deref);

   return entry;
}

static void
apply_barrier_for_modes(struct util_dynarray *copies,
                        nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if (nir_deref_mode_may_be(iter->dst, modes) ||
          (!iter->src.is_ssa && nir_deref_mode_may_be(iter->src.deref, modes)))
         copy_entry_remove(copies, iter);
   }
}

static void
value_set_from_value(struct value *value, const struct value *from,
                     unsigned base_index, unsigned write_mask)
{
   /* We can't have non-zero indexes with non-trivial write masks */
   assert(base_index == 0 || write_mask == 1);

   if (from->is_ssa) {
      /* Clear value if it was being used as non-SSA. */
      if (!value->is_ssa)
         memset(&value->ssa, 0, sizeof(value->ssa));
      value->is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (write_mask & (1 << i)) {
            value->ssa.def[base_index + i] = from->ssa.def[i];
            value->ssa.component[base_index + i] = from->ssa.component[i];
         }
      }
   } else {
      /* Non-ssa stores always write everything */
      value->is_ssa = false;
      value->deref = from->deref;
   }
}

/* Try to load a single element of a vector from the copy_entry. If the data
 * isn't available, just let the original intrinsic do the work.
 */
static bool
load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
                                  struct copy_entry *entry,
                                  nir_builder *b, nir_intrinsic_instr *intrin,
                                  struct value *value, unsigned index)
{
   assert(index < glsl_get_vector_elements(entry->dst->type));

   /* We don't have the element available, so let the instruction do the work. */
   if (!entry->src.ssa.def[index])
      return false;

   b->cursor = nir_instr_remove(&intrin->instr);
   intrin->instr.block = NULL;

   assert(entry->src.ssa.component[index] <
          entry->src.ssa.def[index]->num_components);
   nir_ssa_def *def = nir_channel(b, entry->src.ssa.def[index],
                                  entry->src.ssa.component[index]);

   *value = (struct value) {
      .is_ssa = true,
      {
         .ssa = {
            .def = { def },
            .component = { 0 },
         },
      }
   };

   return true;
}

/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def. Because an entry could reference multiple different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller. If the load instruction is no
 * longer needed, it is removed and nir_instr::block is set to NULL. (It is
 * possible, in some cases, for the load to be used in the vecN operation in
 * which case it isn't deleted.)
 */
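/*
 * An illustrative (hypothetical) case: if the entry for a vec2 "v" records
 * def[0] = ssa_1, component[0] = 0 and def[1] = ssa_2, component[1] = 3,
 * the rewritten load becomes roughly
 *
 *    vec2 32 ssa_9 = vec2 ssa_1.x, ssa_2.w
 *
 * whereas if every component comes from the same def with matching component
 * indices, that def is reused directly and no vecN is emitted.
 */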
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          nir_deref_instr *src, struct value *value)
{
   if (is_array_deref_of_vector(src)) {
      if (nir_src_is_const(src->arr.index)) {
         return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
                                                  nir_src_as_uint(src->arr.index));
      }

      /* An SSA copy_entry for the vector won't help an indirect load. */
      if (glsl_type_is_vector(entry->dst->type)) {
         assert(entry->dst->type == nir_deref_instr_parent(src)->type);
         /* TODO: If all SSA entries are there, try an if-ladder. */
         return false;
      }
   }

   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = entry->dst->type;
   unsigned num_components = glsl_get_vector_elements(type);

   nir_component_mask_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa.def[i])
         available |= (1 << i);

      if (value->ssa.def[i] != value->ssa.def[0])
         all_same = false;

      if (value->ssa.component[i] != i)
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_deref &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail. Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa.def[i]) {
         comps[i] = nir_channel(b, value->ssa.def[i], value->ssa.component[i]);
      } else {
         /* We don't have anything for this component in our
          * list. Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   value_set_ssa_components(value, vec, num_components);

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}

/**
 * Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific. The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
static nir_deref_instr *
specialize_wildcards(nir_builder *b,
                     nir_deref_path *deref,
                     nir_deref_path *guide,
                     nir_deref_path *specific)
{
   nir_deref_instr **deref_p = &deref->path[1];
   nir_deref_instr **guide_p = &guide->path[1];
   nir_deref_instr **spec_p = &specific->path[1];
   nir_deref_instr *ret_tail = deref->path[0];
   for (; *deref_p; deref_p++) {
      if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
         /* This is where things get tricky. We have to search through
          * the entry deref to find its corresponding wildcard and fill
          * this slot in with the value from the src.
          */
         while (*guide_p &&
                (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
            guide_p++;
            spec_p++;
         }
         assert(*guide_p && *spec_p);

         ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);

         guide_p++;
         spec_p++;
      } else {
         ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
      }
   }

   return ret_tail;
}
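
/*
 * As an illustration with hypothetical variable names: for an entry that
 * records the copy "a[*] = b[*]" and a load of "a[3]", the caller passes
 * deref = the path for b[*], guide = the path for a[*] and specific = the
 * path for a[3], and the result is a fresh deref chain equivalent to b[3].
 */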

/* Do a "load" from a deref-based entry and return it in "value" as a value.
 * The deref returned in "value" will always be a fresh copy so the caller can
 * steal it and assign it to the instruction directly without copying it
 * again.
 */
static bool
load_from_deref_entry_value(struct copy_prop_var_state *state,
                            struct copy_entry *entry,
                            nir_builder *b, nir_intrinsic_instr *intrin,
                            nir_deref_instr *src, struct value *value)
{
   *value = entry->src;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_deref_path entry_dst_path, src_path;
   nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
   nir_deref_path_init(&src_path, src, state->mem_ctx);

   bool need_to_specialize_wildcards = false;
   nir_deref_instr **entry_p = &entry_dst_path.path[1];
   nir_deref_instr **src_p = &src_path.path[1];
   while (*entry_p && *src_p) {
      nir_deref_instr *entry_tail = *entry_p++;
      nir_deref_instr *src_tail = *src_p++;

      if (src_tail->deref_type == nir_deref_type_array &&
          entry_tail->deref_type == nir_deref_type_array_wildcard)
         need_to_specialize_wildcards = true;
   }

   /* If the entry deref is longer than the source deref then it refers to a
    * smaller type and we can't source from it.
    */
   assert(*entry_p == NULL);

   if (need_to_specialize_wildcards) {
      /* The entry has some wildcards that are not in src. This means we need
       * to construct a new deref based on the entry but using the wildcards
       * from the source and guided by the entry dst. Oof.
       */
      nir_deref_path entry_src_path;
      nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
      value->deref = specialize_wildcards(b, &entry_src_path,
                                          &entry_dst_path, &src_path);
      nir_deref_path_finish(&entry_src_path);
   }

   /* If our source deref is longer than the entry deref, that's ok because
    * it just means the entry deref needs to be extended a bit.
    */
   while (*src_p) {
      nir_deref_instr *src_tail = *src_p++;
      value->deref = nir_build_deref_follower(b, value->deref, src_tail);
   }

   nir_deref_path_finish(&entry_dst_path);
   nir_deref_path_finish(&src_path);

   return true;
}

static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_instr *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, src, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}

static void
invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
                              struct util_dynarray *copies,
                              nir_cf_node *cf_node)
{
   struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
   assert(ht_entry);

   struct vars_written *written = ht_entry->data;
   if (written->modes) {
      util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
         if (nir_deref_mode_may_be(entry->dst, written->modes))
            copy_entry_remove(copies, entry);
      }
   }

   hash_table_foreach (written->derefs, entry) {
      nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
      kill_aliases(copies, deref_written, (uintptr_t)entry->data);
   }
}

static void
print_value(struct value *value, unsigned num_components)
{
   if (!value->is_ssa) {
      printf(" %s ", glsl_get_type_name(value->deref->type));
      nir_print_deref(value->deref, stdout);
      return;
   }

   bool same_ssa = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa.component[i] != i ||
          (i > 0 && value->ssa.def[i - 1] != value->ssa.def[i])) {
         same_ssa = false;
         break;
      }
   }
   if (same_ssa) {
      printf(" ssa_%d", value->ssa.def[0]->index);
   } else {
      for (int i = 0; i < num_components; i++) {
         if (value->ssa.def[i])
            printf(" ssa_%d[%u]", value->ssa.def[i]->index, value->ssa.component[i]);
         else
            printf(" _");
      }
   }
}

static void
print_copy_entry(struct copy_entry *entry)
{
   printf(" %s ", glsl_get_type_name(entry->dst->type));
   nir_print_deref(entry->dst, stdout);
   printf(":\t");

   unsigned num_components = glsl_get_vector_elements(entry->dst->type);
   print_value(&entry->src, num_components);
   printf("\n");
}

static void
dump_instr(nir_instr *instr)
{
   printf(" ");
   nir_print_instr(instr, stdout);
   printf("\n");
}

static void
dump_copy_entries(struct util_dynarray *copies)
{
   util_dynarray_foreach(copies, struct copy_entry, iter)
      print_copy_entry(iter);
   printf("\n");
}

static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block,
                     struct util_dynarray *copies)
{
   if (debug) {
      printf("# block%d\n", block->index);
      dump_copy_entries(copies);
   }

   nir_foreach_instr_safe(instr, block) {
      if (debug && instr->type == nir_instr_type_deref)
         dump_instr(instr);

      if (instr->type == nir_instr_type_call) {
         if (debug) dump_instr(instr);
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_shader_temp |
                                         nir_var_function_temp |
                                         nir_var_mem_ssbo |
                                         nir_var_mem_shared |
                                         nir_var_mem_global);
         if (debug) dump_copy_entries(copies);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_control_barrier:
      case nir_intrinsic_memory_barrier:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_mem_ssbo |
                                         nir_var_mem_shared |
                                         nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_buffer:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_mem_ssbo |
                                         nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_shared:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_mem_shared);
         break;

      case nir_intrinsic_memory_barrier_tcs_patch:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_shader_out);
         break;

      case nir_intrinsic_scoped_barrier:
         if (debug) dump_instr(instr);

         if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
            apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_shader_out);
         break;

      case nir_intrinsic_report_ray_intersection:
         apply_barrier_for_modes(copies, nir_var_mem_ssbo |
                                         nir_var_mem_global |
                                         nir_var_shader_call_data |
                                         nir_var_ray_hit_attrib);
         break;

      case nir_intrinsic_ignore_ray_intersection:
      case nir_intrinsic_terminate_ray:
         apply_barrier_for_modes(copies, nir_var_mem_ssbo |
                                         nir_var_mem_global |
                                         nir_var_shader_call_data);
         break;

      case nir_intrinsic_load_deref: {
         if (debug) dump_instr(instr);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
            break;

         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);

         /* Direct array_derefs of vectors operate on the vectors (the parent
          * deref). Indirects will be handled like other derefs.
          */
         int vec_index = 0;
         nir_deref_instr *vec_src = src;
         if (is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) {
            vec_src = nir_deref_instr_parent(src);
            unsigned vec_comps = glsl_get_vector_elements(vec_src->type);
            vec_index = nir_src_as_uint(src->arr.index);

            /* Loading from an invalid index yields an undef */
            if (vec_index >= vec_comps) {
               b->cursor = nir_instr_remove(instr);
               nir_ssa_def *u = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
               nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(u));
               state->progress = true;
               break;
            }
         }

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value = {0};
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* try_load_from_entry() has already ensured that we get a
                * single SSA value that has all of the channels. We just have
                * to do the rewrite operation. Note that for array derefs of
                * vectors, channel 0 is used.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place. This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa.def[0]),
                                                 value.ssa.def[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa.def[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
               value_set_ssa_components(&value, &intrin->dest.ssa,
                                        intrin->num_components);
            }
            state->progress = true;
         } else {
            value_set_ssa_components(&value, &intrin->dest.ssa,
                                     intrin->num_components);
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it. In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *entry =
            lookup_entry_for_deref(copies, vec_src, nir_derefs_equal_bit);
         if (!entry)
            entry = copy_entry_create(copies, vec_src);

         /* Update the entry with the value of the load. This way
          * we can potentially remove subsequent loads.
          */
         value_set_from_value(&entry->src, &value, vec_index,
                              (1 << intrin->num_components) - 1);
         break;
      }

      case nir_intrinsic_store_deref: {
         if (debug) dump_instr(instr);

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         assert(glsl_type_is_vector_or_scalar(dst->type));

         /* Direct array_derefs of vectors operate on the vectors (the parent
          * deref). Indirects will be handled like other derefs.
          */
         int vec_index = 0;
         nir_deref_instr *vec_dst = dst;
         if (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index)) {
            vec_dst = nir_deref_instr_parent(dst);
            unsigned vec_comps = glsl_get_vector_elements(vec_dst->type);

            vec_index = nir_src_as_uint(dst->arr.index);

            /* Storing to an invalid index is a no-op. */
            if (vec_index >= vec_comps) {
               nir_instr_remove(instr);
               state->progress = true;
               break;
            }
         }

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
            unsigned wrmask = nir_intrinsic_write_mask(intrin);
            kill_aliases(copies, dst, wrmask);
            break;
         }

         struct copy_entry *entry =
            lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
         if (entry && value_equals_store_src(&entry->src, intrin)) {
            /* If we are storing the value from a load of the same var, the
             * store is redundant, so remove it.
             */
            nir_instr_remove(instr);
            state->progress = true;
         } else {
            struct value value = {0};
            value_set_ssa_components(&value, intrin->src[1].ssa,
                                     intrin->num_components);
            unsigned wrmask = nir_intrinsic_write_mask(intrin);
            struct copy_entry *entry =
               get_entry_and_kill_aliases(copies, vec_dst, wrmask);
            value_set_from_value(&entry->src, &value, vec_index, wrmask);
         }

         break;
      }

      case nir_intrinsic_copy_deref: {
         if (debug) dump_instr(instr);

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);

         /* The copy_deref intrinsic doesn't keep track of num_components, so
          * get it ourselves.
          */
         unsigned num_components = glsl_get_vector_elements(dst->type);
         unsigned full_mask = (1 << num_components) - 1;

         if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
             (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE)) {
            kill_aliases(copies, dst, full_mask);
            break;
         }

         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            /* This is a no-op self-copy. Get rid of it */
            nir_instr_remove(instr);
            state->progress = true;
            continue;
         }

         /* Copies of direct array derefs of vectors are not handled. Just
          * invalidate what's written and bail.
          */
         if ((is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) ||
             (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index))) {
            kill_aliases(copies, dst, full_mask);
            break;
         }

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            /* If load works, intrin (the copy_deref) is removed. */
            if (value.is_ssa) {
               nir_store_deref(b, dst, value.ssa.def[0], full_mask);
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
                  continue;

               /* Just turn it into a copy of a different deref */
               intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }

            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         nir_variable *src_var = nir_deref_instr_get_variable(src);
         if (src_var && src_var->data.cannot_coalesce) {
            /* The source cannot be coalesced, which means we can't propagate
             * this copy.
             */
            break;
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(copies, dst, full_mask);
         value_set_from_value(&dst_entry->src, &value, 0, full_mask);
         break;
      }

      case nir_intrinsic_trace_ray:
      case nir_intrinsic_execute_callable: {
         if (debug) dump_instr(instr);

         nir_deref_instr *payload =
            nir_src_as_deref(*nir_get_shader_call_payload_src(intrin));
         nir_component_mask_t full_mask =
            (nir_component_mask_t) BITFIELD_MASK(glsl_get_vector_elements(payload->type));
         kill_aliases(copies, payload, full_mask);
         break;
      }

      case nir_intrinsic_memcpy_deref:
      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         if (debug) dump_instr(instr);

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         unsigned num_components = glsl_get_vector_elements(dst->type);
         unsigned full_mask = (1 << num_components) - 1;
         kill_aliases(copies, dst, full_mask);
         break;

      case nir_intrinsic_store_deref_block_intel: {
         if (debug) dump_instr(instr);

         /* Invalidate the whole variable (or cast) and anything that aliases
          * it.
          */
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         while (nir_deref_instr_parent(dst))
            dst = nir_deref_instr_parent(dst);
         assert(dst->deref_type == nir_deref_type_var ||
                dst->deref_type == nir_deref_type_cast);

         unsigned num_components = glsl_get_vector_elements(dst->type);
         unsigned full_mask = (1 << num_components) - 1;
         kill_aliases(copies, dst, full_mask);
         break;
      }

      default:
         continue; /* To skip the debug below. */
      }

      if (debug) dump_copy_entries(copies);
   }
}

static void
copy_prop_vars_cf_node(struct copy_prop_var_state *state,
                       struct util_dynarray *copies,
                       nir_cf_node *cf_node)
{
   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);

      struct util_dynarray impl_copies;
      util_dynarray_init(&impl_copies, state->mem_ctx);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         copy_prop_vars_cf_node(state, &impl_copies, cf_node);

      break;
   }

   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_builder b;
      nir_builder_init(&b, state->impl);
      copy_prop_vars_block(state, &b, block, copies);
      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      /* Clone the copies for each branch of the if statement. The idea is
       * that they both see the same state of available copies, but do not
       * interfere with each other.
       */

      struct util_dynarray then_copies;
      util_dynarray_clone(&then_copies, state->mem_ctx, copies);

      struct util_dynarray else_copies;
      util_dynarray_clone(&else_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         copy_prop_vars_cf_node(state, &then_copies, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         copy_prop_vars_cf_node(state, &else_copies, cf_node);

      /* Both branches' copies can be ignored, since the effect of running
       * both branches was captured in the first pass that collects
       * vars_written.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      /* Invalidate before cloning the copies for the loop, since the loop
       * body can be executed more than once.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      struct util_dynarray loop_copies;
      util_dynarray_clone(&loop_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         copy_prop_vars_cf_node(state, &loop_copies, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }
}

static bool
nir_copy_prop_vars_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_context(NULL);

   if (debug) {
      nir_metadata_require(impl, nir_metadata_block_index);
      printf("## nir_copy_prop_vars_impl for %s\n", impl->function->name);
   }

   struct copy_prop_var_state state = {
      .impl = impl,
      .mem_ctx = mem_ctx,
      .lin_ctx = linear_zalloc_parent(mem_ctx, 0),

      .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
   };

   gather_vars_written(&state, NULL, &impl->cf_node);

   copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);

   if (state.progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   ralloc_free(mem_ctx);
   return state.progress;
}

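/*
 * Illustrative usage from a driver or frontend optimization loop (a sketch,
 * not code from this file):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_copy_prop_vars(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 */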
bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= nir_copy_prop_vars_impl(function->impl);
   }

   return progress;
}