// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=1 gpu-thread-dims=1" %s | FileCheck %s

// CHECK-LABEL: @step_var
func @step_var(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
  // Check that we divide by step.
  // CHECK: %[[range_i:.*]] = divi_signed {{.*}}, %{{.*}}
  // CHECK: %[[range_j:.*]] = divi_signed {{.*}}, %{{.*}}

  // CHECK: gpu.launch
  // CHECK-SAME: blocks(%{{[^)]*}}, %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[range_i]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
  // CHECK-SAME: threads(%{{[^)]*}}, %{{[^)]*}}, %{{[^)]*}}) in (%{{[^)]*}} = %[[range_j]], %{{[^)]*}} = %{{[^)]*}}, %{{[^)]*}} = %{{[^)]*}})
  affine.for %i = 5 to 15 step 4 {
    affine.for %j = 3 to 19 step 7 {
      // Loop induction variable remapping:
      //   iv = thread(block)_id * step + lower_bound
      // CHECK:      %[[prod_i:.*]] = muli %{{.*}}, %{{.*}} : index
      // CHECK-NEXT: %[[i:.*]] = addi %{{.*}}, %[[prod_i]] : index
      // CHECK-NEXT: %[[prod_j:.*]] = muli %{{.*}}, %{{.*}} : index
      // CHECK-NEXT: %[[j:.*]] = addi %{{.*}}, %[[prod_j]] : index

      // CHECK: {{.*}} = load %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32>
      %0 = load %A[%i, %j] : memref<?x?xf32>
      // CHECK: store {{.*}}, %{{.*}}[%[[i]], %[[j]]] : memref<?x?xf32>
      store %0, %B[%i, %j] : memref<?x?xf32>
    }
  }
  return
}
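
// Worked example (explanatory note, not part of the checked output): with
// gpu-block-dims=1 and gpu-thread-dims=1, the outer loop maps to the block id
// and the inner loop to the thread id along the x dimension. Applying the
// remapping iv = id * step + lower_bound from the comment above:
//   outer loop `affine.for %i = 5 to 15 step 4`:
//     block id b = 0, 1, 2  ->  %i = b * 4 + 5 = 5, 9, 13   (3 blocks)
//   inner loop `affine.for %j = 3 to 19 step 7`:
//     thread id t = 0, 1, 2 ->  %j = t * 7 + 3 = 3, 10, 17  (3 threads)
// The divi_signed ops checked above derive these per-dimension launch ranges
// from the loop bounds and the step; the CHECK patterns deliberately leave
// the exact operands (and hence the rounding sequence) unconstrained.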