// RUN: mlir-opt %s -tensor-constant-bufferize -std-bufferize -linalg-bufferize \
// RUN: -func-bufferize -finalizing-bufferize -convert-linalg-to-loops \
// RUN: -convert-linalg-to-llvm -convert-std-to-llvm | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
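
// The pipeline above first bufferizes tensors into memrefs:
// -tensor-constant-bufferize turns the dense constant into a global buffer,
// -std-bufferize and -linalg-bufferize rewrite the remaining tensor ops,
// -func-bufferize converts function signatures, and -finalizing-bufferize
// cleans up the leftover materialization casts. The result is then lowered
// to the LLVM dialect and JIT-executed by mlir-cpu-runner, which links
// against the runner-utils library that provides print_memref_f32.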

func @foo() -> tensor<4xf32> {
  %0 = constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
  return %0 : tensor<4xf32>
}
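
// After bufferization, @foo is expected to look roughly like the sketch
// below (illustrative only; the exact op names depend on the MLIR version,
// here spelled as in the pre-memref-dialect standard dialect):
//
//   global_memref "private" constant @__constant_4xf32 : memref<4xf32> =
//       dense<[1.0, 2.0, 3.0, 4.0]>
//   func @foo() -> memref<4xf32> {
//     %0 = get_global_memref @__constant_4xf32 : memref<4xf32>
//     return %0 : memref<4xf32>
//   }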

func @main() {
  %0 = call @foo() : () -> tensor<4xf32>

  // Instead of relying on tensor_store, which introduces aliasing, we rely on
  // the conversion of print_memref_f32(tensor<*xf32>) to
  // print_memref_f32(memref<*xf32>).
  // Note that this skips a step: we would need at least some function
  // attribute to declare that this conversion is valid (e.g. when we
  // statically know that things will play nicely at the C ABI boundary).
  %unranked = tensor_cast %0 : tensor<4xf32> to tensor<*xf32>
  call @print_memref_f32(%unranked) : (tensor<*xf32>) -> ()

  //      CHECK: Unranked Memref base@ = {{0x[0-9a-f]*}}
  // CHECK-SAME: rank = 1 offset = 0 sizes = [4] strides = [1] data =
  // CHECK-NEXT: [1, 2, 3, 4]

  return
}

// This declaration gets converted to a function operating on memref<*xf32>;
// the same C ABI caveat from the note above applies here.
func private @print_memref_f32(%ptr : tensor<*xf32>)
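
// After -func-bufferize, the declaration above becomes
//   func private @print_memref_f32(memref<*xf32>)
// and -convert-std-to-llvm expands the unranked memref argument into a
// (rank, pointer-to-descriptor) pair at the call site. This is assumed to
// resolve against the C symbol exported by libmlir_runner_utils, roughly:
//
//   extern "C" void print_memref_f32(int64_t rank, void *ptr);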