[CPU] Improve vector sizes for mixed-length data types (e.g., `i32`/`i8`) #15246

Open dcaballe opened 1 year ago

dcaballe commented 1 year ago

We are currently not doing a great job of picking vector sizes for generic ops with mixed-length data types. The dispatch below shows a fully parallel element-wise op that mixes `i32` and `i8` operations. We decide to vectorize with `[1, 8, 16]` for 512-bit vectors, which means the `i8` operations use only 128 of the 512 bits.
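
For concreteness, a quick back-of-the-envelope check (plain Python, nothing IREE-specific; the 512-bit width comes from the AVX-512 features in the target below):

```python
VECTOR_BITS = 512
INNER_TILE = 16  # innermost vector size in the current [1, 8, 16] choice

for name, bits in [("i32", 32), ("i8", 8)]:
    used = INNER_TILE * bits
    print(f"{name}: {used}/{VECTOR_BITS} bits used "
          f"({used / VECTOR_BITS:.0%} of a 512-bit register)")
# i32: 512/512 bits used (100% of a 512-bit register)
# i8: 128/512 bits used (25% of a 512-bit register)
```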

In our (hopefully new) tile-size selection infra, we should improve the data-type analysis to get a better understanding of the types of the operations and make sure we fully utilize the 512 bits of the vector. Filling a 512-bit register with `i8` lanes means the corresponding `i32` values will span 4 registers, so we should take that into account when deciding the unroll factor (and perhaps go with something smaller than 8 for the second dimension). A sketch of such a heuristic follows.
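
A minimal sketch of the kind of data-type-aware sizing the new infra could do (the helper name, register budget, and structure are all hypothetical, not existing IREE APIs): size the innermost dimension for the narrowest type, derive how many registers the widest type then needs, and shrink the unroll factor accordingly.

```python
from math import ceil

# Hypothetical sketch of a data-type-aware vector size heuristic. Assumes a
# single native vector width (512 bits for AVX-512) and 32 vector registers.
def pick_vector_sizes(elem_bits, vector_bits=512, reg_budget=32):
    narrowest = min(elem_bits)
    widest = max(elem_bits)
    # Fill a whole register with the narrowest type: 512 / 8 = 64 i8 lanes.
    inner = vector_bits // narrowest
    # Each vector of the widest type then spans several registers:
    # 64 lanes * 32 bits / 512 = 4 registers per i32 value.
    regs_per_wide = ceil(inner * widest / vector_bits)
    # Crude unroll choice: keep roughly half the register file free for
    # temporaries, which lands below today's unroll factor of 8.
    unroll = max(1, reg_budget // (2 * regs_per_wide))
    return inner, unroll, regs_per_wide

print(pick_vector_sizes([32, 32, 8]))  # (64, 4, 4)
```

With these inputs the innermost dimension grows to 64 `i8` lanes and the unroll factor drops from 8 to 4, in line with the suggestion above.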

Input:

```mlir
hal.executable public @forward_dispatch_35 {
  hal.executable.variant public @system_elf_x86_64, target = <"llvm-cpu", "system-elf-x86_64", {cpu = "cascadelake", cpu_features = "+cmov,+mmx,+popcnt,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+avx,+avx2,+fma,+avx512f,+bmi,+bmi2,+aes,+pclmul,+avx512vl,+avx512bw,+avx512dq,+avx512cd,+avx512vnni,+adx,+clflushopt,+clwb,+cx16,+cx8,+crc32,+f16c,+fsgsbase,+fxsr,+invpcid,+lzcnt,+movbe,+pku,+prfchw,+rdrnd,+rdseed,+sahf,+x87,+xsave,+xsavec,+xsaveopt,+xsaves,+evex512", data_layout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", link_embedded = false, native_vector_size = 64 : index, target_triple = "x86_64-unknown-linux-gnu", ukernels = true}> {
    hal.executable.export public @forward_dispatch_35_generic_568x8x18176_i32xi32xi8 ordinal(0) layout(#hal.pipeline.layout<push_constants = 2, sets = [<0, bindings = [<0, storage_buffer, ReadOnly>, <1, storage_buffer>]>]>) {
    ^bb0(%arg0: !hal.device):
      %x, %y, %z = flow.dispatch.workgroup_count_from_slice 
      hal.return %x, %y, %z : index, index, index
    }
    builtin.module {
      func.func @forward_dispatch_35_generic_568x8x18176_i32xi32xi8() {
        %cst = arith.constant dense<[0, 4, 8, 12, 16, 20, 24, 28]> : tensor<8xi32>
        %c15_i8 = arith.constant 15 : i8
        %0 = hal.interface.constant.load[0] : i32
        %1 = hal.interface.constant.load[1] : i32
        %2 = arith.index_castui %0 : i32 to index
        %3 = arith.index_castui %1 : i32 to index
        %4 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%2) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<568x18176xi32>>
        %5 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%3) : !flow.dispatch.tensor<writeonly:tensor<568x8x18176xi8>>
        %6 = flow.dispatch.tensor.load %4, offsets = [0, 0], sizes = [568, 18176], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<568x18176xi32>> -> tensor<568x18176xi32>
        %7 = tensor.empty() : tensor<568x8x18176xi8>
        %8 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d1)>, affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} ins(%6, %cst : tensor<568x18176xi32>, tensor<8xi32>) outs(%7 : tensor<568x8x18176xi8>) {
        ^bb0(%in: i32, %in_0: i32, %out: i8):
          %9 = arith.shrsi %in, %in_0 : i32
          %10 = arith.trunci %9 : i32 to i8
          %11 = arith.andi %10, %c15_i8 : i8
          linalg.yield %11 : i8
        } -> tensor<568x8x18176xi8>
        flow.dispatch.tensor.store %8, %5, offsets = [0, 0, 0], sizes = [568, 8, 18176], strides = [1, 1, 1] : tensor<568x8x18176xi8> -> !flow.dispatch.tensor<writeonly:tensor<568x8x18176xi8>>
        return
      }
    }
  }
}
```

After vectorization:

```mlir
func.func @forward_dispatch_35_generic_568x8x18176_i32xi32xi8() {
  %cst = arith.constant dense<15> : vector<1x8x16xi8>
  %c0_i32 = arith.constant 0 : i32
  %c16 = arith.constant 16 : index
  %c64 = arith.constant 64 : index
  %c1 = arith.constant 1 : index
  %c0 = arith.constant 0 : index
  %c18176 = arith.constant 18176 : index
  %c8 = arith.constant 8 : index
  %c568 = arith.constant 568 : index
  %cst_0 = arith.constant dense<[0, 4, 8, 12, 16, 20, 24, 28]> : tensor<8xi32>
  %0 = hal.interface.constant.load[0] : i32
  %1 = hal.interface.constant.load[1] : i32
  %2 = arith.index_castui %0 : i32 to index
  %3 = arith.index_castui %1 : i32 to index
  %4 = hal.interface.binding.subspan set(0) binding(0) type(storage_buffer) alignment(64) offset(%2) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<568x18176xi32>>
  %5 = hal.interface.binding.subspan set(0) binding(1) type(storage_buffer) alignment(64) offset(%3) : !flow.dispatch.tensor<writeonly:tensor<568x8x18176xi8>>
  %workgroup_id_x = hal.interface.workgroup.id[0] : index
  %workgroup_count_x = hal.interface.workgroup.count[0] : index
  %workgroup_id_y = hal.interface.workgroup.id[1] : index
  %workgroup_count_y = hal.interface.workgroup.count[1] : index
  %workgroup_id_z = hal.interface.workgroup.id[2] : index
  %workgroup_count_z = hal.interface.workgroup.count[2] : index
  %6 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%workgroup_id_z]
  %7 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%workgroup_count_z]
  scf.for %arg0 = %6 to %c568 step %7 {
    %8 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%workgroup_id_y]
    %9 = affine.apply affine_map<()[s0] -> (s0 * 8)>()[%workgroup_count_y]
    scf.for %arg1 = %8 to %c8 step %9 {
      %10 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_id_x]
      %11 = affine.apply affine_map<()[s0] -> (s0 * 64)>()[%workgroup_count_x]
      scf.for %arg2 = %10 to %c18176 step %11 {
        %12 = flow.dispatch.tensor.load %5, offsets = [%arg0, %arg1, %arg2], sizes = [8, 8, 64], strides = [1, 1, 1] : !flow.dispatch.tensor<writeonly:tensor<568x8x18176xi8>> -> tensor<8x8x64xi8>
        %13 = flow.dispatch.tensor.load %4, offsets = [%arg0, %arg2], sizes = [8, 64], strides = [1, 1] : !flow.dispatch.tensor<readonly:tensor<568x18176xi32>> -> tensor<8x64xi32>
        %14 = scf.for %arg3 = %c0 to %c8 step %c1 iter_args(%arg4 = %12) -> (tensor<8x8x64xi8>) {
          %15 = scf.for %arg5 = %c0 to %c64 step %c16 iter_args(%arg6 = %arg4) -> (tensor<8x8x64xi8>) {
            %16 = vector.transfer_read %13[%arg3, %arg5], %c0_i32 {in_bounds = [true, true]} : tensor<8x64xi32>, vector<1x16xi32>
            %17 = vector.broadcast %16 : vector<1x16xi32> to vector<8x1x16xi32>
            %18 = vector.transpose %17, [1, 0, 2] : vector<8x1x16xi32> to vector<1x8x16xi32>
            %19 = vector.transfer_read %cst_0[%arg1], %c0_i32 {in_bounds = [true]} : tensor<8xi32>, vector<8xi32>
            %20 = vector.broadcast %19 : vector<8xi32> to vector<1x16x8xi32>
            %21 = vector.transpose %20, [0, 2, 1] : vector<1x16x8xi32> to vector<1x8x16xi32>
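            // Note the <1x8x16> shapes: each i32 vector below fills one
            // 512-bit register, but the i8 results of the trunc/and use
            // only 128 bits of it.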
            %22 = arith.shrsi %18, %21 : vector<1x8x16xi32>
            %23 = arith.trunci %22 : vector<1x8x16xi32> to vector<1x8x16xi8>
            %24 = arith.andi %23, %cst : vector<1x8x16xi8>
            %25 = vector.transfer_write %24, %arg6[%arg3, %c0, %arg5] {in_bounds = [true, true, true]} : vector<1x8x16xi8>, tensor<8x8x64xi8>
            scf.yield %25 : tensor<8x8x64xi8>
          }
          scf.yield %15 : tensor<8x8x64xi8>
        }
        flow.dispatch.tensor.store %14, %5, offsets = [%arg0, %arg1, %arg2], sizes = [8, 8, 64], strides = [1, 1, 1] : tensor<8x8x64xi8> -> !flow.dispatch.tensor<writeonly:tensor<568x8x18176xi8>>
      }
    }
  }
  return
}
```
allieculp commented 1 year ago

@pzread Are you picking this one up?

pzread commented 1 year ago

Yes, this is a bug that will be considered in the upcoming tile-size selection infra plan.

pzread commented 6 months ago

Unassigned myself as I'm not working on this currently.