[aarch64] `vbslq_u8` / `BSL` is being folded into `AND`+`OR` and not being optimized back #88690

Open · Validark opened this issue 5 months ago

Validark commented 5 months ago

This movemask routine ([Godbolt link](https://godbolt.org/z/feq485549)):

#include <arm_neon.h>

uint64_t vmovmaskq_u8_(const uint8x16_t p0, const uint8x16_t p1, const uint8x16_t p2, const uint8x16_t p3) {
  const uint8x16_t bitmask1 = { 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10,
                                0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10};
  const uint8x16_t bitmask2 = { 0x02, 0x20, 0x02, 0x20, 0x02, 0x20, 0x02, 0x20,
                                0x02, 0x20, 0x02, 0x20, 0x02, 0x20, 0x02, 0x20};
  const uint8x16_t bitmask3 = { 0x04, 0x40, 0x04, 0x40, 0x04, 0x40, 0x04, 0x40,
                                0x04, 0x40, 0x04, 0x40, 0x04, 0x40, 0x04, 0x40};
  const uint8x16_t bitmask4 = { 0x08, 0x80, 0x08, 0x80, 0x08, 0x80, 0x08, 0x80,
                                0x08, 0x80, 0x08, 0x80, 0x08, 0x80, 0x08, 0x80};

  uint8x16_t t0 = vandq_u8(p0, bitmask1);
  uint8x16_t t1 = vbslq_u8(bitmask2, p1, t0);
  uint8x16_t t2 = vbslq_u8(bitmask3, p2, t1);
  uint8x16_t tmp = vbslq_u8(bitmask4, p3, t2);
  uint8x16_t sum = vpaddq_u8(tmp, tmp);
  return vgetq_lane_u64(vreinterpretq_u64_u8(sum), 0);
}
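
For context, here is a minimal usage sketch for such a movemask routine. The `main` driver, the test data, and the assumption that the four inputs are byte-wise comparison masks (0x00/0xFF lanes, e.g. from `vceqq_u8`) are illustrative additions, not part of the original report:

```c
#include <arm_neon.h>
#include <stdio.h>

uint64_t vmovmaskq_u8_(uint8x16_t, uint8x16_t, uint8x16_t, uint8x16_t);

int main(void) {
    // Illustrative input: 64 bytes; lanes equal to the needle become 0xFF via vceqq_u8.
    uint8_t buf[64];
    for (int i = 0; i < 64; i++) buf[i] = (uint8_t)(i & 7);
    uint8x16_t needle = vdupq_n_u8(3);
    uint8x16_t m0 = vceqq_u8(vld1q_u8(buf +  0), needle);
    uint8x16_t m1 = vceqq_u8(vld1q_u8(buf + 16), needle);
    uint8x16_t m2 = vceqq_u8(vld1q_u8(buf + 32), needle);
    uint8x16_t m3 = vceqq_u8(vld1q_u8(buf + 48), needle);
    // Collects one bit per input byte (in the routine's interleaved order) into a 64-bit mask.
    printf("%016llx\n", (unsigned long long)vmovmaskq_u8_(m0, m1, m2, m3));
    return 0;
}
```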

Compiling `vmovmaskq_u8_` results in this assembly (constant-pool table data excluded):

vmovmaskq_u8_:                          // @vmovmaskq_u8_
        adrp    x8, .LCPI0_0
        ldr     q4, [x8, :lo12:.LCPI0_0]
        adrp    x8, .LCPI0_1
        and     v0.16b, v0.16b, v4.16b
        ldr     q4, [x8, :lo12:.LCPI0_1]
        adrp    x8, .LCPI0_2
        and     v1.16b, v1.16b, v4.16b
        orr     v0.16b, v1.16b, v0.16b
        ldr     q1, [x8, :lo12:.LCPI0_2]
        and     v1.16b, v2.16b, v1.16b
        movi    v2.8h, #8
        fneg    v2.8h, v2.8h
        and     v2.16b, v3.16b, v2.16b
        orr     v1.16b, v1.16b, v2.16b
        orr     v0.16b, v0.16b, v1.16b
        addp    v0.16b, v0.16b, v0.16b
        fmov    x0, d0
        ret

Using inline assembly, one of the following should be the expected codegen (I don't know which operand order BSL is supposed to use):

vmovmaskq_u8_:                          // @vmovmaskq_u8_
        adrp    x8, .LCPI0_0
        ldr     q4, [x8, :lo12:.LCPI0_0]
        adrp    x8, .LCPI0_1
        and     v0.16b, v0.16b, v4.16b
        ldr     q4, [x8, :lo12:.LCPI0_1]
        adrp    x8, .LCPI0_2
        bsl     v0.16b, v1.16b, v4.16b
        ldr     q1, [x8, :lo12:.LCPI0_2]
        adrp    x8, .LCPI0_3
        bsl     v0.16b, v2.16b, v1.16b
        ldr     q1, [x8, :lo12:.LCPI0_3]
        bsl     v0.16b, v3.16b, v1.16b
        addp    v0.16b, v0.16b, v0.16b
        fmov    x0, d0
        ret

vmovmaskq_u8_:                          // @vmovmaskq_u8_
        adrp    x8, .LCPI0_0
        ldr     q4, [x8, :lo12:.LCPI0_0]
        adrp    x8, .LCPI0_1
        and     v0.16b, v0.16b, v4.16b
        ldr     q4, [x8, :lo12:.LCPI0_1]
        adrp    x8, .LCPI0_2
        bsl     v4.16b, v1.16b, v0.16b
        ldr     q1, [x8, :lo12:.LCPI0_2]
        adrp    x8, .LCPI0_3
        bsl     v1.16b, v2.16b, v0.16b
        ldr     q1, [x8, :lo12:.LCPI0_3]
        bsl     v1.16b, v3.16b, v0.16b
        addp    v0.16b, v0.16b, v0.16b
        fmov    x0, d0
        ret
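
For reference, per the Arm instruction description, `BSL Vd.16b, Vn.16b, Vm.16b` uses the destination register as the selector: each result bit comes from Vn where the corresponding Vd bit is set and from Vm where it is clear, i.e. `Vd = (Vd & Vn) | (~Vd & Vm)`. A minimal sketch of pinning the instruction with GCC/Clang extended inline assembly follows; the wrapper name is illustrative, not an existing intrinsic:

```c
#include <arm_neon.h>

// Hypothetical wrapper: keep vbslq_u8(mask, a, b) as a single BSL.
// BSL reads the selector from its destination register, so the mask is bound
// to the read/write operand ("+w") and the two data sources are plain inputs.
static inline uint8x16_t vbslq_u8_forced(uint8x16_t mask, uint8x16_t a, uint8x16_t b) {
    uint8x16_t d = mask;
    __asm__("bsl %0.16b, %1.16b, %2.16b" : "+w"(d) : "w"(a), "w"(b));
    return d;  // == (mask & a) | (~mask & b), same as vbslq_u8(mask, a, b)
}
```
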
llvmbot commented 5 months ago

@llvm/issue-subscribers-backend-aarch64

Author: Niles Salter (Validark)
