llvm / llvm-project


Slow code generated for avx2 four-way vector interleave #102666

Open abadams opened 1 month ago

abadams commented 1 month ago

Four-way interleaves with avx2 aren't generating good code. Consider the following:

typedef float floatx8_vec __attribute__((ext_vector_type(8)));
typedef float floatx16_vec __attribute__((ext_vector_type(16)));

auto interleave2(floatx8_vec a, floatx8_vec b) {
    return __builtin_shufflevector(a, b, 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15);
}

auto interleave2(floatx16_vec a, floatx16_vec b) {
    return __builtin_shufflevector(a, b, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
}

auto interleave4(floatx8_vec a, floatx8_vec b, floatx8_vec c, floatx8_vec &d) {
    return interleave2(interleave2(a, c), interleave2(b, d));
}

With -O3 -mavx2 it generates good code for the interleave2 functions, but makes a mess of interleave4. It should compile to 16 unpckh/unpckl/vperm2 instructions (18 cycles on Skylake according to llvm-mca), but instead it generates 44 vshuf/vperm/blend instructions (32 cycles). See below (copy-pasted from godbolt):

interleave2(float vector[8], float vector[8]):
        vunpckhps       ymm2, ymm0, ymm1
        vunpcklps       ymm1, ymm0, ymm1
        vperm2f128      ymm0, ymm1, ymm2, 32
        vperm2f128      ymm1, ymm1, ymm2, 49
        ret

interleave2(float vector[16], float vector[16]):
        push    rbp
        mov     rbp, rsp
        and     rsp, -64
        sub     rsp, 64
        vmovaps ymm0, ymmword ptr [rbp + 16]
        vmovaps ymm2, ymmword ptr [rbp + 48]
        vmovaps ymm1, ymmword ptr [rbp + 80]
        vmovaps ymm3, ymmword ptr [rbp + 112]
        vunpckhps       ymm4, ymm0, ymm1
        vunpcklps       ymm1, ymm0, ymm1
        vperm2f128      ymm0, ymm1, ymm4, 32
        vperm2f128      ymm1, ymm1, ymm4, 49
        vunpckhps       ymm4, ymm2, ymm3
        vunpcklps       ymm3, ymm2, ymm3
        vperm2f128      ymm2, ymm3, ymm4, 32
        vperm2f128      ymm3, ymm3, ymm4, 49
        mov     rsp, rbp
        pop     rbp
        ret

interleave4(float vector[8], float vector[8], float vector[8], float vector[8]&):
        vmovaps ymm3, ymmword ptr [rdi]
        vshufps xmm4, xmm2, xmm2, 212
        vpermpd ymm4, ymm4, 96
        vshufps xmm5, xmm0, xmm0, 212
        vpermpd ymm5, ymm5, 212
        vblendps        ymm4, ymm5, ymm4, 204
        vshufps xmm5, xmm1, xmm1, 96
        vpermpd ymm5, ymm5, 212
        vshufps xmm6, xmm3, xmm3, 96
        vpermpd ymm6, ymm6, 96
        vblendps        ymm5, ymm5, ymm6, 204
        vblendps        ymm6, ymm4, ymm5, 170
        vshufps xmm4, xmm2, xmm2, 246
        vpermpd ymm4, ymm4, 96
        vshufps xmm5, xmm0, xmm0, 246
        vpermpd ymm5, ymm5, 212
        vblendps        ymm4, ymm5, ymm4, 204
        vshufps xmm5, xmm1, xmm1, 232
        vpermpd ymm5, ymm5, 212
        vshufps xmm7, xmm3, xmm3, 232
        vpermpd ymm7, ymm7, 96
        vblendps        ymm5, ymm5, ymm7, 204
        vblendps        ymm4, ymm4, ymm5, 170
        vshufps ymm5, ymm2, ymm2, 212
        vpermpd ymm5, ymm5, 232
        vshufps ymm7, ymm0, ymm0, 212
        vpermpd ymm7, ymm7, 246
        vblendps        ymm5, ymm7, ymm5, 204
        vshufps ymm7, ymm1, ymm1, 96
        vpermpd ymm7, ymm7, 246
        vshufps ymm8, ymm3, ymm3, 96
        vpermpd ymm8, ymm8, 232
        vblendps        ymm7, ymm7, ymm8, 204
        vblendps        ymm5, ymm5, ymm7, 170
        vshufps ymm2, ymm2, ymm2, 246
        vpermpd ymm2, ymm2, 232
        vshufps ymm0, ymm0, ymm0, 246
        vpermpd ymm0, ymm0, 246
        vblendps        ymm0, ymm0, ymm2, 204
        vshufps ymm1, ymm1, ymm1, 232
        vpermpd ymm1, ymm1, 246
        vshufps ymm2, ymm3, ymm3, 232
        vpermpd ymm2, ymm2, 232
        vblendps        ymm1, ymm1, ymm2, 204
        vblendps        ymm3, ymm0, ymm1, 170
        vmovaps ymm0, ymm6
        vmovaps ymm1, ymm4
        vmovaps ymm2, ymm5
        ret
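For reference, here is the 16-shuffle sequence the issue expects, spelled out with AVX intrinsics. This is an illustrative sketch only: interleave2_ref/interleave4_ref are hypothetical names not taken from the issue, and the compiler may still re-combine these shuffles the same way it does for the builtin version.

#include <immintrin.h>

// Two-way interleave of two 8-float vectors: 2 in-lane unpacks + 2 cross-lane shuffles.
static inline void interleave2_ref(__m256 a, __m256 b, __m256 &lo, __m256 &hi) {
    __m256 even = _mm256_unpacklo_ps(a, b);        // a0 b0 a1 b1 | a4 b4 a5 b5
    __m256 odd  = _mm256_unpackhi_ps(a, b);        // a2 b2 a3 b3 | a6 b6 a7 b7
    lo = _mm256_permute2f128_ps(even, odd, 0x20);  // a0 b0 a1 b1 a2 b2 a3 b3
    hi = _mm256_permute2f128_ps(even, odd, 0x31);  // a4 b4 a5 b5 a6 b6 a7 b7
}

// Four-way interleave built from four two-way interleaves: 16 shuffles total.
static inline void interleave4_ref(__m256 a, __m256 b, __m256 c, __m256 d, __m256 out[4]) {
    __m256 ac_lo, ac_hi, bd_lo, bd_hi;
    interleave2_ref(a, c, ac_lo, ac_hi);            // 4 shuffles
    interleave2_ref(b, d, bd_lo, bd_hi);            // 4 shuffles
    interleave2_ref(ac_lo, bd_lo, out[0], out[1]);  // 4 shuffles
    interleave2_ref(ac_hi, bd_hi, out[2], out[3]);  // 4 shuffles
}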

If you force materialization of the intermediate values, you get the expected code (minus the spill/reload used to force materialization):

auto interleave4_v2(floatx8_vec a, floatx8_vec b, floatx8_vec c, floatx8_vec d) {
    auto ac = interleave2(a, c);
    auto bd = interleave2(b, d);

    decltype(ac) mem[2];
    mem[0] = ac;
    mem[1] = bd;
    asm volatile ("" : "+m"(mem));
    ac = mem[0];
    bd = mem[1];

    return interleave2(ac, bd);
}
interleave4_v2(float vector[8], float vector[8], float vector[8], float vector[8]):
        push    rbp
        mov     rbp, rsp
        and     rsp, -64
        sub     rsp, 192
        vunpckhps       ymm4, ymm0, ymm2
        vunpcklps       ymm0, ymm0, ymm2
        vperm2f128      ymm2, ymm0, ymm4, 32
        vperm2f128      ymm0, ymm0, ymm4, 49
        vunpckhps       ymm4, ymm1, ymm3
        vunpcklps       ymm1, ymm1, ymm3
        vperm2f128      ymm3, ymm1, ymm4, 32
        vperm2f128      ymm1, ymm1, ymm4, 49
        vmovaps ymmword ptr [rsp + 32], ymm0
        vmovaps ymmword ptr [rsp], ymm2
        vmovaps ymmword ptr [rsp + 96], ymm1
        vmovaps ymmword ptr [rsp + 64], ymm3
        vmovaps ymm0, ymmword ptr [rsp]
        vmovaps ymm2, ymmword ptr [rsp + 32]
        vmovaps ymm1, ymmword ptr [rsp + 64]
        vmovaps ymm3, ymmword ptr [rsp + 96]
        vunpckhps       ymm4, ymm0, ymm1
        vunpcklps       ymm1, ymm0, ymm1
        vperm2f128      ymm0, ymm1, ymm4, 32
        vperm2f128      ymm1, ymm1, ymm4, 49
        vunpckhps       ymm4, ymm2, ymm3
        vunpcklps       ymm3, ymm2, ymm3
        vperm2f128      ymm2, ymm3, ymm4, 32
        vperm2f128      ymm3, ymm3, ymm4, 49
        mov     rsp, rbp
        pop     rbp
        ret

If this is hard to fix, I'd love a workaround that doesn't involve memory operations or inline assembly. Tagging @RKSimon, who according to git history has been working on x86 shuffle lowering most recently.

llvmbot commented 1 month ago

@llvm/issue-subscribers-backend-x86

Author: Andrew Adams (abadams)

abadams commented 1 month ago

Actually there's a 12-instruction version. You don't need to bother with the first 4 vperm2f128s, because you can account for them in the final 4. When you write this directly, LLVM does the right thing, so I have my workaround. But ideally other ways of writing a four-way interleave should generate something similar.

#include <cstring>

typedef float floatx8_vec __attribute__((ext_vector_type(8)));

auto unpckl(floatx8_vec a, floatx8_vec b) {
    return __builtin_shufflevector(a, b, 0, 8, 1, 9, 4, 12, 5, 13);
}

auto unpckh(floatx8_vec a, floatx8_vec b) {
    return __builtin_shufflevector(a, b, 2, 10, 3, 11, 6, 14, 7, 15);
}

void interleave4_v3(float *a, float *b, float *c, float *d, float *out) {
    floatx8_vec va, vb, vc, vd;
    std::memcpy(&va, a, sizeof(va));
    std::memcpy(&vb, b, sizeof(vb));
    std::memcpy(&vc, c, sizeof(vc));
    std::memcpy(&vd, d, sizeof(vd));

    auto ac_lo = unpckl(va, vc);
    auto ac_hi = unpckh(va, vc);
    auto bd_lo = unpckl(vb, vd);
    auto bd_hi = unpckh(vb, vd);

    auto abcd_lo_lo = unpckl(ac_lo, bd_lo);
    auto abcd_lo_hi = unpckh(ac_lo, bd_lo);
    auto abcd_hi_lo = unpckl(ac_hi, bd_hi);
    auto abcd_hi_hi = unpckh(ac_hi, bd_hi);

    auto out0 = __builtin_shufflevector(abcd_lo_lo, abcd_lo_hi, 0, 1, 2, 3, 8, 9, 10, 11);
    auto out1 = __builtin_shufflevector(abcd_hi_lo, abcd_hi_hi, 0, 1, 2, 3, 8, 9, 10, 11);
    auto out2 = __builtin_shufflevector(abcd_lo_lo, abcd_lo_hi, 4, 5, 6, 7, 12, 13, 14, 15);
    auto out3 = __builtin_shufflevector(abcd_hi_lo, abcd_hi_hi, 4, 5, 6, 7, 12, 13, 14, 15);

    std::memcpy(out, &out0, sizeof(out0));
    std::memcpy(out + 8, &out1, sizeof(out1));
    std::memcpy(out + 16, &out2, sizeof(out2));
    std::memcpy(out + 24, &out3, sizeof(out3));
}

interleave4_v3(float*, float*, float*, float*, float*):
        vmovups ymm0, ymmword ptr [rdi]
        vmovups ymm1, ymmword ptr [rsi]
        vmovups ymm2, ymmword ptr [rdx]
        vmovups ymm3, ymmword ptr [rcx]
        vunpcklps       ymm4, ymm0, ymm2
        vunpckhps       ymm0, ymm0, ymm2
        vunpcklps       ymm2, ymm1, ymm3
        vunpckhps       ymm1, ymm1, ymm3
        vunpcklps       ymm3, ymm4, ymm2
        vunpckhps       ymm2, ymm4, ymm2
        vunpcklps       ymm4, ymm0, ymm1
        vunpckhps       ymm0, ymm0, ymm1
        vinsertf128     ymm1, ymm3, xmm2, 1
        vinsertf128     ymm5, ymm4, xmm0, 1
        vperm2f128      ymm2, ymm3, ymm2, 49
        vperm2f128      ymm0, ymm4, ymm0, 49
        vmovups ymmword ptr [r8], ymm1
        vmovups ymmword ptr [r8 + 32], ymm5
        vmovups ymmword ptr [r8 + 64], ymm2
        vmovups ymmword ptr [r8 + 96], ymm0
        vzeroupper
        ret
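For anyone adopting the workaround, a minimal correctness check could look like the following. It is a sketch of mine, not from the issue: compile it together with the interleave4_v3 snippet above (clang, for ext_vector_type), and the test values are arbitrary.

#include <cassert>

int main() {
    float a[8], b[8], c[8], d[8], out[32];
    for (int i = 0; i < 8; i++) {
        a[i] = i;
        b[i] = 100 + i;
        c[i] = 200 + i;
        d[i] = 300 + i;
    }
    interleave4_v3(a, b, c, d, out);
    for (int i = 0; i < 8; i++) {
        // The four-way interleave should produce a0 b0 c0 d0 a1 b1 c1 d1 ...
        assert(out[4 * i + 0] == a[i]);
        assert(out[4 * i + 1] == b[i]);
        assert(out[4 * i + 2] == c[i]);
        assert(out[4 * i + 3] == d[i]);
    }
}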