llvm / llvm-project

The LLVM Project is a collection of modular and reusable compiler and toolchain technologies.
http://llvm.org

Spurious optimization on Zen 4 from valignq + vpalignr to vmovdqa64+vpermt2w #79799

Open Validark opened 9 months ago

Validark commented 9 months ago

Godbolt link: https://zig.godbolt.org/z/vx95jhzro

const std = @import("std");
const builtin = @import("builtin");

const chunk_len = if (std.mem.eql(u8, builtin.cpu.model.name, "znver4")) 64 else 32;
const Chunk = @Vector(chunk_len, u8);

fn prev(comptime N: comptime_int, a: Chunk, b: Chunk) Chunk {
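    // Prepends the last N bytes of b onto a:
    // result[i] = b[chunk_len - N + i] for i < N, and a[i - N] otherwise.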
    return std.simd.mergeShift(b, a, chunk_len - N);
}

export fn prev1(a: Chunk, b: Chunk) Chunk {
    return prev(1, a, b);
}

export fn prev2(a: Chunk, b: Chunk) Chunk {
    return prev(2, a, b);
}

export fn prev3(a: Chunk, b: Chunk) Chunk {
    return prev(3, a, b);
}

emit:

prev1:
        valignq zmm1, zmm0, zmm1, 6
        vpalignr        zmm0, zmm0, zmm1, 15
        ret

.LCPI1_0:
        .short  63
        .short  0
        .short  1
        .short  2
        .short  3
        .short  4
        .short  5
        .short  6
        .short  7
        .short  8
        .short  9
        .short  10
        .short  11
        .short  12
        .short  13
        .short  14
        .short  15
        .short  16
        .short  17
        .short  18
        .short  19
        .short  20
        .short  21
        .short  22
        .short  23
        .short  24
        .short  25
        .short  26
        .short  27
        .short  28
        .short  29
        .short  30
prev2:
        vmovdqa64       zmm2, zmmword ptr [rip + .LCPI1_0]
        vpermt2w        zmm0, zmm2, zmm1
        ret

prev3:
        valignq zmm1, zmm0, zmm1, 6
        vpalignr        zmm0, zmm0, zmm1, 13
        ret

Is this a real optimization or a mistake? The compiler also does not revert to valignq+vpalignr, even for size-optimized builds. It could be emitted like so in ReleaseSmall:

prev2: 
        valignq zmm1, zmm0, zmm1, 6
        vpalignr        zmm0, zmm0, zmm1, 14
        ret

According to uops.info, VALIGNQ (ZMM, ZMM, ZMM, I8) has a latency of 4 and VPALIGNR (ZMM, ZMM, ZMM, I8) has a latency of 2. Compare that to VMOVDQA64 (ZMM, M512) with a latency of [≤9;≤11] and VPERMT2W (ZMM, ZMM, ZMM) with a latency of 5;6. Is the compiler banking on the idea that maybe the CPU will hoist the load?
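
(Back-of-the-envelope from those numbers: the valignq → vpalignr chain is about 4 + 2 = 6 cycles of dependent latency from the register inputs and needs no constant, while vpermt2w is 5;6 cycles from its register inputs plus a vmovdqa64 load of the index vector that does not depend on the inputs. So whether the permute wins depends on whether that load can be hoisted or overlapped, and on sparing a register for the table.)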

llvmbot commented 9 months ago

@llvm/issue-subscribers-backend-x86

Author: Niles Salter (Validark)

RKSimon commented 9 months ago

define dso_local <64 x i8> @prev1(<64 x i8> %0, <64 x i8> %1) local_unnamed_addr {
  %3 = shufflevector <64 x i8> %1, <64 x i8> %0, <64 x i32> <i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126>
  ret <64 x i8> %3
}

define dso_local <64 x i8> @prev2(<64 x i8> %0, <64 x i8> %1) local_unnamed_addr {
  %3 = shufflevector <64 x i8> %1, <64 x i8> %0, <64 x i32> <i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125>
  ret <64 x i8> %3
}

define dso_local <64 x i8> @prev3(<64 x i8> %0, <64 x i8> %1) local_unnamed_addr {
  %3 = shufflevector <64 x i8> %1, <64 x i8> %0, <64 x i32> <i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124>
  ret <64 x i8> %3
}

RKSimon commented 9 months ago

@Validark Interestingly, the Zig IR output doesn't use the optsize (or minsize) function attributes. It doesn't help here, but it's something I might look at adding support for, to decide when to fold multiple shuffles into a variable shuffle mask.
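
(For reference, a minimal hypothetical sketch of how those attributes appear in textual LLVM IR; the function name and trivial body below are placeholders, not taken from the Zig output:)

; placeholder function showing the attribute-group syntax for the
; optsize/minsize function attributes mentioned above
define <64 x i8> @size_opt_example(<64 x i8> %a) #0 {
  ret <64 x i8> %a
}

attributes #0 = { optsize minsize }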

Validark commented 1 month ago

It's even worse for 32-byte vectors. VALIGNQ (YMM, YMM, YMM, I8) has a latency of 3 and VPALIGNR (YMM, YMM, YMM, I8) has a latency of 1, so the pair is 4 cycles, whereas VPERMT2W on 32-byte vectors is also 4 cycles but additionally requires a load from memory and/or an index table occupying a register.
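
A sketch of the corresponding 32-byte prev2 with the same two-instruction pattern (untested; assumes AVX-512VL so valignq is available on ymm registers, and the prev2_ymm label is only for illustration):

prev2_ymm:
        valignq ymm1, ymm0, ymm1, 2
        vpalignr        ymm0, ymm0, ymm1, 14
        ret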