cuda-mode / ring-attention

ring-attention experiments
Apache License 2.0
89 stars 10 forks source link

[info] flash attention benchmark #10

Closed — Iron-Bound closed this issue 6 months ago

Iron-Bound commented 7 months ago

results from the dual RTX A5000 system

causal=False, headdim=64, batch_size=32, seqlen=512

Flash2 fwd: 84.08 TFLOPs/s, bwd: 52.88 TFLOPs/s, fwd + bwd: 59.15 TFLOPs/s
Pytorch fwd: 14.52 TFLOPs/s, bwd: 17.06 TFLOPs/s, fwd + bwd: 16.25 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=64, batch_size=16, seqlen=1024

Flash2 fwd: 81.02 TFLOPs/s, bwd: 62.54 TFLOPs/s, fwd + bwd: 66.90 TFLOPs/s
Pytorch fwd: 16.72 TFLOPs/s, bwd: 19.12 TFLOPs/s, fwd + bwd: 18.36 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=64, batch_size=8, seqlen=2048

Flash2 fwd: 81.31 TFLOPs/s, bwd: 70.07 TFLOPs/s, fwd + bwd: 72.95 TFLOPs/s
Pytorch fwd: 15.50 TFLOPs/s, bwd: 18.70 TFLOPs/s, fwd + bwd: 17.66 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=64, batch_size=4, seqlen=4096

Flash2 fwd: 81.69 TFLOPs/s, bwd: 74.80 TFLOPs/s, fwd + bwd: 76.64 TFLOPs/s
Pytorch fwd: 18.56 TFLOPs/s, bwd: 19.67 TFLOPs/s, fwd + bwd: 19.34 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=64, batch_size=2, seqlen=8192

Flash2 fwd: 81.86 TFLOPs/s, bwd: 77.42 TFLOPs/s, fwd + bwd: 78.64 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=64, batch_size=1, seqlen=16384

Flash2 fwd: 82.60 TFLOPs/s, bwd: 78.50 TFLOPs/s, fwd + bwd: 79.63 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=32, seqlen=512

Flash2 fwd: 82.91 TFLOPs/s, bwd: 49.25 TFLOPs/s, fwd + bwd: 55.71 TFLOPs/s
Pytorch fwd: 20.51 TFLOPs/s, bwd: 26.73 TFLOPs/s, fwd + bwd: 24.60 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=16, seqlen=1024

Flash2 fwd: 79.48 TFLOPs/s, bwd: 57.66 TFLOPs/s, fwd + bwd: 62.57 TFLOPs/s
Pytorch fwd: 25.90 TFLOPs/s, bwd: 32.16 TFLOPs/s, fwd + bwd: 30.08 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=8, seqlen=2048

Flash2 fwd: 80.54 TFLOPs/s, bwd: 64.37 TFLOPs/s, fwd + bwd: 68.29 TFLOPs/s
Pytorch fwd: 26.50 TFLOPs/s, bwd: 33.92 TFLOPs/s, fwd + bwd: 31.41 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=4, seqlen=4096

Flash2 fwd: 82.49 TFLOPs/s, bwd: 68.40 TFLOPs/s, fwd + bwd: 71.91 TFLOPs/s
Pytorch fwd: 31.77 TFLOPs/s, bwd: 35.66 TFLOPs/s, fwd + bwd: 34.46 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=2, seqlen=8192

Flash2 fwd: 83.24 TFLOPs/s, bwd: 70.70 TFLOPs/s, fwd + bwd: 73.88 TFLOPs/s
Pytorch fwd: 32.55 TFLOPs/s, bwd: 36.49 TFLOPs/s, fwd + bwd: 35.27 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=1, seqlen=16384

Flash2 fwd: 83.51 TFLOPs/s, bwd: 70.94 TFLOPs/s, fwd + bwd: 74.13 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=64, batch_size=32, seqlen=512

Flash2 fwd: 51.81 TFLOPs/s, bwd: 36.22 TFLOPs/s, fwd + bwd: 39.62 TFLOPs/s
Pytorch fwd: 5.24 TFLOPs/s, bwd: 8.53 TFLOPs/s, fwd + bwd: 7.23 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=64, batch_size=16, seqlen=1024

Flash2 fwd: 68.11 TFLOPs/s, bwd: 46.40 TFLOPs/s, fwd + bwd: 51.05 TFLOPs/s
Pytorch fwd: 5.43 TFLOPs/s, bwd: 9.60 TFLOPs/s, fwd + bwd: 7.87 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=64, batch_size=8, seqlen=2048

Flash2 fwd: 70.29 TFLOPs/s, bwd: 59.55 TFLOPs/s, fwd + bwd: 62.27 TFLOPs/s
Pytorch fwd: 5.41 TFLOPs/s, bwd: 9.38 TFLOPs/s, fwd + bwd: 7.76 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=64, batch_size=4, seqlen=4096

Flash2 fwd: 74.57 TFLOPs/s, bwd: 65.41 TFLOPs/s, fwd + bwd: 67.79 TFLOPs/s
Pytorch fwd: 5.60 TFLOPs/s, bwd: 9.81 TFLOPs/s, fwd + bwd: 8.08 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=64, batch_size=2, seqlen=8192

Flash2 fwd: 75.38 TFLOPs/s, bwd: 71.20 TFLOPs/s, fwd + bwd: 72.35 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=64, batch_size=1, seqlen=16384

Flash2 fwd: 75.68 TFLOPs/s, bwd: 73.99 TFLOPs/s, fwd + bwd: 74.46 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=32, seqlen=512

Flash2 fwd: 59.04 TFLOPs/s, bwd: 34.96 TFLOPs/s, fwd + bwd: 39.57 TFLOPs/s
Pytorch fwd: 7.99 TFLOPs/s, bwd: 13.42 TFLOPs/s, fwd + bwd: 11.24 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=16, seqlen=1024

Flash2 fwd: 67.22 TFLOPs/s, bwd: 45.18 TFLOPs/s, fwd + bwd: 49.85 TFLOPs/s
Pytorch fwd: 9.14 TFLOPs/s, bwd: 16.29 TFLOPs/s, fwd + bwd: 13.31 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=8, seqlen=2048

Flash2 fwd: 67.44 TFLOPs/s, bwd: 54.87 TFLOPs/s, fwd + bwd: 57.96 TFLOPs/s
Pytorch fwd: 9.58 TFLOPs/s, bwd: 17.10 TFLOPs/s, fwd + bwd: 13.97 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=4, seqlen=4096

Flash2 fwd: 68.65 TFLOPs/s, bwd: 62.42 TFLOPs/s, fwd + bwd: 64.08 TFLOPs/s
Pytorch fwd: 10.09 TFLOPs/s, bwd: 18.08 TFLOPs/s, fwd + bwd: 14.75 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=2, seqlen=8192

Flash2 fwd: 68.06 TFLOPs/s, bwd: 67.21 TFLOPs/s, fwd + bwd: 67.45 TFLOPs/s
Pytorch fwd: 10.03 TFLOPs/s, bwd: 18.42 TFLOPs/s, fwd + bwd: 14.87 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=1, seqlen=16384

Flash2 fwd: 65.36 TFLOPs/s, bwd: 69.11 TFLOPs/s, fwd + bwd: 68.00 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

Iron-Bound commented 6 months ago

Benchmark results with Triton==2.2.0

causal=False, headdim=64, batch_size=32, seqlen=512

Flash2 fwd: 85.28 TFLOPs/s, bwd: 53.39 TFLOPs/s, fwd + bwd: 59.78 TFLOPs/s
Pytorch fwd: 14.53 TFLOPs/s, bwd: 17.08 TFLOPs/s, fwd + bwd: 16.26 TFLOPs/s
Triton fwd: 66.27 TFLOPs/s, bwd: 45.67 TFLOPs/s, fwd + bwd: 50.12 TFLOPs/s

causal=False, headdim=64, batch_size=16, seqlen=1024

Flash2 fwd: 81.81 TFLOPs/s, bwd: 63.92 TFLOPs/s, fwd + bwd: 68.18 TFLOPs/s
Pytorch fwd: 16.77 TFLOPs/s, bwd: 19.16 TFLOPs/s, fwd + bwd: 18.41 TFLOPs/s
Triton fwd: 68.82 TFLOPs/s, bwd: 52.40 TFLOPs/s, fwd + bwd: 56.23 TFLOPs/s

causal=False, headdim=64, batch_size=8, seqlen=2048

Flash2 fwd: 82.21 TFLOPs/s, bwd: 71.46 TFLOPs/s, fwd + bwd: 74.23 TFLOPs/s
Pytorch fwd: 15.56 TFLOPs/s, bwd: 18.72 TFLOPs/s, fwd + bwd: 17.69 TFLOPs/s
Triton fwd: 73.65 TFLOPs/s, bwd: 55.19 TFLOPs/s, fwd + bwd: 59.44 TFLOPs/s

causal=False, headdim=64, batch_size=4, seqlen=4096

Flash2 fwd: 83.81 TFLOPs/s, bwd: 76.09 TFLOPs/s, fwd + bwd: 78.15 TFLOPs/s
Pytorch fwd: 18.59 TFLOPs/s, bwd: 19.61 TFLOPs/s, fwd + bwd: 19.31 TFLOPs/s
Triton fwd: 74.10 TFLOPs/s, bwd: 56.55 TFLOPs/s, fwd + bwd: 60.66 TFLOPs/s

causal=False, headdim=64, batch_size=2, seqlen=8192

Flash2 fwd: 84.11 TFLOPs/s, bwd: 78.16 TFLOPs/s, fwd + bwd: 79.78 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 74.33 TFLOPs/s, bwd: 56.87 TFLOPs/s, fwd + bwd: 60.96 TFLOPs/s

causal=False, headdim=64, batch_size=1, seqlen=16384

Flash2 fwd: 83.47 TFLOPs/s, bwd: 78.51 TFLOPs/s, fwd + bwd: 79.86 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 75.31 TFLOPs/s, bwd: 45.66 TFLOPs/s, fwd + bwd: 51.45 TFLOPs/s

causal=False, headdim=128, batch_size=32, seqlen=512

Flash2 fwd: 82.37 TFLOPs/s, bwd: 49.15 TFLOPs/s, fwd + bwd: 55.56 TFLOPs/s
Pytorch fwd: 20.32 TFLOPs/s, bwd: 26.43 TFLOPs/s, fwd + bwd: 24.34 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=16, seqlen=1024

Flash2 fwd: 84.91 TFLOPs/s, bwd: 57.65 TFLOPs/s, fwd + bwd: 63.47 TFLOPs/s
Pytorch fwd: 26.11 TFLOPs/s, bwd: 32.19 TFLOPs/s, fwd + bwd: 30.18 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=8, seqlen=2048

Flash2 fwd: 81.42 TFLOPs/s, bwd: 64.40 TFLOPs/s, fwd + bwd: 68.49 TFLOPs/s
Pytorch fwd: 26.35 TFLOPs/s, bwd: 33.90 TFLOPs/s, fwd + bwd: 31.33 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=4, seqlen=4096

Flash2 fwd: 83.41 TFLOPs/s, bwd: 68.49 TFLOPs/s, fwd + bwd: 72.18 TFLOPs/s
Pytorch fwd: 31.66 TFLOPs/s, bwd: 35.50 TFLOPs/s, fwd + bwd: 34.31 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=2, seqlen=8192

Flash2 fwd: 82.60 TFLOPs/s, bwd: 70.66 TFLOPs/s, fwd + bwd: 73.71 TFLOPs/s
Pytorch fwd: 32.46 TFLOPs/s, bwd: 36.36 TFLOPs/s, fwd + bwd: 35.15 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=False, headdim=128, batch_size=1, seqlen=16384

Flash2 fwd: 82.61 TFLOPs/s, bwd: 70.71 TFLOPs/s, fwd + bwd: 73.75 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=64, batch_size=32, seqlen=512

Flash2 fwd: 51.72 TFLOPs/s, bwd: 36.13 TFLOPs/s, fwd + bwd: 39.53 TFLOPs/s
Pytorch fwd: 5.23 TFLOPs/s, bwd: 8.54 TFLOPs/s, fwd + bwd: 7.23 TFLOPs/s
Triton fwd: 47.24 TFLOPs/s, bwd: 37.02 TFLOPs/s, fwd + bwd: 39.46 TFLOPs/s

causal=True, headdim=64, batch_size=16, seqlen=1024

Flash2 fwd: 68.70 TFLOPs/s, bwd: 46.28 TFLOPs/s, fwd + bwd: 51.04 TFLOPs/s
Pytorch fwd: 5.41 TFLOPs/s, bwd: 9.55 TFLOPs/s, fwd + bwd: 7.83 TFLOPs/s
Triton fwd: 56.34 TFLOPs/s, bwd: 43.70 TFLOPs/s, fwd + bwd: 46.70 TFLOPs/s

causal=True, headdim=64, batch_size=8, seqlen=2048

Flash2 fwd: 69.78 TFLOPs/s, bwd: 57.52 TFLOPs/s, fwd + bwd: 60.56 TFLOPs/s
Pytorch fwd: 5.39 TFLOPs/s, bwd: 9.38 TFLOPs/s, fwd + bwd: 7.74 TFLOPs/s
Triton fwd: 65.51 TFLOPs/s, bwd: 49.97 TFLOPs/s, fwd + bwd: 53.61 TFLOPs/s

causal=True, headdim=64, batch_size=4, seqlen=4096

Flash2 fwd: 75.11 TFLOPs/s, bwd: 65.57 TFLOPs/s, fwd + bwd: 68.04 TFLOPs/s
Pytorch fwd: 5.58 TFLOPs/s, bwd: 9.82 TFLOPs/s, fwd + bwd: 8.07 TFLOPs/s
Triton fwd: 69.32 TFLOPs/s, bwd: 52.99 TFLOPs/s, fwd + bwd: 56.81 TFLOPs/s

causal=True, headdim=64, batch_size=2, seqlen=8192

Flash2 fwd: 76.46 TFLOPs/s, bwd: 71.02 TFLOPs/s, fwd + bwd: 72.49 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 70.00 TFLOPs/s, bwd: 54.64 TFLOPs/s, fwd + bwd: 58.30 TFLOPs/s

causal=True, headdim=64, batch_size=1, seqlen=16384

Flash2 fwd: 76.71 TFLOPs/s, bwd: 74.41 TFLOPs/s, fwd + bwd: 75.05 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 71.67 TFLOPs/s, bwd: 37.86 TFLOPs/s, fwd + bwd: 43.76 TFLOPs/s

causal=True, headdim=128, batch_size=32, seqlen=512

Flash2 fwd: 58.99 TFLOPs/s, bwd: 34.81 TFLOPs/s, fwd + bwd: 39.43 TFLOPs/s
Pytorch fwd: 8.04 TFLOPs/s, bwd: 13.45 TFLOPs/s, fwd + bwd: 11.28 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=16, seqlen=1024

Flash2 fwd: 67.16 TFLOPs/s, bwd: 44.73 TFLOPs/s, fwd + bwd: 49.45 TFLOPs/s
Pytorch fwd: 9.17 TFLOPs/s, bwd: 16.32 TFLOPs/s, fwd + bwd: 13.35 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=8, seqlen=2048

Flash2 fwd: 70.97 TFLOPs/s, bwd: 55.36 TFLOPs/s, fwd + bwd: 59.07 TFLOPs/s
Pytorch fwd: 9.57 TFLOPs/s, bwd: 17.10 TFLOPs/s, fwd + bwd: 13.96 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=4, seqlen=4096

Flash2 fwd: 70.27 TFLOPs/s, bwd: 62.74 TFLOPs/s, fwd + bwd: 64.73 TFLOPs/s
Pytorch fwd: 10.13 TFLOPs/s, bwd: 18.08 TFLOPs/s, fwd + bwd: 14.77 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=2, seqlen=8192

Flash2 fwd: 67.38 TFLOPs/s, bwd: 66.94 TFLOPs/s, fwd + bwd: 67.07 TFLOPs/s
Pytorch fwd: 10.05 TFLOPs/s, bwd: 18.44 TFLOPs/s, fwd + bwd: 14.89 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s

causal=True, headdim=128, batch_size=1, seqlen=16384

Flash2 fwd: 65.73 TFLOPs/s, bwd: 69.45 TFLOPs/s, fwd + bwd: 68.35 TFLOPs/s
Pytorch fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s
Triton fwd: 0.00 TFLOPs/s, bwd: 0.00 TFLOPs/s, fwd + bwd: 0.00 TFLOPs/s