Dao-AILab / flash-attention

Fast and memory-efficient exact attention

Why split_kv is only enabled on n_heads > n_heads_k #1029

Open Luke20000429 opened 3 months ago

Luke20000429 commented 3 months ago

The precondition for split_kv in the varlen forward path is seqlenq_ngroups_swapped=True (https://github.com/Dao-AILab/flash-attention/blob/6df7e0a02edcee851744168079377a039f6d728d/csrc/flash_attn/flash_api.cpp#L702). That condition requires num_heads > num_heads_k, so split_kv is only enabled for grouped-query attention. However, when the batch size is small it is still more efficient to parallelize over the KV dimension. Why is this requirement needed for split_kv? I suppose only max_seqlen_q == 1 is necessary.

Luke20000429 commented 3 months ago

The condition comes from #754, where the benchmark code uses n_heads_k = 1 < n_heads. Changing the definition of seqlenq_ngroups_swapped to

const int seqlenq_ngroups_swapped = seqlen_q == 1 && num_heads >= num_heads_k && window_size_left < 0 && window_size_right < 0 && p_dropout == 0.f && head_size_og % 8 == 0 && !alibi_slopes_.has_value();

won't affect the seqlen_q/ngroups swap for plain MHA, since ngroups = num_heads / num_heads_k = 1 makes the swap a no-op, but split_kv will now be enabled for better parallelism.
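
To make the parallelism argument concrete, here is a rough back-of-the-envelope sketch of the launch grid (the tile size, SM count, and num_splits value below are illustrative assumptions, not the kernel's actual choices):

# Rough occupancy arithmetic for the decode case (illustrative assumptions only).
batch, nheads, seqlen_q = 1, 12, 1
BLOCK_M = 64        # assumed query tile size, not necessarily the kernel's setting
num_sms = 108       # an A100 has 108 SMs

# Without split_kv, the forward grid is roughly ceil(seqlen_q / BLOCK_M) * batch * nheads.
blocks_no_split = -(-seqlen_q // BLOCK_M) * batch * nheads
print(blocks_no_split)                # 12 thread blocks -> most of the 108 SMs sit idle

# split_kv additionally partitions the KV length into num_splits chunks and runs a
# small combine kernel afterwards to merge the partial softmax results.
num_splits = 8                        # chosen by a heuristic in the real code
print(blocks_no_split * num_splits)   # 96 thread blocks -> far better utilization

With only ~12 blocks the GPU is mostly idle, so splitting along KV recovers the parallelism that a larger batch would otherwise provide.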

Luke20000429 commented 3 months ago

Profiling with Nsight: total time of the split forward kernel + combine kernel = 24 us

[Screenshot: Nsight timeline with split_kv enabled, 2024-07-05]

The original code doesn't trigger split_kv; without the split the same workload takes 77 us.

[Screenshot: Nsight timeline without split_kv, 2024-07-05]

That is a significant (~3x) speedup at small batch size.

Luke20000429 commented 3 months ago

Benchmark adapted from #754, just with num_heads_k = nheads:

from functools import partial
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

from einops import rearrange, repeat

from flash_attn.utils.benchmark import benchmark_forward, benchmark_backward, benchmark_combined, benchmark_all, benchmark_fwd_bwd, pytorch_profiler
from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func, flash_attn_varlen_func
# # from flash_attn.triton.fused_attention import attention as attention
# from flash_attn.flash_attn_triton import flash_attn_qkvpacked_func
# from flash_attn.flash_attn_triton_og import attention as attention_og

# from triton.ops.flash_attention import attention as attention_triton

from flash_attn import flash_attn_qkvpacked_func, flash_attn_kvpacked_func

try:
    from flash_attn.fused_softmax import scaled_upper_triang_masked_softmax
except ImportError:
    scaled_upper_triang_masked_softmax = None

def attention_pytorch(qkv, dropout_p=0.0, causal=True):
    """
    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, head_dim)
        dropout_p: float
    Output:
        output: (batch_size, seqlen, nheads, head_dim)
    """
    batch_size, seqlen, _, nheads, d = qkv.shape
    q, k, v = qkv.unbind(dim=2)
    q = rearrange(q, 'b t h d -> (b h) t d')
    k = rearrange(k, 'b s h d -> (b h) d s')
    softmax_scale = 1.0 / math.sqrt(d)
    # Preallocate attn_weights for `baddbmm`
    scores = torch.empty(batch_size * nheads, seqlen, seqlen, dtype=qkv.dtype, device=qkv.device)
    scores = rearrange(torch.baddbmm(scores, q, k, beta=0, alpha=softmax_scale),
                       '(b h) t s -> b h t s', h=nheads)
    if causal:
        # "triu_tril_cuda_template" not implemented for 'BFloat16'
        # So we have to construct the mask in float
        causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
        # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
        scores = scores + causal_mask.to(dtype=scores.dtype)
    attention = torch.softmax(scores, dim=-1)
    attention_drop = F.dropout(attention, dropout_p)
    output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
    return output.to(dtype=qkv.dtype)

def attention_megatron(qkv):
    """
    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, head_dim)
    Output:
        output: (batch_size, seqlen, nheads, head_dim)
    """
    batch_size, seqlen, _, nheads, d = qkv.shape
    q, k, v = qkv.unbind(dim=2)
    q = rearrange(q, 'b t h d -> (b h) t d')
    k = rearrange(k, 'b s h d -> (b h) d s')
    softmax_scale = 1.0 / math.sqrt(d)
    # Preallocate attn_weights for `baddbmm`
    scores = torch.empty(batch_size * nheads, seqlen, seqlen, dtype=qkv.dtype, device=qkv.device)
    scores = rearrange(torch.baddbmm(scores, q, k, beta=0, alpha=softmax_scale),
                       '(b h) t s -> b h t s', h=nheads)
    attention = scaled_upper_triang_masked_softmax(scores, None, scale=1.0)
    output = torch.einsum('bhts,bshd->bthd', attention, v)
    return output.to(dtype=qkv.dtype)

for seqlen in [2048, 4096, 8192]:
    for batch_size in [1, 16, 128]:
        torch.manual_seed(0)
        repeats = 30
        nheads = 12
        headdim = 128
        dropout_p = 0.0
        causal = True
        dtype = torch.float16
        device = 'cuda'

        max_seqlen_q = 1
        max_seqlen_k = seqlen

        nheads_q = nheads
        nheads_k = nheads

        requires_grad = True

        q = torch.randn(batch_size * max_seqlen_q, nheads_q, headdim, device=device, dtype=dtype,
                        requires_grad=requires_grad)
        k = torch.randn(batch_size * max_seqlen_k, nheads_k, headdim, device=device, dtype=dtype,
                        requires_grad=requires_grad)
        v = torch.randn(batch_size * max_seqlen_k, nheads_k, headdim, device=device, dtype=dtype,
                        requires_grad=requires_grad)
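        # Cumulative sequence lengths for the varlen (packed) layout: sequence b occupies
        # rows cu_seqlens[b]:cu_seqlens[b+1] of the packed q (and k/v) tensors.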
        cu_seqlens_q = torch.arange(0, (batch_size + 1) * max_seqlen_q, step=max_seqlen_q, dtype=torch.int32,
                                device=q.device)
        cu_seqlens_k = torch.arange(0, (batch_size + 1) * max_seqlen_k, step=max_seqlen_k, dtype=torch.int32,
                                device=q.device)

        t, m = benchmark_forward(flash_attn_varlen_func,
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p=0.0,
            softmax_scale=None,
            causal=True, repeats=repeats, desc='DecodingFlashAttention', verbose=False)
        print(f"{batch_size=}, {seqlen=}: {(m.mean * 1e6):.3f}us")

# pytorch_profiler(flash_attn_varlen_qkvpacked_func, qkv_unpad,
#                  cu_seqlens, seqlen, dropout_p, causal=causal, backward=True)
# benchmark_forward(flash_attn_qkvpacked_func, qkv, dropout_p, causal=causal, repeats=repeats, desc='Fav2')
# pytorch_profiler(flash_attn_qkvpacked_func, qkv, dropout_p, causal=causal, backward=False)

# for dropout_p in [0.1, 0.0]:
#     for causal in [False, True]:
#         print(f"### {dropout_p = }, {causal = } ###")
#         pytorch_profiler(fav2_qkvpacked_func, qkv, dropout_p, causal=causal, backward=True)

# nheads_k = 2
# q = torch.randn(batch_size, seqlen, nheads, headdim, device=device, dtype=dtype, requires_grad=True)
# kv = torch.randn(batch_size, seqlen, 2, nheads_k, headdim, device=device, dtype=dtype,
#                  requires_grad=True)
# if fav2_kvpacked_func is not None:
#     benchmark_all(fav2_kvpacked_func, q, kv, dropout_p, causal=causal, repeats=repeats, desc='Fav2')
#     pytorch_profiler(fav2_kvpacked_func, q, kv, dropout_p, causal=causal, backward=True)

# dropout_p = 0.0
# causal = False
# benchmark_all(attention_pytorch, qkv, dropout_p, causal=causal,
#               repeats=repeats, desc='PyTorch Attention')

# benchmark_all(flash_attn_qkvpacked_func, qkv, None, causal, repeats=repeats, desc='FlashAttention Triton')
# pytorch_profiler(flash_attn_qkvpacked_func, qkv, None, causal, backward=True)

# q, k, v = [torch.randn(batch_size, nheads, seqlen, headdim, device=device, dtype=dtype,
#                        requires_grad=True) for _ in range(3)]
# benchmark_all(attention_og, q, k, v, 1.0, repeats=repeats, desc='FlashAttention Triton OG')
# # pytorch_profiler(attention, q, k, v, 1.0, backward=True)

# if scaled_upper_triang_masked_softmax is not None:
#     benchmark_all(attention_megatron, qkv, repeats=repeats, desc='Megatron Attention')

# from src.ops.fftconv import fftconv_func

# dim = nheads * headdim
# u = torch.randn(batch_size, dim, seqlen, device=device, dtype=dtype, requires_grad=True)
# k = torch.randn(dim, seqlen, device=device, requires_grad=True)
# D = torch.randn(dim, device=device, requires_grad=True)
# benchmark_all(fftconv_func, u, k, D, repeats=repeats, desc='FFTConv')
# pytorch_profiler(fftconv_func, u, k, D, backward=True)
# pytorch_profiler(torch.fft.rfft, u.float())

flops = 4 * batch_size * seqlen ** 2 * nheads * headdim
ideal_a100_time = flops / 312 / 1e9
print(f"Ideal A100 fwd time: {ideal_a100_time:.3f}ms, bwd time: {ideal_a100_time * 2.5:.3f}ms")
exit(0)

def time_fwd_bwd(func, *args, **kwargs):
    time_f, time_b = benchmark_fwd_bwd(func, *args, **kwargs)
    return time_f[1].mean, time_b[1].mean

bs_seqlen_vals = [(32, 512), (16, 1024), (8, 2048), (4, 4096), (2, 8192), (1, 16384)]
causal_vals = [False, True]
headdim_vals = [64, 128]
dim = 2048
dropout_p = 0.0

time_f = {}
time_b = {}
for causal in causal_vals:
    for headdim in headdim_vals:
        for batch_size, seqlen in bs_seqlen_vals:
            nheads = dim // headdim
            qkv = torch.randn(batch_size, seqlen, 3, nheads, headdim, device=device, dtype=dtype,
                              requires_grad=True)
            cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                    device=qkv.device)
            qkv_unpad = rearrange(qkv, 'b s ... -> (b s) ...').detach().requires_grad_(True)
            f, b = time_fwd_bwd(
                flash_attn_varlen_qkvpacked_func, qkv_unpad, cu_seqlens, seqlen, dropout_p,
                causal=causal, repeats=repeats, verbose=False
            )
            time_f[(causal, headdim, batch_size, seqlen), "Flash"] = f
            time_b[(causal, headdim, batch_size, seqlen), "Flash"] = b

            # qkv = qkv.detach().requires_grad_(True)
            # f, b = time_fwd_bwd(
            #     fav2_qkvpacked_func, qkv, dropout_p, causal=causal, repeats=repeats, verbose=False
            # )
            # time_f[(causal, headdim, batch_size, seqlen), "Flash2"] = f
            # time_b[(causal, headdim, batch_size, seqlen), "Flash2"] = b

            # q, k, v = [torch.randn(batch_size, nheads, seqlen, headdim, device=device, dtype=dtype,
            #                        requires_grad=True) for _ in range(3)]
            # # Try both values of sequence_parallel and pick the faster one
            # f, b = time_fwd_bwd(
            #     attention_triton, q, k, v, causal, headdim**(-0.5),
            #     False, repeats=repeats, verbose=False
            # )
            # _, b0 = time_fwd_bwd(
            #     attention_triton, q, k, v, causal, headdim**(-0.5),
            #     True, repeats=repeats, verbose=False
            # )
            # time_f[(causal, headdim, batch_size, seqlen), "Triton"] = f
            # time_b[(causal, headdim, batch_size, seqlen), "Triton"] = min(b, b0)

            if seqlen <= 8 * 1024:
                qkv = qkv.detach().requires_grad_(True)
                f, b = time_fwd_bwd(
                    attention_pytorch, qkv, dropout_p, causal=causal, repeats=repeats, verbose=False
                )
            else:
                f, b = float('nan'), float('nan')
            time_f[(causal, headdim, batch_size, seqlen), "Pytorch"] = f
            time_b[(causal, headdim, batch_size, seqlen), "Pytorch"] = b

            # q, k, v = [torch.randn(batch_size, seqlen, nheads, headdim, device=device, dtype=dtype,
            #                        requires_grad=True) for _ in range(3)]
            # import xformers.ops as xops
            # f, b = time_fwd_bwd(
            #     xops.memory_efficient_attention, q, k, v,
            #     attn_bias=xops.LowerTriangularMask() if causal else None,
            #     op=(xops.fmha.cutlass.FwOp, xops.fmha.cutlass.BwOp)
            # )
            # time_f[(causal, headdim, batch_size, seqlen), "xformers"] = f
            # time_b[(causal, headdim, batch_size, seqlen), "xformers"] = b

import pickle
with open('flash2_attn_time_h100.plk', 'wb') as fp:
    pickle.dump((time_f, time_b), fp, protocol=pickle.HIGHEST_PROTOCOL)