import logging
from typing import List, Optional, Tuple

from einops import rearrange
from flash_attn.bert_padding import pad_input
from flash_attn.bert_padding import unpad_input
# pip3 install "flash-attn>=2.0"
from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
import torch
from torch import nn
import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb

def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Input shape: Batch x Time x Channel

    attention_mask: [bsz, q_len]
    """
    bsz, q_len, _ = hidden_states.size()

    query_states = (self.q_proj(hidden_states).view(
        bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2))
    key_states = (self.k_proj(hidden_states).view(
        bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2))
    value_states = (self.v_proj(hidden_states).view(
        bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2))
    # .view() yields [bsz, q_len, nh, hd]; .transpose(1, 2) yields [bsz, nh, q_len, hd]

    kv_seq_len = key_states.shape[-2]
    assert past_key_value is None, "past_key_value is not supported"

    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
    query_states, key_states = apply_rotary_pos_emb(query_states, key_states,
                                                    cos, sin, position_ids)
    # [bsz, nh, q_len, hd]
    assert not output_attentions, "output_attentions is not supported"
    assert not use_cache, "use_cache is not supported"

    # Flash attention code adapted from
    # https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attention.py

    # Transform the data into the packed qkv format required by flash attention.
    qkv = torch.stack([query_states, key_states, value_states],
                      dim=2)  # [bsz, nh, 3, q_len, hd]
    qkv = qkv.transpose(1, 3)  # [bsz, q_len, 3, nh, hd]
    # _prepare_decoder_attention_mask is disabled in LlamaModel (see below),
    # so the attention_mask here is the same as the key_padding_mask.
    key_padding_mask = attention_mask

    if key_padding_mask is None:
        # No padding: every sequence in the batch has length q_len.
        qkv = rearrange(qkv, "b s ... -> (b s) ...")
        max_s = q_len
        cu_q_lens = torch.arange(0, (bsz + 1) * q_len,
                                 step=q_len,
                                 dtype=torch.int32,
                                 device=qkv.device)
        output = flash_attn_varlen_qkvpacked_func(qkv,
                                                  cu_q_lens,
                                                  max_s,
                                                  0.0,
                                                  softmax_scale=None,
                                                  causal=True)
        output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
    else:
        # Padded batch: strip padding tokens, run flash attention on the packed
        # tokens, then scatter the outputs back to the padded layout.
        nheads = qkv.shape[-2]
        x = rearrange(qkv, "b s three h d -> b s (three h d)")
        x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
        x_unpad = rearrange(x_unpad,
                            "nnz (three h d) -> nnz three h d",
                            three=3,
                            h=nheads)
        output_unpad = flash_attn_varlen_qkvpacked_func(x_unpad,
                                                        cu_q_lens,
                                                        max_s,
                                                        0.0,
                                                        softmax_scale=None,
                                                        causal=True)
        output = rearrange(
            pad_input(rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices,
                      bsz, q_len),
            "b s (h d) -> b s h d",
            h=nheads,
        )
    return self.o_proj(rearrange(output, "b s h d -> b s (h d)")), None, None


# Disable the transformation of the attention mask in LlamaModel, since flash
# attention requires the attention mask to be the same as the key_padding_mask.
def _prepare_decoder_attention_mask(self, attention_mask, input_shape,
                                    inputs_embeds, past_key_values_length):
    # [bsz, seq_len]
    return attention_mask


def replace_llama_attn_with_flash_attn():
    cuda_major, cuda_minor = torch.cuda.get_device_capability()
    if cuda_major < 8:
        logging.warning(
            "Flash attention is only supported on A100 or H100 GPUs during "
            "training due to the head dim > 64 backward requirement. "
            "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593"
        )
    transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
        _prepare_decoder_attention_mask)
    transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
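

# Minimal usage sketch: apply the patch *before* the LLaMA model is built so
# that LlamaAttention.forward is already replaced when the modules are
# constructed. The checkpoint name is an assumption; any LLaMA checkpoint and a
# transformers version compatible with this patch (pre-refactor LlamaAttention)
# should work.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer

    replace_llama_attn_with_flash_attn()

    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-2-7b-hf",  # assumed checkpoint
        torch_dtype=torch.bfloat16,  # flash attention requires fp16/bf16
    ).cuda()
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

    inputs = tokenizer("Flash attention smoke test.", return_tensors="pt").to("cuda")
    with torch.no_grad():
        # The patched forward asserts that use_cache is off, so disable it here.
        out = model(**inputs, use_cache=False)
    print(out.logits.shape)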