# This code is based on lmsys-org/fastchat. Below is the original copyright:
#
# Copyright 2023 FastChat authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
15"""
16Directly copied the code from https://raw.githubusercontent.com/oobabooga/text-generation-webui/main/modules/llama_attn_hijack.py and made some adjustments
17"""

import logging
import math
from typing import Optional, Tuple

import torch
from torch import nn
import transformers.models.llama.modeling_llama

try:
    import xformers.ops
except ImportError:
    logging.error(
        "xformers not found! Please install it before trying to use it.")


def replace_llama_attn_with_xformers_attn():
    transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward

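# Example usage (a minimal sketch; assumes a transformers version, circa 4.28,
# whose LlamaAttention.forward matches the signature of xformers_forward below;
# the checkpoint path is a placeholder):
#
#   from transformers import AutoModelForCausalLM
#
#   replace_llama_attn_with_xformers_attn()  # patching the class affects all
#                                            # LlamaAttention instances
#   model = AutoModelForCausalLM.from_pretrained("/path/to/llama-checkpoint")
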
def xformers_forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    # pylint: disable=duplicate-code
    bsz, q_len, _ = hidden_states.size()

    query_states = (self.q_proj(hidden_states).view(bsz, q_len, self.num_heads,
                                                    self.head_dim).transpose(
                                                        1, 2))
    key_states = (self.k_proj(hidden_states).view(bsz, q_len, self.num_heads,
                                                  self.head_dim).transpose(
                                                      1, 2))
    value_states = (self.v_proj(hidden_states).view(bsz, q_len, self.num_heads,
                                                    self.head_dim).transpose(
                                                        1, 2))
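    # query/key/value_states are all (bsz, num_heads, q_len, head_dim) here
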
    kv_seq_len = key_states.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]
    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
    (
        query_states,
        key_states,
    ) = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(
        query_states, key_states, cos, sin, position_ids)
    # [bsz, nh, t, hd]

    if past_key_value is not None:
        # reuse k, v, self_attention
        key_states = torch.cat([past_key_value[0], key_states], dim=2)
        value_states = torch.cat([past_key_value[1], value_states], dim=2)

    past_key_value = (key_states, value_states) if use_cache else None
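    # key/value_states now span the full kv_seq_len (cached + current tokens):
    # (bsz, num_heads, kv_seq_len, head_dim)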

    # We only apply xformers optimizations if we don't need to output the whole attention matrix
    if not output_attentions:
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        # This is a nasty hack. We know attention_mask in transformers is either LowerTriangular or all Zeros.
        # We therefore check if one element in the upper triangular portion is zero. If it is, then the mask is all zeros.
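        # (In the causal case the additive mask holds a large negative value
        # above the diagonal, so element [0, 0, 0, 1] -- query 0 attending to
        # key 1 -- is zero only when the mask is all zeros.)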
        if attention_mask is None or attention_mask[0, 0, 0, 1] == 0:
            # input and output should be of form (bsz, q_len, num_heads, head_dim)
            attn_output = xformers.ops.memory_efficient_attention(
                query_states, key_states, value_states, attn_bias=None)
        else:
            # input and output should be of form (bsz, q_len, num_heads, head_dim)
            attn_output = xformers.ops.memory_efficient_attention(
                query_states,
                key_states,
                value_states,
                attn_bias=xformers.ops.LowerTriangularMask(),
            )
        attn_weights = None
    else:
        attn_weights = torch.matmul(query_states, key_states.transpose(
            2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}")

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask
            attn_weights = torch.max(
                attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights,
                                             dim=-1,
                                             dtype=torch.float32).to(
                                                 query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}")

        attn_output = attn_output.transpose(1, 2)

    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
    attn_output = self.o_proj(attn_output)
    return attn_output, attn_weights, past_key_value
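
# Quick numerical sanity check (a sketch, not part of the original module;
# assumes a transformers version, circa 4.28, whose LlamaAttention matches the
# signature above, plus a CUDA GPU, since xformers' memory_efficient_attention
# targets CUDA):
#
#   import torch
#   from transformers.models.llama.configuration_llama import LlamaConfig
#   from transformers.models.llama.modeling_llama import LlamaAttention
#
#   config = LlamaConfig(hidden_size=128, num_attention_heads=4)
#   attn = LlamaAttention(config).half().cuda()
#   x = torch.randn(1, 16, 128, dtype=torch.float16, device="cuda")
#   position_ids = torch.arange(16, device="cuda")[None]
#   ref, _, _ = attn(x, position_ids=position_ids)
#   replace_llama_attn_with_xformers_attn()  # swaps the class-level forward
#   out, _, _ = attn(x, position_ids=position_ids)
#   torch.testing.assert_close(ref, out, rtol=1e-3, atol=1e-3)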