gpt-neox
/* coding=utf-8
 * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cuda_fp16.h>
#include <torch/extension.h>
#include <vector>

namespace multihead_attn {
namespace fused_softmax {
namespace scaled_masked_softmax {

// Forward declarations of the CUDA implementations, defined in the
// companion .cu translation unit.
torch::Tensor fwd_cuda(torch::Tensor const& input, torch::Tensor const& mask, float scale_factor);

torch::Tensor bwd_cuda(torch::Tensor const& output_grads,
                       torch::Tensor const& softmax_results,
                       float scale_factor);

int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads);

// C++ entry point: validate shape and dtype, then dispatch to the CUDA kernel.
torch::Tensor fwd(torch::Tensor const& input, torch::Tensor const& mask, float scale_factor)
{
    AT_ASSERTM(input.dim() == 4, "expected 4D tensor");
    AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
                   (input.scalar_type() == at::ScalarType::BFloat16),
               "Only fp16 and bf16 are supported");
    AT_ASSERTM(mask.dim() == 4, "expected 4D tensor");

    return fwd_cuda(input, mask, scale_factor);
}

torch::Tensor bwd(torch::Tensor const& output_grads,
                  torch::Tensor const& softmax_results,
                  float scale_factor)
{
    AT_ASSERTM(output_grads.dim() == 4, "expected 4D tensor");
    AT_ASSERTM(softmax_results.dim() == 4, "expected 4D tensor");

    AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
                   (output_grads.scalar_type() == at::ScalarType::BFloat16),
               "Only fp16 and bf16 are supported");
    AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
                   (softmax_results.scalar_type() == at::ScalarType::BFloat16),
               "Only fp16 and bf16 are supported");

    return bwd_cuda(output_grads, softmax_results, scale_factor);
}

int get_batch_per_block(int query_seq_len, int key_seq_len, int batches, int attn_heads)
{
    return get_batch_per_block_cuda(query_seq_len, key_seq_len, batches, attn_heads);
}

} // end namespace scaled_masked_softmax
} // end namespace fused_softmax
} // end namespace multihead_attn

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.def("forward",
          &multihead_attn::fused_softmax::scaled_masked_softmax::fwd,
          "Self Multihead Attention scaled, time masked softmax -- Forward.");

    m.def("backward",
          &multihead_attn::fused_softmax::scaled_masked_softmax::bwd,
          "Self Multihead Attention scaled, time masked softmax -- Backward.");

    m.def("get_batch_per_block",
          &multihead_attn::fused_softmax::scaled_masked_softmax::get_batch_per_block,
          "Return Batch per block size.");
}
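
For reference, a minimal sketch of how a binding like this is typically JIT-compiled and called from Python with torch.utils.cpp_extension.load. The module name, the filenames, and the mask convention (4D padding mask, nonzero entries masked out) are assumptions about the surrounding build, not established by this file; only forward, backward, and get_batch_per_block come from the bindings above.

import torch
from torch.utils.cpp_extension import load

# Build the extension; `name` becomes TORCH_EXTENSION_NAME in the C++ file.
ext = load(
    name="scaled_masked_softmax_cuda",          # assumed module name
    sources=["scaled_masked_softmax.cpp",       # this binding file (assumed filename)
             "scaled_masked_softmax_cuda.cu"],  # assumed companion kernel source
)

b, h, sq, sk = 2, 8, 128, 128
inputs = torch.randn(b, h, sq, sk, device="cuda", dtype=torch.float16)
# 4D mask; the binding only checks dim() == 4. The broadcastable
# [b, 1, sq, sk] shape and "nonzero = masked" semantics are assumptions
# about the underlying kernel.
mask = torch.zeros(b, 1, sq, sk, device="cuda", dtype=torch.uint8)

probs = ext.forward(inputs, mask, 1.0)            # softmax(inputs * scale) with masking
grads = ext.backward(probs.clone(), probs, 1.0)   # gradient w.r.t. the scaled logits
batch_per_block = ext.get_batch_per_block(sq, sk, b, h)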