import torch
import torch.nn.functional as F
from coati.models.base import Actor, Critic, RewardModel
from coati.models.generation import generate
from coati.models.utils import calc_action_log_probs, compute_reward
from transformers import PreTrainedTokenizer

from .base import Experience, ExperienceMaker


class NaiveExperienceMaker(ExperienceMaker):
    """
    Naive experience maker.

    Generates responses with the actor, computes per-token log-probabilities under
    the actor and the frozen initial (reference) model, scores the sequences with
    the reward model and critic, and returns an Experience whose advantage is the
    KL-penalised reward minus the critic value.
    """

    def __init__(
        self,
        actor: Actor,
        critic: Critic,
        reward_model: RewardModel,
        initial_model: Actor,
        tokenizer: PreTrainedTokenizer,
        kl_coef: float = 0.1,
    ) -> None:
        super().__init__(actor, critic, reward_model, initial_model)
        self.tokenizer = tokenizer
        self.kl_coef = kl_coef

    @torch.no_grad()
    def make_experience(self, input_ids: torch.Tensor, **generate_kwargs) -> Experience:
        # all models are used for inference only here
        self.actor.eval()
        self.critic.eval()
        self.initial_model.eval()
        self.reward_model.eval()

        # generate sequences (prompt + response) with the actor
        sequences = generate(self.actor, input_ids, self.tokenizer, **generate_kwargs)

        # calculate auxiliary tensors
        attention_mask = None
        pad_token_id = self.tokenizer.pad_token_id
        if pad_token_id is not None:
            attention_mask = sequences.not_equal(pad_token_id).to(dtype=torch.long, device=sequences.device)

        input_len = input_ids.size(1)
        eos_token_id = self.tokenizer.eos_token_id
        if eos_token_id is None:
            action_mask = torch.ones_like(sequences, dtype=torch.bool)
        else:
            # left padding may be applied, only mask action
            action_mask = (sequences[:, input_len:] == eos_token_id).cumsum(dim=-1) == 0
            action_mask = F.pad(action_mask, (1 + input_len, -1), value=True)  # include eos token and input
        action_mask[:, :input_len] = False
        action_mask = action_mask[:, 1:]
        action_mask = action_mask[:, -(sequences.size(1) - input_len) :]
        num_actions = action_mask.size(1)

        # per-token log-probabilities of the generated actions under the actor
        # and under the frozen initial model (used for the KL penalty)
        actor_output = self.actor(sequences, attention_mask)["logits"]
        action_log_probs = calc_action_log_probs(actor_output, sequences, num_actions)
        base_model_output = self.initial_model(sequences, attention_mask)["logits"]
        base_action_log_probs = calc_action_log_probs(base_model_output, sequences, num_actions)
        # critic value estimate and reward-model score for each sequence
        value = self.critic(sequences, attention_mask)
        r = self.reward_model(sequences, attention_mask)
        # combine the reward-model score with a KL penalty against the initial model
        reward = compute_reward(r, self.kl_coef, action_log_probs, base_action_log_probs, action_mask=action_mask)

        # naive advantage estimate: penalised reward minus critic value (no GAE)
        advantage = reward - value
        # TODO(ver217): maybe normalize adv
        if advantage.ndim == 1:
            advantage = advantage.unsqueeze(-1)

        return Experience(sequences, action_log_probs, value, reward, advantage, attention_mask, action_mask)