"""Adapter utils."""

from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Type

import torch
import transformers
from sentence_transformers.util import cos_sim
from torch import Tensor, nn
from torch.optim import Optimizer
from tqdm.autonotebook import trange

from llama_index.legacy.embeddings.adapter_utils import BaseAdapter
from llama_index.legacy.utils import print_text

class MyMultipleNegativesRankingLoss(nn.Module):
    """Multiple negatives ranking loss.

    This loss is similar to the one in sentence_transformers,
    but optimized for our own embeddings.

    """

    def __init__(
        self,
        model: BaseAdapter,
        scale: float = 20.0,
        similarity_fct: Optional[Callable] = None,
    ):
        """Define ranking loss."""
        super().__init__()
        self.model = model
        self.scale = scale
        self.similarity_fct = cos_sim if similarity_fct is None else similarity_fct
        self.cross_entropy_loss = nn.CrossEntropyLoss()

    def forward(self, query_embeds: Tensor, context_embeds: Tensor) -> Tensor:
        """Forward pass."""
        # transform only the query embeds; the context embeds are left as-is
        # context_embeds_2 = self.model.forward(context_embeds)
        query_embeds_2 = self.model.forward(query_embeds)

        # (batch, batch) similarity matrix: entry (i, j) scores query i against
        # context j, so each query's true context sits on the diagonal
        scores = self.similarity_fct(query_embeds_2, context_embeds) * self.scale
        labels = torch.tensor(
            range(len(scores)), dtype=torch.long, device=scores.device
        )
        return self.cross_entropy_loss(scores, labels)

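# Usage sketch (illustrative, not part of the original module): for a batch of
# N (query, context) pairs, every other context in the batch acts as a
# negative, and the loss is cross-entropy against the diagonal of the N x N
# score matrix. `my_adapter` and the 384-dim embeddings are assumptions here:
#
#     loss_fn = MyMultipleNegativesRankingLoss(model=my_adapter)
#     loss = loss_fn(torch.randn(8, 384), torch.randn(8, 384))  # scalar tensor
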
def train_model(
    model: BaseAdapter,
    data_loader: torch.utils.data.DataLoader,
    device: torch.device,
    epochs: int = 1,
    steps_per_epoch: Optional[int] = None,
    warmup_steps: int = 10000,
    optimizer_class: Type[Optimizer] = torch.optim.AdamW,
    optimizer_params: Optional[Dict[str, Any]] = None,
    output_path: str = "model_output",
    max_grad_norm: float = 1.0,
    show_progress_bar: bool = True,
    verbose: bool = False,
    # callback: Callable[[float, int, int], None] = None,
    # scheduler: str = "WarmupLinear",
    # weight_decay: float = 0.01,
    # evaluation_steps: int = 0,
    # save_best_model: bool = True,
    # use_amp: bool = False,  # disable this option for now
    checkpoint_path: Optional[str] = None,
    checkpoint_save_steps: int = 500,
    # checkpoint_save_total_limit: int = 0,
) -> None:
    """Train model."""
    # use None as the default to avoid a shared mutable default argument
    if optimizer_params is None:
        optimizer_params = {"lr": 2e-5}

    model.to(device)
    # TODO: hardcode loss now, make customizable later
    loss_model = MyMultipleNegativesRankingLoss(model=model)
    loss_model.to(device)

    # prepare optimizer/scheduler
    param_optimizer = list(model.named_parameters())
    optimizer_grouped_parameters: List[Dict[str, Any]] = [
        {
            "params": [p for n, p in param_optimizer],
        },
    ]
    optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
    if steps_per_epoch is None or steps_per_epoch == 0:
        steps_per_epoch = len(data_loader)
    num_train_steps = int(steps_per_epoch * epochs)
    scheduler_obj = transformers.get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_steps
    )

    if verbose:
        print_text("> Prepared optimizer, scheduler, and loss model.\n", color="blue")

    global_step = 0
    data_iterator = iter(data_loader)

    # if checkpoint_path is specified, create it if it doesn't exist
    if checkpoint_path is not None:
        Path(checkpoint_path).mkdir(parents=True, exist_ok=True)

    for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
        training_steps = 0
        loss_model.zero_grad()
        loss_model.train()
        for _ in trange(
            steps_per_epoch,
            desc="Iteration",
            smoothing=0.05,
            disable=not show_progress_bar,
        ):
            try:
                data = next(data_iterator)
            except StopIteration:
                # the loader is exhausted; restart it for the next pass
                data_iterator = iter(data_loader)
                data = next(data_iterator)

            query, context = data
            context = context.to(device)
            query = query.to(device)

            loss_value = loss_model(query, context)
            if verbose:
                print_text(
                    f"> [Epoch {epoch}] Current loss: {loss_value}\n", color="blue"
                )
            loss_value.backward()
            torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
            optimizer.step()

            optimizer.zero_grad()

            scheduler_obj.step()

            training_steps += 1
            global_step += 1

            # TODO: skip eval for now
            if checkpoint_path is not None and global_step % checkpoint_save_steps == 0:
                full_ck_path = Path(checkpoint_path) / f"step_{global_step}"
                model.save(str(full_ck_path))

    if verbose:
        print_text(f"> Finished training, saving to {output_path}\n", color="blue")

    # save model
    model.save(output_path)

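# Hedged smoke test (not part of the original module): runs train_model on
# random data. `_ToyAdapter`, the 64-dim embeddings, and every hyperparameter
# below are illustrative assumptions; real code would pass a concrete
# BaseAdapter subclass from the library instead.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    class _ToyAdapter(nn.Module):
        """Stand-in exposing the surface train_model actually uses."""

        def __init__(self, dim: int) -> None:
            super().__init__()
            self.linear = nn.Linear(dim, dim)

        def forward(self, embed: Tensor) -> Tensor:
            return self.linear(embed)

        def save(self, output_path: str) -> None:
            Path(output_path).mkdir(parents=True, exist_ok=True)
            torch.save(self.state_dict(), Path(output_path) / "adapter.pt")

    dim, n_pairs = 64, 32
    # each batch is a (query, context) pair of (batch_size, dim) tensors
    dataset = TensorDataset(torch.randn(n_pairs, dim), torch.randn(n_pairs, dim))
    train_model(
        _ToyAdapter(dim),  # duck-typed stand-in for a BaseAdapter
        DataLoader(dataset, batch_size=8),
        torch.device("cpu"),
        epochs=1,
        warmup_steps=4,
        output_path="toy_adapter_output",
        verbose=True,
    )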