1"""Adapter utils."""
2
3from pathlib import Path4from typing import Any, Callable, Dict, List, Optional, Type5
6import torch7import transformers8from sentence_transformers.util import cos_sim9from torch import Tensor, nn10from torch.optim import Optimizer11from tqdm.autonotebook import trange12
13from llama_index.legacy.embeddings.adapter_utils import BaseAdapter14from llama_index.legacy.utils import print_text15
16
class MyMultipleNegativesRankingLoss(nn.Module):
    """Multiple negatives ranking loss.

    This loss is similar to the one in sentence_transformers,
    but optimized for our own embeddings.

    """

    def __init__(
        self,
        model: BaseAdapter,
        scale: float = 20.0,
        similarity_fct: Optional[Callable] = None,
    ):
        """Define ranking loss."""
        super().__init__()
        self.model = model
        self.scale = scale
        self.similarity_fct = cos_sim if similarity_fct is None else similarity_fct
        self.cross_entropy_loss = nn.CrossEntropyLoss()

    def forward(self, query_embeds: Tensor, context_embeds: Tensor) -> Tensor:
        """Forward pass."""
        # transform query embeds with the adapter; context embeds are used as-is
        # context_embeds_2 = self.model.forward(context_embeds)
        query_embeds_2 = self.model.forward(query_embeds)

        scores = self.similarity_fct(query_embeds_2, context_embeds) * self.scale
        labels = torch.tensor(
            range(len(scores)), dtype=torch.long, device=scores.device
        )
        return self.cross_entropy_loss(scores, labels)
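
# How the loss above works: `scores[i][j]` holds the scaled similarity between
# adapted query i and context j, so each row of `scores` is a classification
# problem whose correct class is the same-index context (every other context in
# the batch serves as an in-batch negative). `labels = [0, 1, ..., B - 1]`
# encodes exactly that diagonal, and cross-entropy pushes each positive
# similarity above its in-batch negatives.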

def train_model(
    model: BaseAdapter,
    data_loader: torch.utils.data.DataLoader,
    device: torch.device,
    epochs: int = 1,
    steps_per_epoch: Optional[int] = None,
    warmup_steps: int = 10000,
    optimizer_class: Type[Optimizer] = torch.optim.AdamW,
    optimizer_params: Dict[str, Any] = {"lr": 2e-5},
    output_path: str = "model_output",
    max_grad_norm: float = 1,
    show_progress_bar: bool = True,
    verbose: bool = False,
    # callback: Callable[[float, int, int], None] = None,
    # scheduler: str = "WarmupLinear",
    # weight_decay: float = 0.01,
    # evaluation_steps: int = 0,
    # save_best_model: bool = True,
    # use_amp: bool = False,  # disable this option for now
    checkpoint_path: Optional[str] = None,
    checkpoint_save_steps: int = 500,
    # checkpoint_save_total_limit: int = 0,
) -> None:
    """Train model."""
    model.to(device)
    # TODO: hard-code the loss for now, make it customizable later
    loss_model = MyMultipleNegativesRankingLoss(model=model)
    loss_model.to(device)

    # prepare optimizer/scheduler
    param_optimizer = list(model.named_parameters())
    optimizer_grouped_parameters: List[Dict[str, Any]] = [
        {
            "params": [p for n, p in param_optimizer],
        },
    ]
    optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
    if steps_per_epoch is None or steps_per_epoch == 0:
        steps_per_epoch = len(data_loader)
    num_train_steps = int(steps_per_epoch * epochs)
    scheduler_obj = transformers.get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_steps
    )
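    # Note: `get_linear_schedule_with_warmup` ramps the learning rate linearly
    # from 0 to the optimizer's `lr` over `warmup_steps`, then decays it
    # linearly to 0 over the remaining steps. With the default
    # `warmup_steps=10000`, a short run can spend all of training inside the
    # warmup ramp, so pass a smaller value for small datasets.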

    if verbose:
        print_text("> Prepared optimizer, scheduler, and loss model.\n", color="blue")

    global_step = 0
    data_iterator = iter(data_loader)

    # if checkpoint_path is specified, create it if it doesn't exist
    if checkpoint_path is not None:
        Path(checkpoint_path).mkdir(parents=True, exist_ok=True)

    for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
        training_steps = 0
        loss_model.zero_grad()
        loss_model.train()
        for _ in trange(
            steps_per_epoch,
            desc="Iteration",
            smoothing=0.05,
            disable=not show_progress_bar,
        ):
            try:
                data = next(data_iterator)
            except StopIteration:
                # restart the iterator once the data loader is exhausted
                data_iterator = iter(data_loader)
                data = next(data_iterator)

            query, context = data
            context = context.to(device)
            query = query.to(device)

            loss_value = loss_model(query, context)
            if verbose:
                print_text(
                    f"> [Epoch {epoch}] Current loss: {loss_value}\n", color="blue"
                )
            loss_value.backward()
            torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
            optimizer.step()

            optimizer.zero_grad()

            scheduler_obj.step()

            training_steps += 1
            global_step += 1

            # TODO: skip eval for now
            if checkpoint_path is not None and global_step % checkpoint_save_steps == 0:
                full_ck_path = Path(checkpoint_path) / f"step_{global_step}"
                model.save(str(full_ck_path))

    if verbose:
        print_text(f"> Finished training, saving to {output_path}\n", color="blue")

    # save model
    model.save(output_path)
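
# --- Usage sketch ---
# A minimal sketch of driving `train_model`, assuming `LinearLayer` (a
# `BaseAdapter` subclass in `llama_index.legacy.embeddings.adapter_utils`)
# accepts input/output dimensions as shown; the random tensors stand in for
# precomputed (query, context) embedding pairs, where row i of each tensor is
# a matching positive pair.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    from llama_index.legacy.embeddings.adapter_utils import LinearLayer

    # stand-in embeddings: 256 pairs of 384-dim query/context vectors
    query_embeds = torch.randn(256, 384)
    context_embeds = torch.randn(256, 384)
    loader = DataLoader(
        TensorDataset(query_embeds, context_embeds), batch_size=32, shuffle=True
    )

    adapter = LinearLayer(in_dim=384, out_dim=384)  # assumed signature
    train_model(
        adapter,
        loader,
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
        epochs=4,
        warmup_steps=100,  # keep warmup short for a toy run
        output_path="adapter_output",
        verbose=True,
    )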