{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1Wq4SB9A_9ic"
   },
   "source": [
    "# 🥱 LazyMergekit\n",
    "\n",
    "> 🗣️ [Large Language Model Course](https://github.com/mlabonne/llm-course)\n",
    "\n",
    "❤️ Created by [@maximelabonne](https://twitter.com/maximelabonne).\n",
    "\n",
    "This notebook allows you to easily merge multiple models using [mergekit](https://github.com/cg123/mergekit). To evaluate your merges, see [🧐 LLM AutoEval](https://colab.research.google.com/drive/1Igs3WZuXAIv9X0vwqiE90QlEPys8e8Oa?usp=sharing#scrollTo=elyxjYI_rY5W).\n",
    "\n",
    "*Special thanks to [@cg123](https://github.com/cg123) for this library and [@mrfakename](https://gist.github.com/fakerybakery) who told me about sharding (see his [Gist](https://gist.github.com/fakerybakery/d30a4d31b4f914757c1381166b9c683b)).*"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "LGd7jlfCpNcg"
   },
   "outputs": [],
   "source": [
    "MODEL_NAME = \"HermesBagel-34B-v0.1\"\n",
    "yaml_config = \"\"\"\n",
    "slices:\n",
    "  - sources:\n",
    "      - model: NousResearch/Nous-Hermes-2-Yi-34B\n",
    "        layer_range: [0, 60]\n",
    "      - model: jondurbin/bagel-dpo-34b-v0.2\n",
    "        layer_range: [0, 60]\n",
    "merge_method: slerp\n",
    "base_model: NousResearch/Nous-Hermes-2-Yi-34B\n",
    "parameters:\n",
    "  t:\n",
    "    - filter: self_attn\n",
    "      value: [0, 0.5, 0.3, 0.7, 1]\n",
    "    - filter: mlp\n",
    "      value: [1, 0.5, 0.7, 0.3, 0]\n",
    "    - value: 0.5\n",
    "dtype: bfloat16\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "cellView": "form",
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "d5mYzDo1q96y",
    "outputId": "5136bcf3-923a-4d40-d60d-12f25eaea3bd"
   },
   "outputs": [],
   "source": [
    "# @title ## Run merge\n",
    "\n",
    "# @markdown ### Runtime type\n",
    "# @markdown Select your runtime (CPU, CPU + High-RAM, or GPU).\n",
    "\n",
    "runtime = \"GPU\"  # @param [\"CPU\", \"CPU + High-RAM\", \"GPU\"]\n",
    "\n",
    "# @markdown ### Mergekit arguments\n",
    "# @markdown Use the `main` branch by default, or [`mixtral`](https://github.com/cg123/mergekit/blob/mixtral/moe.md) if you want to create a Mixture of Experts.\n",
    "\n",
    "branch = \"main\"  # @param [\"main\", \"mixtral\"]\n",
    "trust_remote_code = True  # @param {type:\"boolean\"}\n",
    "\n",
    "# Install mergekit\n",
    "if branch == \"main\":\n",
    "    !git clone https://github.com/cg123/mergekit.git\n",
    "    !cd mergekit && pip install -qqq -e . --progress-bar off\n",
    "elif branch == \"mixtral\":\n",
    "    !git clone -b mixtral https://github.com/cg123/mergekit.git\n",
    "    !cd mergekit && pip install -qqq -e . --progress-bar off\n",
    "    !pip install -qqq -U transformers --progress-bar off\n",
    "\n",
    "# Save config as yaml file\n",
    "with open(\"config.yaml\", \"w\", encoding=\"utf-8\") as f:\n",
    "    f.write(yaml_config)\n",
    "\n",
    "# Base CLI\n",
    "if branch == \"main\":\n",
    "    cli = \"mergekit-yaml config.yaml merge --copy-tokenizer\"\n",
    "elif branch == \"mixtral\":\n",
    "    cli = \"mergekit-moe config.yaml merge --copy-tokenizer\"\n",
    "\n",
    "# Additional arguments\n",
    "if runtime == \"CPU\":\n",
    "    cli += \" --allow-crimes --out-shard-size 1B --lazy-unpickle\"\n",
    "elif runtime == \"GPU\":\n",
    "    cli += \" --cuda --low-cpu-memory\"\n",
    "if trust_remote_code:\n",
    "    cli += \" --trust-remote-code\"\n",
    "\n",
    "print(cli)\n",
    "\n",
    "# Merge models\n",
    "!{cli}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ik0V0dF55gfU"
   },
   "outputs": [],
   "source": [
    "# @title ## Upload model to Hugging Face { display-mode: \"form\" }\n",
    "# @markdown Enter your username and the name of the Colab secret that stores your [Hugging Face access token](https://huggingface.co/settings/tokens).\n",
    "username = \"dfurman\"  # @param {type:\"string\"}\n",
    "token = \"HF_TOKEN\"  # @param {type:\"string\"}\n",
    "\n",
    "!pip install -qU huggingface_hub\n",
    "\n",
    "import yaml\n",
    "\n",
    "from huggingface_hub import ModelCard, ModelCardData, HfApi\n",
    "from google.colab import userdata\n",
    "from jinja2 import Template\n",
    "\n",
    "if branch == \"main\":\n",
    "    template_text = \"\"\"\n",
    "---\n",
    "license: apache-2.0\n",
    "tags:\n",
    "- merge\n",
    "- mergekit\n",
    "- lazymergekit\n",
    "{%- for model in models %}\n",
    "- {{ model }}\n",
    "{%- endfor %}\n",
    "---\n",
    "\n",
    "# {{ model_name }}\n",
    "\n",
    "{{ model_name }} is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):\n",
    "\n",
    "{%- for model in models %}\n",
    "* [{{ model }}](https://huggingface.co/{{ model }})\n",
    "{%- endfor %}\n",
    "\n",
    "## 🧩 Configuration\n",
    "\n",
    "```yaml\n",
    "{{- yaml_config -}}\n",
    "```\n",
    "\n",
    "## 💻 Usage\n",
    "\n",
    "```python\n",
    "!pip install -qU transformers accelerate\n",
    "\n",
    "from transformers import AutoTokenizer\n",
    "import transformers\n",
    "import torch\n",
    "\n",
    "model = \"{{ username }}/{{ model_name }}\"\n",
    "messages = [{\"role\": \"user\", \"content\": \"What is a large language model?\"}]\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(model)\n",
    "prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
    "pipeline = transformers.pipeline(\n",
    "    \"text-generation\",\n",
    "    model=model,\n",
    "    torch_dtype=torch.float16,\n",
    "    device_map=\"auto\",\n",
    ")\n",
    "\n",
    "outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)\n",
    "print(outputs[0][\"generated_text\"])\n",
    "```\n",
    "\"\"\"\n",
    "\n",
    "    # Create a Jinja template object\n",
    "    jinja_template = Template(template_text.strip())\n",
    "\n",
    "    # Get list of models from config\n",
    "    data = yaml.safe_load(yaml_config)\n",
    "    if \"models\" in data:\n",
    "        models = [\n",
    "            data[\"models\"][i][\"model\"]\n",
    "            for i in range(len(data[\"models\"]))\n",
    "            if \"parameters\" in data[\"models\"][i]\n",
    "        ]\n",
    "    elif \"parameters\" in data:\n",
    "        models = [\n",
    "            data[\"slices\"][0][\"sources\"][i][\"model\"]\n",
    "            for i in range(len(data[\"slices\"][0][\"sources\"]))\n",
    "        ]\n",
    "    elif \"slices\" in data:\n",
    "        models = [\n",
    "            data[\"slices\"][i][\"sources\"][0][\"model\"] for i in range(len(data[\"slices\"]))\n",
    "        ]\n",
    "    else:\n",
    "        raise Exception(\"No models or slices found in yaml config\")\n",
    "\n",
    "    # Fill the template\n",
    "    content = jinja_template.render(\n",
    "        model_name=MODEL_NAME,\n",
    "        models=models,\n",
    "        yaml_config=yaml_config,\n",
    "        username=username,\n",
    "    )\n",
    "\n",
    "elif branch == \"mixtral\":\n",
    "    template_text = \"\"\"\n",
    "---\n",
    "license: apache-2.0\n",
    "tags:\n",
    "- moe\n",
    "- merge\n",
    "- mergekit\n",
    "- lazymergekit\n",
    "{%- for model in models %}\n",
    "- {{ model }}\n",
    "{%- endfor %}\n",
    "---\n",
    "\n",
    "# {{ model_name }}\n",
    "\n",
    "{{ model_name }} is a Mixture of Experts (MoE) made with the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):\n",
    "\n",
    "{%- for model in models %}\n",
    "* [{{ model }}](https://huggingface.co/{{ model }})\n",
    "{%- endfor %}\n",
    "\n",
    "## 🧩 Configuration\n",
    "\n",
    "```yaml\n",
    "{{- yaml_config -}}\n",
    "```\n",
    "\n",
    "## 💻 Usage\n",
    "\n",
    "```python\n",
    "!pip install -qU transformers bitsandbytes accelerate\n",
    "\n",
    "from transformers import AutoTokenizer\n",
    "import transformers\n",
    "import torch\n",
    "\n",
    "model = \"{{ username }}/{{ model_name }}\"\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(model)\n",
    "pipeline = transformers.pipeline(\n",
    "    \"text-generation\",\n",
    "    model=model,\n",
    "    model_kwargs={\"torch_dtype\": torch.float16, \"load_in_4bit\": True},\n",
    ")\n",
    "\n",
    "messages = [{\"role\": \"user\", \"content\": \"Explain what a Mixture of Experts is in less than 100 words.\"}]\n",
    "prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
    "outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)\n",
    "print(outputs[0][\"generated_text\"])\n",
    "```\n",
    "\"\"\"\n",
    "\n",
    "    # Create a Jinja template object\n",
    "    jinja_template = Template(template_text.strip())\n",
    "\n",
    "    # Fill the template\n",
    "    data = yaml.safe_load(yaml_config)\n",
    "    models = [model[\"source_model\"] for model in data[\"experts\"]]\n",
    "\n",
    "    content = jinja_template.render(\n",
    "        model_name=MODEL_NAME,\n",
    "        models=models,\n",
    "        yaml_config=yaml_config,\n",
    "        username=username,\n",
    "    )\n",
    "\n",
    "# Save the model card\n",
    "card = ModelCard(content)\n",
    "card.save(\"merge/README.md\")\n",
    "\n",
    "# Defined in the secrets tab in Google Colab\n",
    "api = HfApi(token=userdata.get(token))\n",
    "\n",
    "# Upload merge folder\n",
    "api.create_repo(\n",
    "    repo_id=f\"{username}/{MODEL_NAME}\",\n",
    "    repo_type=\"model\",\n",
    "    exist_ok=True,\n",
    ")\n",
    "api.upload_folder(\n",
    "    repo_id=f\"{username}/{MODEL_NAME}\",\n",
    "    folder_path=\"merge\",\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "Z9SrqDnTfr5D"
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "A100",
   "machine_shape": "hm",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}