autogen

Форк
0
/
agentchat_guidance.ipynb 
326 строк · 12.0 Кб
1
{
2
 "cells": [
3
  {
4
   "cell_type": "markdown",
5
   "metadata": {},
6
   "source": [
7
    "# Using Guidance with AutoGen\n",
8
    "\n",
9
    "This notebook shows how Guidance can be used to enable structured responses from AutoGen agents. In particular, this notebook focuses on creating agents that always output a valid code block or valid json object.\n"
10
   ]
11
  },
12
  {
13
   "cell_type": "code",
14
   "execution_count": 14,
15
   "metadata": {},
16
   "outputs": [],
17
   "source": [
18
    "import re\n",
19
    "\n",
20
    "from guidance import assistant, gen, models, system, user\n",
21
    "from pydantic import BaseModel\n",
22
    "\n",
23
    "from autogen import Agent, AssistantAgent, UserProxyAgent, config_list_from_json\n",
24
    "\n",
25
    "llm_config = config_list_from_json(\"OAI_CONFIG_LIST\")[0]  # use the first config\n",
26
    "gpt = models.OpenAI(\"gpt-4\", api_key=llm_config.get(\"api_key\"))"
27
   ]
28
  },
29
  {
30
   "cell_type": "markdown",
31
   "metadata": {},
32
   "source": [
33
    "The example below uses Guidance to create a `guidance_coder` agent that only responds with valid code blocks."
34
   ]
35
  },
36
  {
37
   "cell_type": "code",
38
   "execution_count": 20,
39
   "metadata": {},
40
   "outputs": [
41
    {
42
     "name": "stdout",
43
     "output_type": "stream",
44
     "text": [
45
      "\u001b[33muser\u001b[0m (to guidance_coder):\n",
46
      "\n",
47
      "Plot and save a chart of nvidia and tsla stock price change YTD.\n",
48
      "\n",
49
      "--------------------------------------------------------------------------------\n",
50
      "\u001b[33mguidance_coder\u001b[0m (to user):\n",
51
      "\n",
52
      "```python\n",
53
      "# filename: stock_price_change.py\n",
54
      "\n",
55
      "import pandas as pd\n",
56
      "import yfinance as yf\n",
57
      "import matplotlib.pyplot as plt\n",
58
      "from datetime import datetime\n",
59
      "\n",
60
      "# Get today's date\n",
61
      "today = datetime.today().strftime('%Y-%m-%d')\n",
62
      "\n",
63
      "# Download stock data\n",
64
      "nvda = yf.download('NVDA', start='2022-01-01', end=today)\n",
65
      "tsla = yf.download('TSLA', start='2022-01-01', end=today)\n",
66
      "\n",
67
      "# Calculate percentage change in closing price\n",
68
      "nvda['Pct Change'] = nvda['Close'].pct_change()\n",
69
      "tsla['Pct Change'] = tsla['Close'].pct_change()\n",
70
      "\n",
71
      "# Plot percentage change\n",
72
      "plt.figure(figsize=(14,7))\n",
73
      "plt.plot(nvda['Pct Change'], label='NVDA')\n",
74
      "plt.plot(tsla['Pct Change'], label='TSLA')\n",
75
      "plt.title('Nvidia and Tesla Stock Price Change YTD')\n",
76
      "plt.xlabel('Date')\n",
77
      "plt.ylabel('Percentage Change')\n",
78
      "plt.legend()\n",
79
      "plt.grid(True)\n",
80
      "\n",
81
      "# Save the plot as a PNG file\n",
82
      "plt.savefig('stock_price_change.png')\n",
83
      "```\n",
84
      "\n",
85
      "--------------------------------------------------------------------------------\n",
86
      "\u001b[31m\n",
87
      ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
88
      "\u001b[31m\n",
89
      ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n"
90
     ]
91
    },
92
    {
93
     "name": "stderr",
94
     "output_type": "stream",
95
     "text": [
96
      "execute_code was called without specifying a value for use_docker. Since the python docker package is not available, code will be run natively. Note: this fallback behavior is subject to change\n"
97
     ]
98
    },
99
    {
100
     "name": "stdout",
101
     "output_type": "stream",
102
     "text": [
103
      "\u001b[33muser\u001b[0m (to guidance_coder):\n",
104
      "\n",
105
      "exitcode: 0 (execution succeeded)\n",
106
      "Code output: \n",
107
      "\n",
108
      "[*********************100%%**********************]  1 of 1 completed\n",
109
      "\n",
110
      "[*********************100%%**********************]  1 of 1 completed\n",
111
      "\n",
112
      "\n",
113
      "--------------------------------------------------------------------------------\n",
114
      "\u001b[33mguidance_coder\u001b[0m (to user):\n",
115
      "\n",
116
      "Great! The code executed successfully and the chart of Nvidia and Tesla stock price change Year-To-Date (YTD) has been saved as 'stock_price_change.png' in the current directory. You can open this file to view the chart.\n",
117
      "\n",
118
      "TERMINATE\n",
119
      "\n",
120
      "--------------------------------------------------------------------------------\n"
121
     ]
122
    }
123
   ],
124
   "source": [
125
    "def is_valid_code_block(code):\n",
126
    "    pattern = r\"```[\\w\\s]*\\n([\\s\\S]*?)\\n```\"\n",
127
    "    match = re.search(pattern, code)\n",
128
    "    if match:\n",
129
    "        return True\n",
130
    "    else:\n",
131
    "        return False\n",
132
    "\n",
133
    "\n",
134
    "def generate_structured_response(recipient, messages, sender, config):\n",
135
    "    gpt = models.OpenAI(\"gpt-4\", api_key=llm_config.get(\"api_key\"), echo=False)\n",
136
    "\n",
137
    "    # populate the recipient with the messages from the history\n",
138
    "    with system():\n",
139
    "        lm = gpt + recipient.system_message\n",
140
    "\n",
141
    "    for message in messages:\n",
142
    "        if message.get(\"role\") == \"user\":\n",
143
    "            with user():\n",
144
    "                lm += message.get(\"content\")\n",
145
    "        else:\n",
146
    "            with assistant():\n",
147
    "                lm += message.get(\"content\")\n",
148
    "\n",
149
    "    # generate a new response and store it\n",
150
    "    with assistant():\n",
151
    "        lm += gen(name=\"initial_response\")\n",
152
    "    # ask the agent to reflect on the nature of the response and store it\n",
153
    "    with user():\n",
154
    "        lm += \"Does the very last response from you contain code? Respond with yes or no.\"\n",
155
    "    with assistant():\n",
156
    "        lm += gen(name=\"contains_code\")\n",
157
    "    # if the response contains code, ask the agent to generate a proper code block\n",
158
    "    if \"yes\" in lm[\"contains_code\"].lower():\n",
159
    "        with user():\n",
160
    "            lm += \"Respond with a single blocks containing the valid code. Valid code blocks start with ```\"\n",
161
    "        with assistant():\n",
162
    "            lm += \"```\" + gen(name=\"code\")\n",
163
    "            response = \"```\" + lm[\"code\"]\n",
164
    "\n",
165
    "            is_valid = is_valid_code_block(response)\n",
166
    "            if not is_valid:\n",
167
    "                raise ValueError(f\"Failed to generate a valid code block\\n {response}\")\n",
168
    "\n",
169
    "    # otherwise, just use the initial response\n",
170
    "    else:\n",
171
    "        response = lm[\"initial_response\"]\n",
172
    "\n",
173
    "    return True, response\n",
174
    "\n",
175
    "\n",
176
    "guidance_agent = AssistantAgent(\"guidance_coder\", llm_config=llm_config)\n",
177
    "guidance_agent.register_reply(Agent, generate_structured_response, 1)\n",
178
    "user_proxy = UserProxyAgent(\n",
179
    "    \"user\",\n",
180
    "    human_input_mode=\"TERMINATE\",\n",
181
    "    code_execution_config={\n",
182
    "        \"work_dir\": \"coding\",\n",
183
    "        \"use_docker\": False,\n",
184
    "    },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n",
185
    "    is_termination_msg=lambda msg: \"TERMINATE\" in msg.get(\"content\"),\n",
186
    ")\n",
187
    "user_proxy.initiate_chat(guidance_agent, message=\"Plot and save a chart of nvidia and tsla stock price change YTD.\")"
188
   ]
189
  },
190
  {
191
   "cell_type": "markdown",
192
   "metadata": {},
193
   "source": [
194
    "The example below uses Guidance to enable a `guidance_labeler` agent that only responds with a valid JSON that labels a given comment/joke."
195
   ]
196
  },
197
  {
198
   "cell_type": "code",
199
   "execution_count": 16,
200
   "metadata": {},
201
   "outputs": [
202
    {
203
     "name": "stdout",
204
     "output_type": "stream",
205
     "text": [
206
      "\u001b[33muser\u001b[0m (to guidance_labeler):\n",
207
      "\n",
208
      "\n",
209
      "Label the TEXT via the following instructions:\n",
210
      "                         \n",
211
      "The label must be a JSON of the format:\n",
212
      "{\n",
213
      "    \"label\": str,\n",
214
      "    \"explanation\": str\n",
215
      "}\n",
216
      "                         \n",
217
      "TEXT: what did the fish say when it bumped into a wall? Dam!\n",
218
      "\n",
219
      "\n",
220
      "\n",
221
      "--------------------------------------------------------------------------------\n",
222
      "\u001b[33mguidance_labeler\u001b[0m (to user):\n",
223
      "\n",
224
      "{\"label\":\"Joke\",\"explanation\":\"The text is a joke, using a play on words where the fish says 'Dam!' after bumping into a wall, which is a pun on the word 'damn' and a 'dam' which is a barrier that stops or restricts the flow of water, often creating a reservoir, and is something a fish might encounter.\"}\n",
225
      "\n",
226
      "--------------------------------------------------------------------------------\n"
227
     ]
228
    }
229
   ],
230
   "source": [
231
    "class Response(BaseModel):\n",
232
    "    label: str\n",
233
    "    explanation: str\n",
234
    "\n",
235
    "\n",
236
    "response_prompt_instructions = \"\"\"The label must be a JSON of the format:\n",
237
    "{\n",
238
    "    \"label\": str,\n",
239
    "    \"explanation\": str\n",
240
    "}\"\"\"\n",
241
    "\n",
242
    "\n",
243
    "def generate_structured_response(recipient, messages, sender, config):\n",
244
    "    gpt = models.OpenAI(\"gpt-4\", api_key=llm_config.get(\"api_key\"), echo=False)\n",
245
    "\n",
246
    "    # populate the recipient with the messages from the history\n",
247
    "    with system():\n",
248
    "        lm = gpt + recipient.system_message\n",
249
    "\n",
250
    "    for message in messages:\n",
251
    "        if message.get(\"role\") == \"user\":\n",
252
    "            with user():\n",
253
    "                lm += message.get(\"content\")\n",
254
    "        else:\n",
255
    "            with assistant():\n",
256
    "                lm += message.get(\"content\")\n",
257
    "\n",
258
    "    # generate a new response and store it\n",
259
    "    with assistant():\n",
260
    "        lm += gen(name=\"initial_response\")\n",
261
    "    # ask the agent to reflect on the nature of the response and store it\n",
262
    "    with user():\n",
263
    "        lm += \"Does the very last response from you contain JSON object? Respond with yes or no.\"\n",
264
    "    with assistant():\n",
265
    "        lm += gen(name=\"contains_json\")\n",
266
    "    # if the response contains code, ask the agent to generate a proper code block\n",
267
    "    if \"yes\" in lm[\"contains_json\"].lower():\n",
268
    "        with user():\n",
269
    "            lm += (\n",
270
    "                \"What was that JSON object? Only respond with that valid JSON string. A valid JSON string starts with {\"\n",
271
    "            )\n",
272
    "        with assistant():\n",
273
    "            lm += \"{\" + gen(name=\"json\")\n",
274
    "            response = \"{\" + lm[\"json\"]\n",
275
    "            # verify that the response is valid json\n",
276
    "            try:\n",
277
    "                response_obj = Response.model_validate_json(response)\n",
278
    "                response = response_obj.model_dump_json()\n",
279
    "            except Exception as e:\n",
280
    "                response = str(e)\n",
281
    "    # otherwise, just use the initial response\n",
282
    "    else:\n",
283
    "        response = lm[\"initial_response\"]\n",
284
    "\n",
285
    "    return True, response\n",
286
    "\n",
287
    "\n",
288
    "guidance_agent = AssistantAgent(\"guidance_labeler\", llm_config=llm_config, system_message=\"You are a helpful assistant\")\n",
289
    "guidance_agent.register_reply(Agent, generate_structured_response, 1)\n",
290
    "user_proxy = UserProxyAgent(\"user\", human_input_mode=\"ALWAYS\", code_execution_config=False)\n",
291
    "user_proxy.initiate_chat(\n",
292
    "    guidance_agent,\n",
293
    "    message=f\"\"\"\n",
294
    "Label the TEXT via the following instructions:\n",
295
    "\n",
296
    "{response_prompt_instructions}\n",
297
    "\n",
298
    "TEXT: what did the fish say when it bumped into a wall? Dam!\n",
299
    "\n",
300
    "\"\"\",\n",
301
    ")"
302
   ]
303
  }
304
 ],
305
 "metadata": {
306
  "kernelspec": {
307
   "display_name": "Python 3",
308
   "language": "python",
309
   "name": "python3"
310
  },
311
  "language_info": {
312
   "codemirror_mode": {
313
    "name": "ipython",
314
    "version": 3
315
   },
316
   "file_extension": ".py",
317
   "mimetype": "text/x-python",
318
   "name": "python",
319
   "nbconvert_exporter": "python",
320
   "pygments_lexer": "ipython3",
321
   "version": "3.10.12"
322
  }
323
 },
324
 "nbformat": 4,
325
 "nbformat_minor": 2
326
}
327

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.