4
"cell_type": "markdown",
7
"# Using Guidance with AutoGen\n",
9
"This notebook shows how Guidance can be used to enable structured responses from AutoGen agents. In particular, this notebook focuses on creating agents that always output a valid code block or valid json object.\n"
14
"execution_count": 14,
20
import re  # needed by is_valid_code_block below; not present in the visible imports — TODO confirm it isn't on a dropped line

from guidance import assistant, gen, models, system, user
from pydantic import BaseModel

from autogen import Agent, AssistantAgent, UserProxyAgent, config_list_from_json

# Load the model configurations and keep the first entry.
llm_config = config_list_from_json("OAI_CONFIG_LIST")[0]  # use the first config
# Guidance model handle, reusing the same API key as the AutoGen config.
gpt = models.OpenAI("gpt-4", api_key=llm_config.get("api_key"))
30
"cell_type": "markdown",
33
"The example below uses Guidance to create a `guidance_coder` agent that only responds with valid code blocks."
38
"execution_count": 20,
43
"output_type": "stream",
45
"\u001b[33muser\u001b[0m (to guidance_coder):\n",
47
"Plot and save a chart of nvidia and tsla stock price change YTD.\n",
49
"--------------------------------------------------------------------------------\n",
50
"\u001b[33mguidance_coder\u001b[0m (to user):\n",
53
"# filename: stock_price_change.py\n",
55
"import pandas as pd\n",
56
"import yfinance as yf\n",
57
"import matplotlib.pyplot as plt\n",
58
"from datetime import datetime\n",
60
"# Get today's date\n",
61
"today = datetime.today().strftime('%Y-%m-%d')\n",
63
"# Download stock data\n",
64
"nvda = yf.download('NVDA', start='2022-01-01', end=today)\n",
65
"tsla = yf.download('TSLA', start='2022-01-01', end=today)\n",
67
"# Calculate percentage change in closing price\n",
68
"nvda['Pct Change'] = nvda['Close'].pct_change()\n",
69
"tsla['Pct Change'] = tsla['Close'].pct_change()\n",
71
"# Plot percentage change\n",
72
"plt.figure(figsize=(14,7))\n",
73
"plt.plot(nvda['Pct Change'], label='NVDA')\n",
74
"plt.plot(tsla['Pct Change'], label='TSLA')\n",
75
"plt.title('Nvidia and Tesla Stock Price Change YTD')\n",
76
"plt.xlabel('Date')\n",
77
"plt.ylabel('Percentage Change')\n",
81
"# Save the plot as a PNG file\n",
82
"plt.savefig('stock_price_change.png')\n",
85
"--------------------------------------------------------------------------------\n",
87
">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
89
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n"
94
"output_type": "stream",
96
"execute_code was called without specifying a value for use_docker. Since the python docker package is not available, code will be run natively. Note: this fallback behavior is subject to change\n"
101
"output_type": "stream",
103
"\u001b[33muser\u001b[0m (to guidance_coder):\n",
105
"exitcode: 0 (execution succeeded)\n",
108
"[*********************100%%**********************] 1 of 1 completed\n",
110
"[*********************100%%**********************] 1 of 1 completed\n",
113
"--------------------------------------------------------------------------------\n",
114
"\u001b[33mguidance_coder\u001b[0m (to user):\n",
116
"Great! The code executed successfully and the chart of Nvidia and Tesla stock price change Year-To-Date (YTD) has been saved as 'stock_price_change.png' in the current directory. You can open this file to view the chart.\n",
120
"--------------------------------------------------------------------------------\n"
125
def is_valid_code_block(code):
    """Return True if `code` contains a fenced Markdown code block.

    A valid block is ``` (optionally followed by a language tag), a newline,
    any body, a newline, and a closing ```.
    """
    pattern = r"```[\w\s]*\n([\s\S]*?)\n```"
    # re.search yields a Match or None; collapse directly to a boolean
    # instead of binding the match and branching on it.
    return re.search(pattern, code) is not None
134
def generate_structured_response(recipient, messages, sender, config):
    """AutoGen reply function that uses Guidance to force a valid code block.

    Replays the chat history into a Guidance model, generates a draft reply,
    asks the model whether the draft contains code and, if so, regenerates it
    as a single fenced code block, which is then validated.

    Returns:
        (True, response) so AutoGen treats this reply as final.

    Raises:
        ValueError: if the regenerated reply is not a valid fenced code block.
    """
    # echo=False keeps Guidance from printing the transcript while generating.
    gpt = models.OpenAI("gpt-4", api_key=llm_config.get("api_key"), echo=False)

    # populate the recipient with the messages from the history
    with system():
        lm = gpt + recipient.system_message

    for message in messages:
        if message.get("role") == "user":
            with user():
                lm += message.get("content")
        else:
            with assistant():
                lm += message.get("content")

    # generate a new response and store it
    with assistant():
        lm += gen(name="initial_response")

    # ask the agent to reflect on the nature of the response and store it
    with user():
        lm += "Does the very last response from you contain code? Respond with yes or no."
    with assistant():
        lm += gen(name="contains_code")

    # if the response contains code, ask the agent to generate a proper code block
    if "yes" in lm["contains_code"].lower():
        with user():
            # fixed prompt typo: "a single blocks" -> "a single block"
            lm += "Respond with a single block containing the valid code. Valid code blocks start with ```"
        with assistant():
            lm += "```" + gen(name="code")
            response = "```" + lm["code"]

        # fail loudly rather than handing a malformed block to the executor
        is_valid = is_valid_code_block(response)
        if not is_valid:
            raise ValueError(f"Failed to generate a valid code block\n {response}")

    # otherwise, just use the initial response
    else:
        response = lm["initial_response"]

    return True, response
176
# Create the coding agent and route its replies through the structured-response generator.
guidance_agent = AssistantAgent("guidance_coder", llm_config=llm_config)
guidance_agent.register_reply(Agent, generate_structured_response, 1)

user_proxy = UserProxyAgent(
    "user",
    human_input_mode="TERMINATE",
    code_execution_config={
        "work_dir": "coding",
        "use_docker": False,
    },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
    # guard against messages whose "content" is None (e.g. tool/function-call
    # messages); the original lambda raised TypeError on `None in` checks
    is_termination_msg=lambda msg: "TERMINATE" in (msg.get("content") or ""),
)
user_proxy.initiate_chat(guidance_agent, message="Plot and save a chart of nvidia and tsla stock price change YTD.")
191
"cell_type": "markdown",
194
"The example below uses Guidance to enable a `guidance_labeler` agent that only responds with a valid JSON that labels a given comment/joke."
199
"execution_count": 16,
204
"output_type": "stream",
206
"\u001b[33muser\u001b[0m (to guidance_labeler):\n",
209
"Label the TEXT via the following instructions:\n",
211
"The label must be a JSON of the format:\n",
213
" \"label\": str,\n",
214
" \"explanation\": str\n",
217
"TEXT: what did the fish say when it bumped into a wall? Dam!\n",
221
"--------------------------------------------------------------------------------\n",
222
"\u001b[33mguidance_labeler\u001b[0m (to user):\n",
224
"{\"label\":\"Joke\",\"explanation\":\"The text is a joke, using a play on words where the fish says 'Dam!' after bumping into a wall, which is a pun on the word 'damn' and a 'dam' which is a barrier that stops or restricts the flow of water, often creating a reservoir, and is something a fish might encounter.\"}\n",
226
"--------------------------------------------------------------------------------\n"
231
class Response(BaseModel):
    """Schema that the labeler's JSON reply is validated against."""

    # short category assigned to the text (e.g. "Joke")
    label: str
    # free-text justification for the chosen label
    explanation: str


# Instructions shown to the agent describing the required JSON shape.
response_prompt_instructions = """The label must be a JSON of the format:
{
    "label": str,
    "explanation": str
}"""
243
def generate_structured_response(recipient, messages, sender, config):
    """AutoGen reply function that uses Guidance to force a valid JSON label.

    Replays the chat history into a Guidance model, generates a draft reply,
    asks the model whether the draft contains a JSON object and, if so,
    regenerates it as a bare JSON string, which is validated against the
    `Response` pydantic schema.

    Returns:
        (True, response) so AutoGen treats this reply as final; on validation
        failure the error text is returned as the reply (best-effort).
    """
    # echo=False keeps Guidance from printing the transcript while generating.
    gpt = models.OpenAI("gpt-4", api_key=llm_config.get("api_key"), echo=False)

    # populate the recipient with the messages from the history
    with system():
        lm = gpt + recipient.system_message

    for message in messages:
        if message.get("role") == "user":
            with user():
                lm += message.get("content")
        else:
            with assistant():
                lm += message.get("content")

    # generate a new response and store it
    with assistant():
        lm += gen(name="initial_response")

    # ask the agent to reflect on the nature of the response and store it
    with user():
        # fixed prompt grammar: "contain JSON object" -> "contain a JSON object"
        lm += "Does the very last response from you contain a JSON object? Respond with yes or no."
    with assistant():
        lm += gen(name="contains_json")

    # if the response contains a JSON object, ask the agent to extract it
    # (comment previously said "code" — copy/paste from the coder example)
    if "yes" in lm["contains_json"].lower():
        with user():
            lm += (
                "What was that JSON object? Only respond with that valid JSON string. A valid JSON string starts with {"
            )
        with assistant():
            lm += "{" + gen(name="json")
            response = "{" + lm["json"]
            # verify that the response is valid json
            try:
                response_obj = Response.model_validate_json(response)
                response = response_obj.model_dump_json()
            except Exception as e:
                # deliberate best-effort: surface the validation error as the
                # reply instead of crashing the chat
                response = str(e)

    # otherwise, just use the initial response
    else:
        response = lm["initial_response"]

    return True, response
288
# Create the labeling agent and route its replies through the structured-response generator.
guidance_agent = AssistantAgent("guidance_labeler", llm_config=llm_config, system_message="You are a helpful assistant")
guidance_agent.register_reply(Agent, generate_structured_response, 1)

# Human-in-the-loop proxy; no code execution is needed for labeling.
user_proxy = UserProxyAgent("user", human_input_mode="ALWAYS", code_execution_config=False)

# Build the labeling task, embedding the JSON-format instructions.
task_message = f"""
Label the TEXT via the following instructions:

{response_prompt_instructions}

TEXT: what did the fish say when it bumped into a wall? Dam!

"""
user_proxy.initiate_chat(guidance_agent, message=task_message)
307
"display_name": "Python 3",
308
"language": "python",
316
"file_extension": ".py",
317
"mimetype": "text/x-python",
319
"nbconvert_exporter": "python",
320
"pygments_lexer": "ipython3",