Flowise

{
    "description": "Given API docs, the agent automatically decides which API to call, generating the URL and request body from the conversation",
    "categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,Conversational Agent,Langchain",
    "framework": "Langchain",
    "nodes": [
        {
            "width": 300,
            "height": 459,
            "id": "getApiChain_0",
            "position": {
                "x": 1222.6923202234623,
                "y": 359.97676456347756
            },
            "type": "customNode",
            "data": {
                "id": "getApiChain_0",
                "label": "GET API Chain",
                "version": 1,
                "name": "getApiChain",
                "type": "GETApiChain",
                "baseClasses": ["GETApiChain", "BaseChain", "BaseLangChain"],
                "category": "Chains",
                "description": "Chain to run queries against GET API",
                "inputParams": [
                    {
                        "label": "API Documentation",
                        "name": "apiDocs",
                        "type": "string",
                        "description": "Description of how API works. Please refer to more <a target=\"_blank\" href=\"https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/chains/api/open_meteo_docs.py\">examples</a>",
                        "rows": 4,
                        "id": "getApiChain_0-input-apiDocs-string"
                    },
                    {
                        "label": "Headers",
                        "name": "headers",
                        "type": "json",
                        "additionalParams": true,
                        "optional": true,
                        "id": "getApiChain_0-input-headers-json"
                    },
                    {
                        "label": "URL Prompt",
                        "name": "urlPrompt",
                        "type": "string",
                        "description": "Prompt used to tell LLMs how to construct the URL. Must contains {api_docs} and {question}",
                        "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
                        "rows": 4,
                        "additionalParams": true,
                        "id": "getApiChain_0-input-urlPrompt-string"
                    },
                    {
                        "label": "Answer Prompt",
                        "name": "ansPrompt",
                        "type": "string",
                        "description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}",
                        "default": "Given this {api_response} response for {api_url}. use the given response to answer this {question}",
                        "rows": 4,
                        "additionalParams": true,
                        "id": "getApiChain_0-input-ansPrompt-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Language Model",
                        "name": "model",
                        "type": "BaseLanguageModel",
                        "id": "getApiChain_0-input-model-BaseLanguageModel"
                    }
                ],
                "inputs": {
                    "model": "{{chatOpenAI_1.data.instance}}",
                    "apiDocs": "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nHourly Parameter Definition\nThe parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t°C (°F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0°C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.",
                    "headers": "",
                    "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
                    "ansPrompt": "Given this {api_response} response for {api_url}. use the given response to answer this {question}"
                },
                "outputAnchors": [
                    {
                        "id": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain",
                        "name": "getApiChain",
                        "label": "GETApiChain",
                        "type": "GETApiChain | BaseChain | BaseLangChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1222.6923202234623,
                "y": 359.97676456347756
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 602,
            "id": "chainTool_0",
            "position": {
                "x": 1600.1485877701232,
                "y": 276.38970893436533
            },
            "type": "customNode",
            "data": {
                "id": "chainTool_0",
                "label": "Chain Tool",
                "version": 1,
                "name": "chainTool",
                "type": "ChainTool",
                "baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"],
                "category": "Tools",
                "description": "Use a chain as allowed tool for agent",
                "inputParams": [
                    {
                        "label": "Chain Name",
                        "name": "name",
                        "type": "string",
                        "placeholder": "state-of-union-qa",
                        "id": "chainTool_0-input-name-string"
                    },
                    {
                        "label": "Chain Description",
                        "name": "description",
                        "type": "string",
                        "rows": 3,
                        "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.",
                        "id": "chainTool_0-input-description-string"
                    },
                    {
                        "label": "Return Direct",
                        "name": "returnDirect",
                        "type": "boolean",
                        "optional": true,
                        "id": "chainTool_0-input-returnDirect-boolean"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Base Chain",
                        "name": "baseChain",
                        "type": "BaseChain",
                        "id": "chainTool_0-input-baseChain-BaseChain"
                    }
                ],
                "inputs": {
                    "name": "weather-qa",
                    "description": "useful for when you need to ask question about weather",
                    "returnDirect": "",
                    "baseChain": "{{getApiChain_0.data.instance}}"
                },
                "outputAnchors": [
                    {
                        "id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
                        "name": "chainTool",
                        "label": "ChainTool",
                        "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1600.1485877701232,
                "y": 276.38970893436533
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 376,
            "id": "bufferMemory_0",
            "position": {
                "x": 1642.0644080121785,
                "y": 1715.6131926891728
            },
            "type": "customNode",
            "data": {
                "id": "bufferMemory_0",
                "label": "Buffer Memory",
                "version": 2,
                "name": "bufferMemory",
                "type": "BufferMemory",
                "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
                "category": "Memory",
                "description": "Retrieve chat messages stored in database",
                "inputParams": [
                    {
                        "label": "Session Id",
                        "name": "sessionId",
                        "type": "string",
                        "description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory#ui-and-embedded-chat\">more</a>",
                        "default": "",
                        "additionalParams": true,
                        "optional": true,
                        "id": "bufferMemory_0-input-sessionId-string"
                    },
                    {
                        "label": "Memory Key",
                        "name": "memoryKey",
                        "type": "string",
                        "default": "chat_history",
                        "additionalParams": true,
                        "id": "bufferMemory_0-input-memoryKey-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "sessionId": "",
                    "memoryKey": "chat_history"
                },
                "outputAnchors": [
                    {
                        "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
                        "name": "bufferMemory",
                        "label": "BufferMemory",
                        "type": "BufferMemory | BaseChatMemory | BaseMemory"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1642.0644080121785,
                "y": 1715.6131926891728
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 602,
            "id": "chainTool_1",
            "position": {
                "x": 1284.7746596034926,
                "y": 895.1444797047182
            },
            "type": "customNode",
            "data": {
                "id": "chainTool_1",
                "label": "Chain Tool",
                "version": 1,
                "name": "chainTool",
                "type": "ChainTool",
                "baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"],
                "category": "Tools",
                "description": "Use a chain as allowed tool for agent",
                "inputParams": [
                    {
                        "label": "Chain Name",
                        "name": "name",
                        "type": "string",
                        "placeholder": "state-of-union-qa",
                        "id": "chainTool_1-input-name-string"
                    },
                    {
                        "label": "Chain Description",
                        "name": "description",
                        "type": "string",
                        "rows": 3,
                        "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.",
                        "id": "chainTool_1-input-description-string"
                    },
                    {
                        "label": "Return Direct",
                        "name": "returnDirect",
                        "type": "boolean",
                        "optional": true,
                        "id": "chainTool_1-input-returnDirect-boolean"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Base Chain",
                        "name": "baseChain",
                        "type": "BaseChain",
                        "id": "chainTool_1-input-baseChain-BaseChain"
                    }
                ],
                "inputs": {
                    "name": "discord-bot",
                    "description": "useful for when you need to send message to Discord",
                    "returnDirect": "",
                    "baseChain": "{{postApiChain_0.data.instance}}"
                },
                "outputAnchors": [
                    {
                        "id": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
                        "name": "chainTool",
                        "label": "ChainTool",
                        "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1284.7746596034926,
                "y": 895.1444797047182
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 459,
            "id": "postApiChain_0",
            "position": {
                "x": 933.3631140153886,
                "y": 974.8756002461283
            },
            "type": "customNode",
            "data": {
                "id": "postApiChain_0",
                "label": "POST API Chain",
                "version": 1,
                "name": "postApiChain",
                "type": "POSTApiChain",
                "baseClasses": ["POSTApiChain", "BaseChain", "BaseLangChain"],
                "category": "Chains",
                "description": "Chain to run queries against POST API",
                "inputParams": [
                    {
                        "label": "API Documentation",
                        "name": "apiDocs",
                        "type": "string",
                        "description": "Description of how API works. Please refer to more <a target=\"_blank\" href=\"https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/chains/api/open_meteo_docs.py\">examples</a>",
                        "rows": 4,
                        "id": "postApiChain_0-input-apiDocs-string"
                    },
                    {
                        "label": "Headers",
                        "name": "headers",
                        "type": "json",
                        "additionalParams": true,
                        "optional": true,
                        "id": "postApiChain_0-input-headers-json"
                    },
                    {
                        "label": "URL Prompt",
                        "name": "urlPrompt",
                        "type": "string",
                        "description": "Prompt used to tell LLMs how to construct the URL. Must contains {api_docs} and {question}",
                        "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:",
                        "rows": 4,
                        "additionalParams": true,
                        "id": "postApiChain_0-input-urlPrompt-string"
                    },
                    {
                        "label": "Answer Prompt",
                        "name": "ansPrompt",
                        "type": "string",
                        "description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}",
                        "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:",
                        "rows": 4,
                        "additionalParams": true,
                        "id": "postApiChain_0-input-ansPrompt-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Language Model",
                        "name": "model",
                        "type": "BaseLanguageModel",
                        "id": "postApiChain_0-input-model-BaseLanguageModel"
                    }
                ],
                "inputs": {
                    "model": "{{chatOpenAI_2.data.instance}}",
                    "apiDocs": "API documentation:\nEndpoint: https://eog776prcv6dg0j.m.pipedream.net\n\nThis API is for sending Discord message\n\nQuery body table:\nmessage | string | Message to send | required\n\nResponse schema (string):\nresult | string",
                    "headers": "",
                    "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:",
                    "ansPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:"
                },
                "outputAnchors": [
                    {
                        "id": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain",
                        "name": "postApiChain",
                        "label": "POSTApiChain",
                        "type": "POSTApiChain | BaseChain | BaseLangChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 933.3631140153886,
                "y": 974.8756002461283
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 574,
            "id": "chatOpenAI_2",
            "position": {
                "x": 572.8941615312035,
                "y": 937.8425220917356
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_2",
                "label": "ChatOpenAI",
                "version": 6.0,
                "name": "chatOpenAI",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_2-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_2-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_2-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_2-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_2-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_2-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "modelName": "gpt-3.5-turbo",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 572.8941615312035,
                "y": 937.8425220917356
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 574,
            "id": "chatOpenAI_1",
            "position": {
                "x": 828.7788305309582,
                "y": 302.8996144964516
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_1",
                "label": "ChatOpenAI",
                "version": 6.0,
                "name": "chatOpenAI",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_1-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_1-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_1-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_1-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_1-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_1-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "modelName": "gpt-3.5-turbo",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 828.7788305309582,
                "y": 302.8996144964516
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 574,
            "id": "chatOpenAI_3",
            "position": {
                "x": 1148.338912314111,
                "y": 1561.0888070167944
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_3",
                "label": "ChatOpenAI",
                "version": 6.0,
                "name": "chatOpenAI",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_3-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_3-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_3-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_3-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_3-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_3-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "modelName": "gpt-3.5-turbo-16k",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1148.338912314111,
                "y": 1561.0888070167944
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 383,
            "id": "conversationalAgent_0",
            "position": {
                "x": 2090.570467632979,
                "y": 969.5131357270544
            },
            "type": "customNode",
            "data": {
                "id": "conversationalAgent_0",
                "label": "Conversational Agent",
                "version": 3,
                "name": "conversationalAgent",
                "type": "AgentExecutor",
                "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
                "category": "Agents",
                "description": "Conversational agent for a chat model. It will utilize chat specific prompts",
                "inputParams": [
                    {
                        "label": "System Message",
                        "name": "systemMessage",
                        "type": "string",
                        "rows": 4,
                        "default": "Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.",
                        "optional": true,
                        "additionalParams": true,
                        "id": "conversationalAgent_0-input-systemMessage-string"
                    },
                    {
                        "label": "Max Iterations",
                        "name": "maxIterations",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "conversationalAgent_0-input-maxIterations-number"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Allowed Tools",
                        "name": "tools",
                        "type": "Tool",
                        "list": true,
                        "id": "conversationalAgent_0-input-tools-Tool"
                    },
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel",
                        "id": "conversationalAgent_0-input-model-BaseChatModel"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseChatMemory",
                        "id": "conversationalAgent_0-input-memory-BaseChatMemory"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "conversationalAgent_0-input-inputModeration-Moderation"
                    }
                ],
                "inputs": {
                    "inputModeration": "",
                    "tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"],
                    "model": "{{chatOpenAI_3.data.instance}}",
                    "memory": "{{bufferMemory_0.data.instance}}",
                    "systemMessage": "Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist."
                },
                "outputAnchors": [
                    {
                        "id": "conversationalAgent_0-output-conversationalAgent-AgentExecutor|BaseChain|Runnable",
                        "name": "conversationalAgent",
                        "label": "AgentExecutor",
                        "type": "AgentExecutor | BaseChain | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 2090.570467632979,
                "y": 969.5131357270544
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "getApiChain_0",
            "sourceHandle": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain",
            "target": "chainTool_0",
            "targetHandle": "chainTool_0-input-baseChain-BaseChain",
            "type": "buttonedge",
            "id": "getApiChain_0-getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain-chainTool_0-chainTool_0-input-baseChain-BaseChain",
            "data": {
                "label": ""
            }
        },
        {
            "source": "postApiChain_0",
            "sourceHandle": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain",
            "target": "chainTool_1",
            "targetHandle": "chainTool_1-input-baseChain-BaseChain",
            "type": "buttonedge",
            "id": "postApiChain_0-postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain-chainTool_1-chainTool_1-input-baseChain-BaseChain",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_2",
            "sourceHandle": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
            "target": "postApiChain_0",
            "targetHandle": "postApiChain_0-input-model-BaseLanguageModel",
            "type": "buttonedge",
            "id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-postApiChain_0-postApiChain_0-input-model-BaseLanguageModel",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_1",
            "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
            "target": "getApiChain_0",
            "targetHandle": "getApiChain_0-input-model-BaseLanguageModel",
            "type": "buttonedge",
            "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-getApiChain_0-getApiChain_0-input-model-BaseLanguageModel",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chainTool_0",
            "sourceHandle": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
            "target": "conversationalAgent_0",
            "targetHandle": "conversationalAgent_0-input-tools-Tool",
            "type": "buttonedge",
            "id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chainTool_1",
            "sourceHandle": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
            "target": "conversationalAgent_0",
            "targetHandle": "conversationalAgent_0-input-tools-Tool",
            "type": "buttonedge",
            "id": "chainTool_1-chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_3",
            "sourceHandle": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
            "target": "conversationalAgent_0",
            "targetHandle": "conversationalAgent_0-input-model-BaseChatModel",
            "type": "buttonedge",
            "id": "chatOpenAI_3-chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalAgent_0-conversationalAgent_0-input-model-BaseChatModel",
            "data": {
                "label": ""
            }
        },
        {
            "source": "bufferMemory_0",
            "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
            "target": "conversationalAgent_0",
            "targetHandle": "conversationalAgent_0-input-memory-BaseChatMemory",
            "type": "buttonedge",
            "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationalAgent_0-conversationalAgent_0-input-memory-BaseChatMemory",
            "data": {
                "label": ""
            }
        }
    ]
}
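
The JSON above is a Flowise chatflow export: one ChatOpenAI model drives a GET API Chain (Open-Meteo weather docs), another drives a POST API Chain (a Discord webhook), each chain is wrapped in a Chain Tool, and a Conversational Agent with Buffer Memory decides which tool to call. Once imported and saved in Flowise, the flow can be queried over the prediction REST endpoint. A minimal TypeScript sketch, assuming a local Flowise instance on http://localhost:3000 and a placeholder chatflow ID (both values are assumptions, not part of this export):

// Minimal sketch: ask the imported "API Agent" flow a question via
// Flowise's prediction endpoint. Host, port and CHATFLOW_ID are
// assumptions; replace them with the values from your own instance.
const CHATFLOW_ID = "<your-chatflow-id>"; // hypothetical placeholder

async function askApiAgent(question: string): Promise<string> {
  const res = await fetch(
    `http://localhost:3000/api/v1/prediction/${CHATFLOW_ID}`,
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ question }),
    }
  );
  if (!res.ok) {
    throw new Error(`Flowise returned ${res.status}`);
  }
  // The prediction endpoint responds with JSON; the answer is
  // typically carried in the `text` field.
  const data = (await res.json()) as { text: string };
  return data.text;
}

// Example: the agent should route this to the weather-qa GET API Chain.
askApiAgent("What is the temperature in Paris right now?")
  .then(console.log)
  .catch(console.error);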
