Flowise

Simple Conversation Chain.json 
354 lines · 15.5 KB
{
    "description": "Basic example of Conversation Chain with built-in memory - works exactly like ChatGPT",
    "categories": "Buffer Memory,ChatOpenAI,Conversation Chain,Langchain",
    "framework": "Langchain",
    "badge": "POPULAR",
    "nodes": [
        {
            "width": 300,
            "height": 574,
            "id": "chatOpenAI_0",
            "position": {
                "x": 579.0877964395976,
                "y": -138.68792413227874
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_0",
                "label": "ChatOpenAI",
                "version": 6.0,
                "name": "chatOpenAI",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_0-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_0-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_0-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "cache": "",
                    "modelName": "gpt-3.5-turbo-16k",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 579.0877964395976,
                "y": -138.68792413227874
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 376,
            "id": "bufferMemory_0",
            "position": {
                "x": 220.30240896145915,
                "y": 351.61324070296877
            },
            "type": "customNode",
            "data": {
                "id": "bufferMemory_0",
                "label": "Buffer Memory",
                "version": 2,
                "name": "bufferMemory",
                "type": "BufferMemory",
                "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
                "category": "Memory",
                "description": "Retrieve chat messages stored in database",
                "inputParams": [
                    {
                        "label": "Session Id",
                        "name": "sessionId",
                        "type": "string",
                        "description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory#ui-and-embedded-chat\">more</a>",
                        "default": "",
                        "additionalParams": true,
                        "optional": true,
                        "id": "bufferMemory_0-input-sessionId-string"
                    },
                    {
                        "label": "Memory Key",
                        "name": "memoryKey",
                        "type": "string",
                        "default": "chat_history",
                        "additionalParams": true,
                        "id": "bufferMemory_0-input-memoryKey-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "sessionId": "",
                    "memoryKey": "chat_history"
                },
                "outputAnchors": [
                    {
                        "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
                        "name": "bufferMemory",
                        "label": "BufferMemory",
                        "type": "BufferMemory | BaseChatMemory | BaseMemory"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 220.30240896145915,
                "y": 351.61324070296877
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 383,
            "id": "conversationChain_0",
            "position": {
                "x": 958.9887390513221,
                "y": 318.8734467468765
            },
            "type": "customNode",
            "data": {
                "id": "conversationChain_0",
                "label": "Conversation Chain",
                "version": 3,
                "name": "conversationChain",
                "type": "ConversationChain",
                "baseClasses": ["ConversationChain", "LLMChain", "BaseChain", "Runnable"],
                "category": "Chains",
                "description": "Chat models specific conversational chain with memory",
                "inputParams": [
                    {
                        "label": "System Message",
                        "name": "systemMessagePrompt",
                        "type": "string",
                        "rows": 4,
                        "description": "If Chat Prompt Template is provided, this will be ignored",
                        "additionalParams": true,
                        "optional": true,
                        "default": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
                        "placeholder": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
                        "id": "conversationChain_0-input-systemMessagePrompt-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel",
                        "id": "conversationChain_0-input-model-BaseChatModel"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseMemory",
                        "id": "conversationChain_0-input-memory-BaseMemory"
                    },
                    {
                        "label": "Chat Prompt Template",
                        "name": "chatPromptTemplate",
                        "type": "ChatPromptTemplate",
                        "description": "Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable",
                        "optional": true,
                        "id": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "conversationChain_0-input-inputModeration-Moderation"
                    }
                ],
                "inputs": {
                    "inputModeration": "",
                    "model": "{{chatOpenAI_0.data.instance}}",
                    "memory": "{{bufferMemory_0.data.instance}}",
                    "chatPromptTemplate": "",
                    "systemMessagePrompt": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."
                },
                "outputAnchors": [
                    {
                        "id": "conversationChain_0-output-conversationChain-ConversationChain|LLMChain|BaseChain|Runnable",
                        "name": "conversationChain",
                        "label": "ConversationChain",
                        "type": "ConversationChain | LLMChain | BaseChain | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 958.9887390513221,
                "y": 318.8734467468765
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "chatOpenAI_0",
            "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
            "target": "conversationChain_0",
            "targetHandle": "conversationChain_0-input-model-BaseChatModel",
            "type": "buttonedge",
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationChain_0-conversationChain_0-input-model-BaseChatModel"
        },
        {
            "source": "bufferMemory_0",
            "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
            "target": "conversationChain_0",
            "targetHandle": "conversationChain_0-input-memory-BaseMemory",
            "type": "buttonedge",
            "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationChain_0-conversationChain_0-input-memory-BaseMemory"
        }
    ]
}

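To make clear what the graph above wires together, here is a minimal sketch of the equivalent setup in LangChain JS: a ChatOpenAI model and a BufferMemory feeding a ConversationChain, with the same system message, model name, temperature, and memory key as the template's "inputs". It assumes the langchain, @langchain/openai, and @langchain/core packages; import paths vary between LangChain versions, and this is an illustration of the pattern, not Flowise's internal node implementation.

// Illustrative sketch of the same wiring in LangChain JS (assumed package layout).
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
import { BufferMemory } from "langchain/memory";
import { ConversationChain } from "langchain/chains";

async function main() {
    // chatOpenAI_0: gpt-3.5-turbo-16k at temperature 0.9, as set in the template's inputs.
    const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo-16k", temperature: 0.9 });

    // conversationChain_0: default system message, then the stored history, then the new input.
    const prompt = ChatPromptTemplate.fromMessages([
        ["system", "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."],
        new MessagesPlaceholder("chat_history"),
        ["human", "{input}"],
    ]);

    // bufferMemory_0: keeps the full message history under the "chat_history" memory key.
    const memory = new BufferMemory({ memoryKey: "chat_history", returnMessages: true });

    const chain = new ConversationChain({ llm: model, prompt, memory });

    console.log(await chain.invoke({ input: "Hi, my name is Bob." }));
    console.log(await chain.invoke({ input: "What is my name?" })); // answered from memory
}

main().catch(console.error);

In Flowise itself, this template is imported from the Marketplace, saved as a chatflow, and then queried through the chat UI or the prediction REST API; the Session Id field on the Buffer Memory node (left empty here, so a random id is generated) is what keeps separate conversations' histories apart.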